openai_text_to_speech_driver (Innovation Release)
Bases:
BaseTextToSpeechDriver
Source Code in griptape/drivers/text_to_speech/openai_text_to_speech_driver.py
@define
class OpenAiTextToSpeechDriver(BaseTextToSpeechDriver):
    """Text-to-speech driver backed by the OpenAI speech API.

    Configures an `openai.OpenAI` client lazily and converts a list of
    prompt strings into a single `AudioArtifact` via
    `client.audio.speech.create`.
    """

    # OpenAI TTS model name; serialized as part of the driver config.
    model: str = field(default="tts-1", kw_only=True, metadata={"serializable": True})
    # Voice preset; constrained to the values the API accepts.
    voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"] = field(
        default="alloy",
        kw_only=True,
        metadata={"serializable": True},
    )
    # Audio container/codec requested from the API and recorded on the artifact.
    format: Literal["mp3", "opus", "aac", "flac"] = field(default="mp3", kw_only=True, metadata={"serializable": True})
    # NOTE(review): api_type and api_version are stored but never forwarded to
    # the OpenAI client constructor below — presumably Azure-related; confirm.
    api_type: Optional[str] = field(default=openai.api_type, kw_only=True)
    api_version: Optional[str] = field(default=openai.api_version, kw_only=True, metadata={"serializable": True})
    base_url: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True})
    # Not serializable: the key must never be persisted with the config.
    api_key: Optional[str] = field(default=None, kw_only=True)
    organization: Optional[str] = field(default=openai.organization, kw_only=True, metadata={"serializable": True})
    # Backing slot for the lazily-built client; callers may inject one via the
    # `client` alias, otherwise `client()` constructs it on first access.
    _client: Optional[openai.OpenAI] = field(
        default=None, kw_only=True, alias="client", metadata={"serializable": False}
    )

    @lazy_property()
    def client(self) -> openai.OpenAI:
        """Build (once) the OpenAI SDK client from the driver's credentials."""
        return openai.OpenAI(
            api_key=self.api_key,
            base_url=self.base_url,
            organization=self.organization,
        )

    def try_text_to_audio(self, prompts: list[str]) -> AudioArtifact:
        """Synthesize `prompts` into audio.

        The prompts are joined with ". " into one input string; the raw
        response bytes are wrapped in an `AudioArtifact` tagged with the
        configured format.
        """
        response = self.client.audio.speech.create(
            input=". ".join(prompts),
            voice=self.voice,
            model=self.model,
            response_format=self.format,
        )
        return AudioArtifact(value=response.content, format=self.format)
Attributes (each is a class attribute / instance attribute):

- `_client = field(default=None, kw_only=True, alias='client', metadata={'serializable': False})`
- `api_key = field(default=None, kw_only=True)`
- `api_type = field(default=openai.api_type, kw_only=True)`
- `api_version = field(default=openai.api_version, kw_only=True, metadata={'serializable': True})`
- `base_url = field(default=None, kw_only=True, metadata={'serializable': True})`
- `format = field(default='mp3', kw_only=True, metadata={'serializable': True})`
- `model = field(default='tts-1', kw_only=True, metadata={'serializable': True})`
- `organization = field(default=openai.organization, kw_only=True, metadata={'serializable': True})`
- `voice = field(default='alloy', kw_only=True, metadata={'serializable': True})`
client()
Source Code in griptape/drivers/text_to_speech/openai_text_to_speech_driver.py
@lazy_property()
def client(self) -> openai.OpenAI:
    """Construct the OpenAI SDK client on first access and cache it."""
    # Collect the connection settings first so the constructor call stays flat.
    connection_settings = {
        "api_key": self.api_key,
        "base_url": self.base_url,
        "organization": self.organization,
    }
    return openai.OpenAI(**connection_settings)
try_text_to_audio(prompts)
Source Code in griptape/drivers/text_to_speech/openai_text_to_speech_driver.py
def try_text_to_audio(self, prompts: list[str]) -> AudioArtifact:
    """Turn `prompts` into a single audio clip via the OpenAI speech API.

    All prompts are merged into one utterance, separated by ". ", and the
    response bytes are returned wrapped in an `AudioArtifact` carrying the
    configured audio format.
    """
    merged_input = ". ".join(prompts)
    speech_response = self.client.audio.speech.create(
        input=merged_input,
        model=self.model,
        voice=self.voice,
        response_format=self.format,
    )
    return AudioArtifact(value=speech_response.content, format=self.format)
- On this page
- client()
- try_text_to_audio(prompts)