# STTConfig

`autogen.beta.live.openai.STTConfig`

`STTConfig(model, *, client=None)`

Bases: `STTConfig`

Source code in `autogen/beta/live/openai.py` (lines 138–145):

```python
def __init__(
    self,
    model: "AudioModel | str",
    *,
    client: AsyncOpenAI | None = None,
) -> None:
    self.model = model
    self.client = client or AsyncOpenAI()
```

## model (instance attribute)

`model = model`

## client (instance attribute)

`client = client or AsyncOpenAI()`

## transcribe (async)

`transcribe(voice, context)`

Source code in `autogen/beta/live/openai.py` (lines 147–162):

```python
async def transcribe(self, voice: "VoiceInput", context: "Context") -> str:
    stream = await self.client.audio.transcriptions.create(
        model=self.model,
        file=_voice_to_wav_buffer(voice),
        response_format="text",
        stream=True,
    )

    text = ""
    async for event in stream:
        if event.type == "transcript.text.delta":
            text += event.delta
            await context.send(TranscriptionChunkEvent(event.delta))

    await context.send(TranscriptionCompletedEvent(text))
    return text
```

## pipe

`pipe(agent)`

Source code in `autogen/beta/live/stt.py` (lines 34–35):

```python
def pipe(self, agent: Agent) -> "VoicePipeline":
    return VoicePipeline(agent, self)
```