Skip to content

STTConfig

autogen.beta.live.openai.STTConfig #

STTConfig(model, *, client=None)

Bases: STTConfig

Source code in autogen/beta/live/openai.py
def __init__(
    self,
    model: "AudioModel | str",
    *,
    client: AsyncOpenAI | None = None,
) -> None:
    """Configure speech-to-text over the OpenAI transcription API.

    Args:
        model: Audio model identifier (or model object) passed to the API.
        client: Optional preconfigured ``AsyncOpenAI`` client; a default
            client is constructed when none is supplied.
    """
    # A falsy/None client falls back to a freshly constructed default client.
    self.client = client if client else AsyncOpenAI()
    self.model = model

model instance-attribute #

model = model

client instance-attribute #

client = client or AsyncOpenAI()

transcribe async #

transcribe(voice, context)
Source code in autogen/beta/live/openai.py
async def transcribe(self, voice: "VoiceInput", context: "Context") -> str:
    """Stream a transcription of *voice*, emitting progress events to *context*.

    Sends a ``TranscriptionChunkEvent`` for every text delta received from
    the streaming transcription endpoint, then a ``TranscriptionCompletedEvent``
    carrying the full text, and finally returns that full text.
    """
    response_stream = await self.client.audio.transcriptions.create(
        model=self.model,
        file=_voice_to_wav_buffer(voice),
        response_format="text",
        stream=True,
    )

    # Accumulate delta fragments; joined once at the end.
    fragments: list[str] = []
    async for chunk in response_stream:
        # Only delta events carry transcript text; other event types are skipped.
        if chunk.type == "transcript.text.delta":
            fragments.append(chunk.delta)
            await context.send(TranscriptionChunkEvent(chunk.delta))

    transcript = "".join(fragments)
    await context.send(TranscriptionCompletedEvent(transcript))
    return transcript

pipe #

pipe(agent)
Source code in autogen/beta/live/stt.py
def pipe(self, agent: Agent) -> "VoicePipeline":
    """Wire this STT configuration together with *agent* into a ``VoicePipeline``."""
    pipeline = VoicePipeline(agent, self)
    return pipeline