Skip to content

OpenAIResponsesConfig

autogen.beta.config.openai.config.OpenAIResponsesConfig dataclass #

OpenAIResponsesConfig(model, api_key=None, base_url=None, temperature=None, top_p=None, streaming=False, max_output_tokens=None, max_tool_calls=None, store=True, websocket_base_url=None, organization=None, project=None, timeout=not_given, max_retries=DEFAULT_MAX_RETRIES, default_headers=None, default_query=None, http_client=None, parallel_tool_calls=True, top_logprobs=None, metadata=None, service_tier=None, user='', truncation=None)

Bases: ModelConfig

model instance-attribute #

model

api_key class-attribute instance-attribute #

api_key = None

base_url class-attribute instance-attribute #

base_url = None

temperature class-attribute instance-attribute #

temperature = None

top_p class-attribute instance-attribute #

top_p = None

streaming class-attribute instance-attribute #

streaming = False

max_output_tokens class-attribute instance-attribute #

max_output_tokens = None

max_tool_calls class-attribute instance-attribute #

max_tool_calls = None

store class-attribute instance-attribute #

store = True

websocket_base_url class-attribute instance-attribute #

websocket_base_url = None

organization class-attribute instance-attribute #

organization = None

project class-attribute instance-attribute #

project = None

timeout class-attribute instance-attribute #

timeout = not_given

max_retries class-attribute instance-attribute #

max_retries = DEFAULT_MAX_RETRIES

default_headers class-attribute instance-attribute #

default_headers = None

default_query class-attribute instance-attribute #

default_query = None

http_client class-attribute instance-attribute #

http_client = None

parallel_tool_calls class-attribute instance-attribute #

parallel_tool_calls = True

top_logprobs class-attribute instance-attribute #

top_logprobs = None

metadata class-attribute instance-attribute #

metadata = None

service_tier class-attribute instance-attribute #

service_tier = None

user class-attribute instance-attribute #

user = ''

truncation class-attribute instance-attribute #

truncation = None

copy #

copy(**overrides)
Source code in autogen/beta/config/openai/config.py
def copy(self, /, **overrides: Unpack[OpenAIResponsesConfigOverrides]) -> "OpenAIResponsesConfig":
    """Return a new config with the given fields overridden; this instance is left untouched."""
    updated = replace(self, **overrides)
    return updated

create #

create()
Source code in autogen/beta/config/openai/config.py
def create(self) -> OpenAIResponsesClient:
    """Build an OpenAIResponsesClient wired up from this config's fields.

    Per-request generation settings are gathered into a CreateOptions
    mapping; connection/transport settings are passed to the client
    constructor directly.
    """
    request_options = CreateOptions(
        model=self.model,
        stream=self.streaming,
        temperature=self.temperature,
        top_p=self.top_p,
        max_output_tokens=self.max_output_tokens,
        max_tool_calls=self.max_tool_calls,
        store=self.store,
        parallel_tool_calls=self.parallel_tool_calls,
        top_logprobs=self.top_logprobs,
        metadata=self.metadata,
        service_tier=self.service_tier,
        truncation=self.truncation,
    )

    # An empty user string means "unset" — leave the key out entirely
    # rather than sending an empty value.
    if self.user:
        request_options["user"] = self.user

    client = OpenAIResponsesClient(
        api_key=self.api_key,
        organization=self.organization,
        project=self.project,
        base_url=self.base_url,
        websocket_base_url=self.websocket_base_url,
        timeout=self.timeout,
        max_retries=self.max_retries,
        default_headers=self.default_headers,
        default_query=self.default_query,
        http_client=self.http_client,
        create_options=request_options,
    )
    return client