Bases: OpenAILLMConfigEntry
LLMConfig entry for the OpenAI Responses API (stateful, tool-enabled).

This reuses all the OpenAI fields but changes api_type so the wrapper can route traffic to the client.responses endpoint instead of chat.completions. It inherits everything else – including reasoning fields – from OpenAILLMConfigEntry, so users can simply set:

    {
        "api_type": "responses",        # <-- key differentiator
        "model": "o3",                  # reasoning model
        "reasoning_effort": "medium",   # low / medium / high
        "stream": True,
    }
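In practice the entry is consumed through LLMConfig. A minimal sketch (the LLMConfig constructor shape and the OPENAI_API_KEY environment variable are assumptions, not part of this page):

    import os

    from autogen import LLMConfig

    # Route requests through the Responses API instead of chat.completions.
    llm_config = LLMConfig(
        config_list=[
            {
                "api_type": "responses",
                "model": "o3",
                "reasoning_effort": "medium",
                "api_key": os.environ["OPENAI_API_KEY"],
            }
        ]
    )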
api_type class-attribute instance-attribute
tool_choice class-attribute instance-attribute
built_in_tools class-attribute instance-attribute
max_tokens class-attribute instance-attribute
    max_tokens = Field(default=None, ge=0)
top_p class-attribute instance-attribute
    top_p = Field(default=None, gt=0, lt=1)
temperature class-attribute instance-attribute
    temperature = Field(default=None, ge=0, le=1)
model class-attribute instance-attribute
    model = Field(..., min_length=1)
api_key class-attribute instance-attribute
api_version class-attribute instance-attribute
base_url class-attribute instance-attribute
voice class-attribute instance-attribute
model_client_cls class-attribute instance-attribute
http_client class-attribute instance-attribute
response_format class-attribute instance-attribute
tags class-attribute instance-attribute
    tags = Field(default_factory=list)
model_config class-attribute instance-attribute
    model_config = ConfigDict(extra='allow', arbitrary_types_allowed=True)
price class-attribute instance-attribute
    price = Field(default=None, min_length=2, max_length=2)
user class-attribute instance-attribute
stream class-attribute instance-attribute
verbosity class-attribute instance-attribute
extra_body class-attribute instance-attribute
reasoning_effort class-attribute instance-attribute
max_completion_tokens class-attribute instance-attribute
    max_completion_tokens = None
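The Field constraints above are enforced by pydantic at construction time. A minimal sketch (the class name OpenAIResponsesLLMConfigEntry and its import path are assumptions; adjust to the actual export):

    from pydantic import ValidationError

    from autogen.oai.client import OpenAIResponsesLLMConfigEntry  # assumed path

    # model is required and non-empty; top_p must lie strictly between 0 and 1.
    entry = OpenAIResponsesLLMConfigEntry(model="o3", top_p=0.9)

    try:
        OpenAIResponsesLLMConfigEntry(model="o3", top_p=1.0)  # rejected by lt=1
    except ValidationError as exc:
        print(exc.error_count(), "validation error")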
create_client
Source code in autogen/oai/client.py
    def create_client(self) -> ModelClient:  # pragma: no cover
        raise NotImplementedError("Handled via OpenAIWrapper._register_default_client")
apply_application_config
apply_application_config(application_config)
Apply application level configurations.
Source code in autogen/llm_config/entry.py
    def apply_application_config(self, application_config: ApplicationConfig) -> "LLMConfigEntry":
        """Apply application level configurations."""
        # TODO: should create a new instance instead of mutating current one
        self.max_tokens = self.max_tokens or application_config.max_tokens
        self.top_p = self.top_p or application_config.top_p
        self.temperature = self.temperature or application_config.temperature
        return self
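Entry-level values take precedence; only fields still set to None fall back to the application-level defaults (note that the `or` chaining would also replace an explicit falsy value such as 0). A sketch, assuming the entry class from the example above and that ApplicationConfig accepts these fields as keyword arguments:

    entry = OpenAIResponsesLLMConfigEntry(model="o3", temperature=0.2)
    app_cfg = ApplicationConfig(max_tokens=1024, temperature=0.7)  # assumed constructor

    entry = entry.apply_application_config(app_cfg)
    assert entry.temperature == 0.2  # already set on the entry, kept
    assert entry.max_tokens == 1024  # was None, filled from the application config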
check_base_url classmethod
Source code in autogen/llm_config/entry.py
    @field_validator("base_url", mode="before")
    @classmethod
    def check_base_url(cls, v: HttpUrl | str | None, info: ValidationInfo) -> str | None:
        if v is None:  # Handle None case explicitly
            return None
        if not str(v).startswith("https://") and not str(v).startswith("http://"):
            return f"http://{str(v)}"
        return str(v)
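Because the validator runs in "before" mode, a scheme-less host is normalized before pydantic parses it as an HttpUrl. Illustrative sketch (same assumed class as above):

    entry = OpenAIResponsesLLMConfigEntry(model="o3", base_url="localhost:8000")
    # check_base_url prefixed the bare host with "http://".
    assert str(entry.base_url).startswith("http://localhost:8000")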
serialize_base_url
Source code in autogen/llm_config/entry.py
    @field_serializer("base_url", when_used="unless-none")  # Ensure serializer also respects None
    def serialize_base_url(self, v: HttpUrl | None) -> str | None:
        return str(v) if v is not None else None
serialize_api_key
Source code in autogen/llm_config/entry.py
    @field_serializer("api_key", when_used="unless-none")
    def serialize_api_key(self, v: SecretStr) -> str:
        return v.get_secret_value()
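Together the two serializers make dumps JSON-friendly: base_url becomes a plain string and api_key is emitted unmasked rather than as a SecretStr. Sketch (assumed class as above; the key is a placeholder):

    entry = OpenAIResponsesLLMConfigEntry(model="o3", api_key="sk-test")
    dumped = entry.model_dump()
    assert dumped["api_key"] == "sk-test"  # plain string, not SecretStr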
model_dump
model_dump(*args, exclude_none=True, **kwargs)
Source code in autogen/llm_config/entry.py
    def model_dump(self, *args: Any, exclude_none: bool = True, **kwargs: Any) -> dict[str, Any]:
        return BaseModel.model_dump(self, exclude_none=exclude_none, *args, **kwargs)
model_dump_json
model_dump_json(*args, exclude_none=True, **kwargs)
Source code in autogen/llm_config/entry.py
    def model_dump_json(self, *args: Any, exclude_none: bool = True, **kwargs: Any) -> str:
        return BaseModel.model_dump_json(self, exclude_none=exclude_none, *args, **kwargs)
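Both overrides flip pydantic's usual exclude_none=False default, so unset optional fields are dropped from dumps unless explicitly requested. Sketch:

    entry = OpenAIResponsesLLMConfigEntry(model="o3")
    assert "temperature" not in entry.model_dump()  # None fields omitted by default
    assert "temperature" in entry.model_dump(exclude_none=False)  # opt back in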
get
Source code in autogen/llm_config/entry.py
    def get(self, key: str, default: Any | None = None) -> Any:
        val = getattr(self, key, default)
        if isinstance(val, SecretStr):
            return val.get_secret_value()
        return val
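get mirrors dict.get but additionally unwraps SecretStr values, which is convenient when code treats the entry as a plain config mapping. Sketch:

    entry = OpenAIResponsesLLMConfigEntry(model="o3", api_key="sk-test")
    assert entry.get("api_key") == "sk-test"  # SecretStr unwrapped
    assert entry.get("missing", "fallback") == "fallback"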
items
Source code in autogen/llm_config/entry.py
    def items(self) -> Iterable[tuple[str, Any]]:
        d = self.model_dump()
        return d.items()
keys
Source code in autogen/llm_config/entry.py
    def keys(self) -> Iterable[str]:
        d = self.model_dump()
        return d.keys()
values
Source code in autogen/llm_config/entry.py
    def values(self) -> Iterable[Any]:
        d = self.model_dump()
        return d.values()
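items, keys, and values are all thin views over model_dump(), so they inherit the exclude_none behavior above and let the entry be consumed anywhere a dict-like config is expected. Sketch:

    entry = OpenAIResponsesLLMConfigEntry(model="o3", temperature=0.5)
    for key, value in entry.items():
        print(key, value)  # only fields that are actually set
    assert "model" in entry.keys()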