autogen.LLMConfig #

LLMConfig(*, top_p=None, temperature=None, max_tokens=None, config_list=(), check_every_ms=None, allow_format_str_template=None, response_format=None, timeout=None, seed=None, cache_seed=None, parallel_tool_calls=None, tools=(), functions=(), routing_method=None, **kwargs)

Initializes the LLMConfig object.

Parameters:

| Name | Description | Type | Default |
| --- | --- | --- | --- |
| `config_list` | A list of LLM configuration entries or dictionaries. | `Iterable[ConfigItem] \| dict[str, Any]` | `()` |
| `temperature` | The sampling temperature for LLM generation. | `float \| None` | `None` |
| `check_every_ms` | The interval (in milliseconds) to check for updates. | `int \| None` | `None` |
| `allow_format_str_template` | Whether to allow format string templates. | `bool \| None` | `None` |
| `response_format` | The format of the response (e.g., JSON, text). | `str \| dict[str, Any] \| BaseModel \| type[BaseModel] \| None` | `None` |
| `timeout` | The timeout for LLM requests in seconds. | `int \| None` | `None` |
| `seed` | The random seed for reproducible results. | `int \| None` | `None` |
| `cache_seed` | The seed for caching LLM responses. | `int \| None` | `None` |
| `parallel_tool_calls` | Whether to enable parallel tool calls. | `bool \| None` | `None` |
| `tools` | A list of tools available to the LLM. | `Iterable[Any]` | `()` |
| `functions` | A list of functions available to the LLM. | `Iterable[Any]` | `()` |
| `max_tokens` | The maximum number of tokens to generate. | `int \| None` | `None` |
| `top_p` | The nucleus sampling probability. | `float \| None` | `None` |
| `routing_method` | The method used to route requests across `config_list` entries (`fixed_order` or `round_robin`). | `Literal['fixed_order', 'round_robin'] \| None` | `None` |
| `**kwargs` | Additional keyword arguments for future extensions. | `Any` | `{}` |

Examples:

import os

from autogen import LLMConfig

# Example 1: create config from `kwargs` options
config = LLMConfig(
    model="gpt-4o-mini",
    api_key=os.environ["OPENAI_API_KEY"],
)

# Example 2: create config from `config_list` dictionary
config = LLMConfig(
    config_list={
        "model": "gpt-4o-mini",
        "api_key": os.environ["OPENAI_API_KEY"],
    }
)

# Example 3: create config from `config_list` list
config = LLMConfig(
    config_list=[
        {
            "model": "gpt-4o-mini",
            "api_key": os.environ["OPENAI_API_KEY"],
        },
        {
            "model": "gpt-4",
            "api_key": os.environ["OPENAI_API_KEY"],
        },
    ]
)
Source code in autogen/llm_config/config.py
def __init__(
    self,
    *,
    top_p: float | None = None,
    temperature: float | None = None,
    max_tokens: int | None = None,
    config_list: Iterable[ConfigItem] | dict[str, Any] = (),
    check_every_ms: int | None = None,
    allow_format_str_template: bool | None = None,
    response_format: str | dict[str, Any] | BaseModel | type[BaseModel] | None = None,
    timeout: int | None = None,
    seed: int | None = None,
    cache_seed: int | None = None,
    parallel_tool_calls: bool | None = None,
    tools: Iterable[Any] = (),
    functions: Iterable[Any] = (),
    routing_method: Literal["fixed_order", "round_robin"] | None = None,
    **kwargs: Any,
) -> None:
    """Initializes the LLMConfig object.

    Args:
        config_list: A list of LLM configuration entries or dictionaries.
        temperature: The sampling temperature for LLM generation.
        check_every_ms: The interval (in milliseconds) to check for updates
        allow_format_str_template: Whether to allow format string templates.
        response_format: The format of the response (e.g., JSON, text).
        timeout: The timeout for LLM requests in seconds.
        seed: The random seed for reproducible results.
        cache_seed: The seed for caching LLM responses.
        parallel_tool_calls: Whether to enable parallel tool calls.
        tools: A list of tools available for the LLM.
        functions: A list of functions available for the LLM.
        max_tokens: The maximum number of tokens to generate.
        top_p: The nucleus sampling probability.
        routing_method: The method used to route requests (e.g., fixed_order, round_robin).
        **kwargs: Additional keyword arguments for future extensions.

    Examples:
        ```python
        # Example 1: create config from `kwargs` options
        config = LLMConfig(
            model="gpt-4o-mini",
            api_key=os.environ["OPENAI_API_KEY"],
        )

        # Example 2: create config from `config_list` dictionary
        config = LLMConfig(
            config_list={
                "model": "gpt-4o-mini",
                "api_key": os.environ["OPENAI_API_KEY"],
            }
        )

        # Example 3: create config from `config_list` list
        config = LLMConfig(
            config_list=[
                {
                    "model": "gpt-4o-mini",
                    "api_key": os.environ["OPENAI_API_KEY"],
                },
                {
                    "model": "gpt-4",
                    "api_key": os.environ["OPENAI_API_KEY"],
                },
            ]
        )
        ```
    """
    app_config = ApplicationConfig(
        max_tokens=max_tokens,
        top_p=top_p,
        temperature=temperature,
    )

    application_level_options = app_config.model_dump(exclude_none=True)

    final_config_list: list[LLMConfigEntry | dict[str, Any]] = []

    if isinstance(config_list, dict):
        config_list = [config_list]

    for c in filter(bool, (*config_list, kwargs)):
        if isinstance(c, LLMConfigEntry):
            final_config_list.append(c.apply_application_config(app_config))
            continue

        else:
            final_config_list.append({
                "api_type": "openai",  # default api_type
                **application_level_options,
                **c,
            })

    self._model = _LLMConfig(
        **application_level_options,
        config_list=final_config_list,
        check_every_ms=check_every_ms,
        seed=seed,
        allow_format_str_template=allow_format_str_template,
        response_format=response_format,
        timeout=timeout,
        cache_seed=cache_seed,
        tools=tools or [],
        functions=functions or [],
        parallel_tool_calls=parallel_tool_calls,
        routing_method=routing_method,
    )
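
As the source above shows, application-level sampling options (`temperature`, `top_p`, `max_tokens`) are merged into every `config_list` entry, and entries given as plain dictionaries receive a default `api_type` of `"openai"`. A minimal sketch of that behavior (the API key value is a placeholder):

```python
config = LLMConfig(
    temperature=0.5,
    config_list=[{"model": "gpt-4o-mini", "api_key": "sk-placeholder"}],
)

# Each resulting entry carries the shared temperature and the default api_type:
entry = config.model_dump()["config_list"][0]
assert entry["api_type"] == "openai"
assert entry["temperature"] == 0.5
```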

get_current_llm_config classmethod #

get_current_llm_config(llm_config=None)
Source code in autogen/llm_config/config.py
@classmethod
def get_current_llm_config(cls, llm_config: "LLMConfig | None" = None) -> "LLMConfig | None":
    if llm_config is not None:
        return llm_config
    try:
        return (LLMConfig._current_llm_config.get()).copy()
    except LookupError:
        return None
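
This classmethod returns an explicitly passed `llm_config` as-is; otherwise it returns a copy of the config stored in the `_current_llm_config` context variable (populated elsewhere in the class, e.g. when an `LLMConfig` is used as a context manager), or `None` when no current config is set. For example:

```python
config = LLMConfig(model="gpt-4o-mini", api_key="sk-placeholder")

# An explicitly passed config is returned unchanged:
assert LLMConfig.get_current_llm_config(config) is config

# With nothing passed and no current config set, the LookupError
# from the context variable is swallowed and None is returned:
assert LLMConfig.get_current_llm_config() is None
```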

from_json classmethod #

from_json(*, env=None, path=None, file_location=None, **kwargs)
Source code in autogen/llm_config/config.py
@classmethod
def from_json(
    cls,
    *,
    env: str | None = None,
    path: str | Path | None = None,
    file_location: str | None = None,
    **kwargs: Any,
) -> "LLMConfig":
    from autogen.oai.openai_utils import config_list_from_json

    if env is None and path is None:
        raise ValueError("Either 'env' or 'path' must be provided")
    if env is not None and path is not None:
        raise ValueError("Only one of 'env' or 'path' can be provided")

    config_list = config_list_from_json(
        env_or_file=env if env is not None else str(path), file_location=file_location
    )
    return LLMConfig(config_list=config_list, **kwargs)
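
`from_json` loads a config list via `config_list_from_json`, reading either from a JSON file on disk (`path`) or from an environment variable holding the JSON (`env`); exactly one of the two must be given, and any remaining keyword arguments are forwarded to the `LLMConfig` constructor. A sketch (the file and variable names are illustrative):

```python
# From a JSON file on disk:
config = LLMConfig.from_json(path="OAI_CONFIG_LIST.json")

# From an environment variable containing the JSON config list,
# with an extra constructor option forwarded through **kwargs:
config = LLMConfig.from_json(env="OAI_CONFIG_LIST", temperature=0.2)
```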

where #

where(*, exclude=False, **kwargs)
Source code in autogen/llm_config/config.py
def where(self, *, exclude: bool = False, **kwargs: Any) -> "LLMConfig":
    from autogen.oai.openai_utils import filter_config

    filtered_config_list = filter_config(config_list=self.config_list, filter_dict=kwargs, exclude=exclude)
    if len(filtered_config_list) == 0:
        raise ValueError(f"No config found that satisfies the filter criteria: {kwargs}")

    kwargs = self.model_dump()
    kwargs["config_list"] = filtered_config_list

    return LLMConfig(**kwargs)
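
`where` filters `config_list` with `filter_config`, using the keyword arguments as the filter criteria, and returns a new `LLMConfig` holding only the matching entries (or, with `exclude=True`, everything but the matches); it raises `ValueError` if nothing survives the filter. A minimal sketch:

```python
config = LLMConfig(
    config_list=[
        {"model": "gpt-4o-mini", "api_key": "sk-placeholder"},
        {"model": "gpt-4", "api_key": "sk-placeholder"},
    ]
)

gpt4_only = config.where(model="gpt-4")  # keep matching entries
without_gpt4 = config.where(exclude=True, model="gpt-4")  # drop them instead
```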

model_dump #

model_dump(*args, exclude_none=True, **kwargs)
Source code in autogen/llm_config/config.py
def model_dump(self, *args: Any, exclude_none: bool = True, **kwargs: Any) -> dict[str, Any]:
    d = self._model.model_dump(*args, exclude_none=exclude_none, **kwargs)
    return {k: v for k, v in d.items() if not (isinstance(v, list) and len(v) == 0)}
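
Note that, unlike pydantic's default, `exclude_none` is `True` here, and empty lists are additionally filtered out, so the dump contains only populated fields:

```python
config = LLMConfig(config_list={"model": "gpt-4o-mini", "api_key": "sk-placeholder"})
d = config.model_dump()

# Keys whose value is None (e.g. an unset temperature) or an empty
# list (e.g. unused tools) are omitted from the result:
assert "temperature" not in d
assert "tools" not in d
```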

model_dump_json #

model_dump_json(*args, exclude_none=True, **kwargs)
Source code in autogen/llm_config/config.py
def model_dump_json(self, *args: Any, exclude_none: bool = True, **kwargs: Any) -> str:
    # return self._model.model_dump_json(*args, exclude_none=exclude_none, **kwargs)
    d = self.model_dump(*args, exclude_none=exclude_none, **kwargs)
    return json.dumps(d)
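
`model_dump_json` serializes the filtered `model_dump` output with `json.dumps` rather than delegating to pydantic's serializer, so the same None/empty-list filtering applies:

```python
import json

config = LLMConfig(config_list={"model": "gpt-4o-mini", "api_key": "sk-placeholder"})
assert json.loads(config.model_dump_json()) == config.model_dump()
```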

model_validate #

model_validate(*args, **kwargs)
Source code in autogen/llm_config/config.py
def model_validate(self, *args: Any, **kwargs: Any) -> Any:
    return self._model.model_validate(*args, **kwargs)

model_validate_json #

model_validate_json(*args, **kwargs)
Source code in autogen/llm_config/config.py
@functools.wraps(BaseModel.model_validate_json)
def model_validate_json(self, *args: Any, **kwargs: Any) -> Any:
    return self._model.model_validate_json(*args, **kwargs)

model_validate_strings #

model_validate_strings(*args, **kwargs)
Source code in autogen/llm_config/config.py
@functools.wraps(BaseModel.model_validate_strings)
def model_validate_strings(self, *args: Any, **kwargs: Any) -> Any:
    return self._model.model_validate_strings(*args, **kwargs)
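
The three `model_validate*` methods simply delegate to the wrapped pydantic model, so they accept the same inputs as pydantic's own validators. A sketch, assuming a round trip through the JSON dump above:

```python
config = LLMConfig(config_list={"model": "gpt-4o-mini", "api_key": "sk-placeholder"})

# Note: this validates against the inner pydantic model, so the
# return value is the inner model instance, not an LLMConfig.
inner = config.model_validate_json(config.model_dump_json())
```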

get #

get(key, default=None)
Source code in autogen/llm_config/config.py
def get(self, key: str, default: Any | None = None) -> Any:
    val = getattr(self._model, key, default)
    return val
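
`get` mirrors `dict.get`, reading an attribute from the underlying model with a fallback default. Because it uses `getattr`, the default applies only when the attribute is missing entirely; a field that exists with value `None` returns `None`:

```python
config = LLMConfig(config_list={"model": "gpt-4o-mini", "api_key": "sk-placeholder"})

assert config.get("temperature") is None  # field exists but is unset -> None
assert config.get("no_such_key", 42) == 42  # missing attribute -> default
```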

copy #

copy()
Source code in autogen/llm_config/config.py
def copy(self) -> "LLMConfig":
    return self.__copy__()

deepcopy #

deepcopy(memo=None)
Source code in autogen/llm_config/config.py
def deepcopy(self, memo: dict[int, Any] | None = None) -> "LLMConfig":
    return self.__deepcopy__(memo)
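
`copy` and `deepcopy` are thin wrappers around `__copy__` and `__deepcopy__` (note that `get_current_llm_config` above uses `copy` to avoid handing out the context's own instance):

```python
config = LLMConfig(config_list={"model": "gpt-4o-mini", "api_key": "sk-placeholder"})

shallow = config.copy()  # wraps __copy__
deep = config.deepcopy()  # wraps __deepcopy__; a fully independent clone
```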

items #

items()
Source code in autogen/llm_config/config.py
def items(self) -> Iterable[tuple[str, Any]]:
    d = self.model_dump()
    return d.items()

keys #

keys()
Source code in autogen/llm_config/config.py
def keys(self) -> Iterable[str]:
    d = self.model_dump()
    return d.keys()

values #

values()
Source code in autogen/llm_config/config.py
def values(self) -> Iterable[Any]:
    d = self.model_dump()
    return d.values()
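
`items`, `keys`, and `values` all iterate over the filtered `model_dump` output, which lets an `LLMConfig` be consumed like a plain dictionary:

```python
config = LLMConfig(config_list={"model": "gpt-4o-mini", "api_key": "sk-placeholder"})

for key, value in config.items():
    print(key, value)  # only populated fields appear, per model_dump
```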