Skip to content

a_run_group_chat

autogen.agentchat.a_run_group_chat async #

a_run_group_chat(pattern, messages, max_rounds=20, safeguard_policy=None, safeguard_llm_config=None, mask_llm_config=None)

Async version of run_group_chat for running group chats in async contexts.

This method executes a multi-agent conversation as an async task and returns immediately with an AsyncRunResponse object that can be used to iterate over events.

For step-by-step execution with control over each event, use a_run_group_chat_iter() instead.

PARAMETER DESCRIPTION
pattern

The pattern that defines how agents interact (e.g., AutoPattern, RoundRobinPattern, RandomPattern).

TYPE: Pattern

messages

The initial message(s) to start the conversation. Can be a string or a list of message dictionaries.

TYPE: list[dict[str, Any]] | str

max_rounds

Maximum number of conversation rounds. Defaults to 20.

TYPE: int DEFAULT: 20

safeguard_policy

Optional safeguard policy for content filtering.

TYPE: dict[str, Any] | str | None DEFAULT: None

safeguard_llm_config

Optional LLM config for safeguard evaluation.

TYPE: LLMConfig | None DEFAULT: None

mask_llm_config

Optional LLM config for content masking.

TYPE: LLMConfig | None DEFAULT: None

RETURNS DESCRIPTION
TYPE: AsyncRunResponseProtocol

An AsyncRunResponse object that can be used to iterate over the events of the conversation.

Source code in autogen/agentchat/group/multi_agent_chat.py
@export_module("autogen.agentchat")
async def a_run_group_chat(
    pattern: "Pattern",
    messages: list[dict[str, Any]] | str,
    max_rounds: int = 20,
    safeguard_policy: dict[str, Any] | str | None = None,
    safeguard_llm_config: LLMConfig | None = None,
    mask_llm_config: LLMConfig | None = None,
) -> AsyncRunResponseProtocol:
    """Async version of run_group_chat for running group chats in async contexts.

    The multi-agent conversation is launched as an asyncio task and an
    AsyncRunResponse object is returned immediately; the caller can iterate
    over its events as they are produced.

    For step-by-step execution with control over each event, use
    a_run_group_chat_iter() instead.

    Args:
        pattern: The pattern that defines how agents interact (e.g. AutoPattern,
            RoundRobinPattern, RandomPattern).
        messages: The initial message(s) to start the conversation — either a
            plain string or a list of message dictionaries.
        max_rounds: Maximum number of conversation rounds. Defaults to 20.
        safeguard_policy: Optional safeguard policy for content filtering.
        safeguard_llm_config: Optional LLM config for safeguard evaluation.
        mask_llm_config: Optional LLM config for content masking.

    Returns:
        AsyncRunResponseProtocol
    """
    stream = AsyncThreadIOStream()
    # Collect every participating agent; the optional user agent joins last.
    participants = list(pattern.agents)
    if pattern.user_agent:
        participants.append(pattern.user_agent)
    run_response = AsyncRunResponse(stream, agents=participants)

    async def _drive_chat() -> None:
        # Route all agent I/O through the async stream so events surface on
        # the response object returned to the caller.
        with IOStream.set_default(stream):
            try:
                chat_result, context_vars, last_agent = await a_initiate_group_chat(
                    pattern=pattern,
                    messages=messages,
                    max_rounds=max_rounds,
                    safeguard_policy=safeguard_policy,
                    safeguard_llm_config=safeguard_llm_config,
                    mask_llm_config=mask_llm_config,
                )

                stream.send(
                    RunCompletionEvent(  # type: ignore[call-arg]
                        history=chat_result.chat_history,
                        summary=chat_result.summary,
                        cost=chat_result.cost,
                        last_speaker=last_agent.name,
                        context_variables=context_vars,
                    )
                )
            except Exception as exc:
                # Surface the failure as an event instead of losing it
                # inside the background task.
                stream.send(ErrorEvent(error=exc))  # type: ignore[call-arg]

    chat_task = asyncio.create_task(_drive_chat())
    # prevent the task from being garbage collected
    run_response._task_ref = chat_task  # type: ignore[attr-defined]
    return run_response