Handling errors with the RunResponseProtocol

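agent.run() returns a RunResponseProtocol whose events can be consumed one by one. Iterating over them yourself, instead of calling the default response.process(), lets you escalate specific tool-execution failures into real exceptions rather than letting the run continue past them.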

from typing import Optional

from autogen import ConversableAgent, LLMConfig
from autogen.events.agent_events import ExecutedFunctionEvent
from autogen.io.processors.base import EventProcessorProtocol
from autogen.io.processors.console_event_processor import ConsoleEventProcessor
from autogen.io.run_response import RunResponseProtocol
from autogen.tools import tool

Handling custom LLM errors
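The idea: walk response.events, forward each event to an event processor, and raise as soon as a failed ExecutedFunctionEvent matches one of a caller-supplied list of error messages.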

# Use this in place of the default response.process() to raise on specific tool-error messages
def custom_response_process(
    response: RunResponseProtocol,
    processor: Optional[EventProcessorProtocol] = None,
    fail_on_error_messages_list: Optional[list[str]] = None,
) -> None:
    processor = processor or ConsoleEventProcessor()
    for event in response.events:
        # Escalate a failed tool execution whose error matches one of the listed messages
        if (
            fail_on_error_messages_list
            and isinstance(event, ExecutedFunctionEvent)
            and not event.content.is_exec_success
            and any(error_msg in event.content.content for error_msg in fail_on_error_messages_list)
        ):
            raise RuntimeError(f"Function execution failed: {event.content}")
        processor.process_event(event)
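
The helper's only requirement on processor is a process_event(event) method, which is the single call it makes, so (assuming EventProcessorProtocol is a structural protocol) any object with that method will do. A sketch of a custom processor that collects events instead of printing them; CollectingEventProcessor is a hypothetical name, not part of autogen, and response.events is typically consumed once, so run it against a fresh response:

class CollectingEventProcessor:
    def __init__(self) -> None:
        self.collected: list = []

    def process_event(self, event) -> None:
        # Store the event for later inspection instead of rendering it
        self.collected.append(event)

# e.g. custom_response_process(response=response, processor=CollectingEventProcessor())

To exercise the guard, register a tool that always fails and run it twice: once with an error message that does not match the tool's failure, and once with one that does.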

llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini")

for error_msg in ["Some other error message", "This function is not implemented yet."]:
    agent = ConversableAgent(
        name="agent",
        llm_config=llm_config,
    )

    # A tool that always fails, used to simulate tool-execution errors
    @tool(description="List files and folders")
    def list_files(
        folder_name: str,
    ) -> str:
        raise NotImplementedError("This function is not implemented yet.")

    list_files.register_for_llm(agent)

    response = agent.run(
        message="List all files and folders in the 'root' folder",
        tools=agent.tools,
        user_input=False,
        max_turns=3,
    )

    # The tool's NotImplementedError doesn't kill the run; it surfaces as a failed ExecutedFunctionEvent.
    # First iteration: "Some other error message" doesn't match, so processing completes and SUCCESS prints.
    # Second iteration: the message matches, so custom_response_process raises RuntimeError.
    custom_response_process(
        response=response,
        fail_on_error_messages_list=[error_msg],
    )
    print(f"Summary: {response.summary}")
    print("*" * 40 + "\nSUCCESS\n" + "*" * 40)

Inner agent exceptions - a team within a tool
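A tool can itself run a nested agent (a team within a tool). If the inner agent's LLM call fails, for example because of an invalid API key, the failure surfaces in the outer run as a failed ExecutedFunctionEvent, so the same guard catches it.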

llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini")

agent = ConversableAgent(
    name="agent",
    llm_config=llm_config,
)

@tool(description="List files and folders")
def list_files(
    folder_name: str,
) -> str:
    # Deliberately invalid API key, so the inner agent's LLM call fails with an authentication error
    # llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini", api_key="abc")
    llm_config = LLMConfig(api_type="google", model="gemini-2.0-flash", api_key="abc")

    agent = ConversableAgent(
        name="agent",
        llm_config=llm_config,
    )
    response = agent.run(
        message="List all files and folders in the 'root' folder",
        tools=agent.tools,
        user_input=False,
        max_turns=3,
    )
    response.process()
    return response.summary

list_files.register_for_llm(agent)

response = agent.run(
    message="List all files and folders in the 'root' folder",
    tools=agent.tools,
    user_input=False,
    max_turns=3,
)

fail_on_error_messages_list = [
    "Incorrect API key provided",  # openai
    "API key not valid. Please pass a valid API key.",  # gemini
]

# The failed ExecutedFunctionEvent will contain something like
# "Error: Error code: 401 - {'error': {'message': 'Incorrect API key provided: abc. You can find your API key ...."
# so a RuntimeError will be raised
custom_response_process(response=response, fail_on_error_messages_list=fail_on_error_messages_list)
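
To recover instead of terminating, wrap the call in try/except in place of the bare call above (a minimal sketch; remember response.events is consumed by the first processing pass):

try:
    custom_response_process(response=response, fail_on_error_messages_list=fail_on_error_messages_list)
except RuntimeError as e:
    # Log and continue instead of letting the run take the process down
    print(f"Run aborted: {e}")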

Async version
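The same guard translates directly to the async API: iterate with async for over response.events and await each processor call.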

from autogen.io.processors.console_event_processor import AsyncConsoleEventProcessor
from autogen.io.run_response import AsyncRunResponseProtocol

# Use this in place of the default await response.process() to raise on specific tool-error messages

async def a_custom_response_process(
    response: AsyncRunResponseProtocol,
    processor: Optional[AsyncConsoleEventProcessor] = None,
    fail_on_error_messages_list: Optional[list[str]] = None,
) -> None:
    processor = processor or AsyncConsoleEventProcessor()
    async for event in response.events:
        if (
            fail_on_error_messages_list
            and isinstance(event, ExecutedFunctionEvent)
            and not event.content.is_exec_success
            and any(error_msg in event.content.content for error_msg in fail_on_error_messages_list)
        ):
            raise RuntimeError(f"Function execution failed: {event.content}")
        await processor.process_event(event)

llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini")

for error_msg in ["Some other error message", "This function is not implemented yet."]:
    agent = ConversableAgent(
        name="agent",
        llm_config=llm_config,
    )

    @tool(description="List files and folders")
    def list_files(
        folder_name: str,
    ) -> str:
        raise NotImplementedError("This function is not implemented yet.")

    list_files.register_for_llm(agent)

    response = await agent.a_run(
        message="List all files and folders in the 'root' folder",
        tools=agent.tools,
        user_input=False,
        max_turns=3,
    )

    await a_custom_response_process(
        response=response,
        fail_on_error_messages_list=[error_msg],
    )
    print(f"Summary: {await response.summary}")
    print("*" * 40 + "\nSUCCESS\n" + "*" * 40)