# use this instead of the default response.process() to handle custom error messages
from typing import Optional

from autogen.io.processors.console_event_processor import ConsoleEventProcessor
from autogen.io.run_response import RunResponseProtocol

# NOTE: the two import paths below are assumptions; adjust them to your AG2 version if needed
from autogen.events.agent_events import ExecutedFunctionEvent
from autogen.io.processors.base import EventProcessorProtocol


def custom_response_process(
    response: RunResponseProtocol,
    processor: Optional[EventProcessorProtocol] = None,
    fail_on_error_messages_list: Optional[list[str]] = None,
) -> None:
    processor = processor or ConsoleEventProcessor()
    for event in response.events:
        # fail fast when a tool call failed with one of the given error messages
        if (
            fail_on_error_messages_list
            and isinstance(event, ExecutedFunctionEvent)
            and not event.content.is_exec_success
            and any(error_msg in event.content.content for error_msg in fail_on_error_messages_list)
        ):
            raise RuntimeError(f"Function execution failed: {event.content}")
        processor.process_event(event)
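# Because custom_response_process only ever calls processor.process_event(event),
# any object satisfying EventProcessorProtocol can stand in for the default console
# processor. A minimal sketch, assuming such a duck-typed processor is acceptable
# (LoggingEventProcessor is a hypothetical class, not part of AG2):
class LoggingEventProcessor:
    def process_event(self, event) -> None:
        print(f"[event] {type(event).__name__}")


# usage: custom_response_process(response, processor=LoggingEventProcessor())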
llm_config=LLMConfig(api_type="openai",model="gpt-4o-mini")forerror_msgin["Some other error message","This function is not implemented yet."]:agent=ConversableAgent(name="agent",llm_config=llm_config,)@tool(description="List files and folders")deflist_files(folder_name:str,)->str:raiseNotImplementedError("This function is not implemented yet.")list_files.register_for_llm(agent)response=agent.run(message="List all files and folders in the 'root' folder",tools=agent.tools,user_input=False,max_turns=3,)# The tool will raise NotImplementedError("This function is not implemented yet.") and we won't kill the processcustom_response_process(response=response,fail_on_error_messages_list=[error_msg],)print(f"Summary: {response.summary}")print("*"*40+"\nSUCCESS\n"+"*"*40)
llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini")

agent = ConversableAgent(
    name="agent",
    llm_config=llm_config,
)


@tool(description="List files and folders")
def list_files(
    folder_name: str,
) -> str:
    # the tool runs a nested agent configured with an invalid API key ("abc"),
    # so the nested run fails with an authentication error
    # llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini", api_key="abc")
    llm_config = LLMConfig(api_type="google", model="gemini-2.0-flash", api_key="abc")
    agent = ConversableAgent(
        name="agent",
        llm_config=llm_config,
    )
    response = agent.run(
        message="List all files and folders in the 'root' folder",
        tools=agent.tools,
        user_input=False,
        max_turns=3,
    )
    response.process()
    return response.summary


list_files.register_for_llm(agent)

response = agent.run(
    message="List all files and folders in the 'root' folder",
    tools=agent.tools,
    user_input=False,
    max_turns=3,
)

fail_on_error_messages_list = [
    "Incorrect API key provided",  # openai
    "API key not valid. Please pass a valid API key.",  # gemini
]

# event will contain "content="Error: Error code: 401 - {'error': {'message': 'Incorrect API key provided: abc. You can find your API key ...."
# and an error will be raised
custom_response_process(response=response, fail_on_error_messages_list=fail_on_error_messages_list)
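# custom_response_process raises RuntimeError on a matching failed tool event, so
# the caller decides what happens next. A minimal sketch of the alternative, keeping
# the surrounding process alive instead (the log message is illustrative):
try:
    custom_response_process(response=response, fail_on_error_messages_list=fail_on_error_messages_list)
except RuntimeError as e:
    print(f"Tool execution failed with a known error message: {e}")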
# use this instead of the default await response.process() to handle custom error messages
from autogen.io.processors.console_event_processor import AsyncConsoleEventProcessor
from autogen.io.run_response import AsyncRunResponseProtocol


async def a_custom_response_process(
    response: AsyncRunResponseProtocol,
    processor: Optional[AsyncConsoleEventProcessor] = None,
    fail_on_error_messages_list: Optional[list[str]] = None,
) -> None:
    processor = processor or AsyncConsoleEventProcessor()
    async for event in response.events:
        if (
            fail_on_error_messages_list
            and isinstance(event, ExecutedFunctionEvent)
            and not event.content.is_exec_success
            and any(error_msg in event.content.content for error_msg in fail_on_error_messages_list)
        ):
            raise RuntimeError(f"Function execution failed: {event.content}")
        await processor.process_event(event)
llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini")

for error_msg in ["Some other error message", "This function is not implemented yet."]:
    agent = ConversableAgent(
        name="agent",
        llm_config=llm_config,
    )

    @tool(description="List files and folders")
    def list_files(
        folder_name: str,
    ) -> str:
        raise NotImplementedError("This function is not implemented yet.")

    list_files.register_for_llm(agent)

    response = await agent.a_run(
        message="List all files and folders in the 'root' folder",
        tools=agent.tools,
        user_input=False,
        max_turns=3,
    )

    await a_custom_response_process(
        response=response,
        fail_on_error_messages_list=[error_msg],
    )
    print(f"Summary: {await response.summary}")
    print("*" * 40 + "\nSUCCESS\n" + "*" * 40)
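# NOTE: the top-level awaits above assume a notebook-style event loop. In a plain
# Python script, a minimal sketch is to move the loop into a coroutine and run it:
import asyncio


async def main() -> None:
    ...  # body: the for-loop over error messages shown above, unchanged


asyncio.run(main())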