

autogen.opentelemetry.instrument_llm_wrapper

instrument_llm_wrapper(*, tracer_provider, capture_messages=False)

Instrument OpenAIWrapper.create() to emit LLM spans.

This creates 'chat' spans for each LLM API call, capturing:

- Provider name (openai, anthropic, etc.)
- Model name (gpt-4, claude-3, etc.)
- Token usage (input/output)
- Response metadata (finish reasons, cost)
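For orientation, an emitted span might carry attributes along these lines (the attribute names are taken from the source below; the quoted values are illustrative):

ag2.span.type           SpanType.LLM.value
gen_ai.operation.name   "chat"
gen_ai.provider.name    "openai"
gen_ai.request.model    "gpt-4"
gen_ai.agent.name       "assistant"   (set when an agent is passed in the call)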

LLM spans automatically become children of agent invoke spans via OpenTelemetry's context propagation.
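As a minimal sketch of that nesting (the span names here are hypothetical; in practice the instrumented OpenAIWrapper.create() opens the inner 'chat' span for you):

from opentelemetry import trace

tracer = trace.get_tracer("example")

# While an agent's invoke span is active on the current context...
with tracer.start_as_current_span("invoke assistant"):
    # ...any span started within it, such as the 'chat' span opened by the
    # instrumented create(), is recorded as its child with no explicit wiring.
    with tracer.start_as_current_span("chat gpt-4"):
        pass  # the LLM call would happen here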

PARAMETERS

tracer_provider (TracerProvider)
    The OpenTelemetry tracer provider.

capture_messages (bool, default: False)
    If True, capture input/output messages in span attributes. Default is False since messages may contain sensitive data.

Usage

from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from autogen.opentelemetry import instrument_llm_wrapper

resource = Resource.create(attributes={"service.name": "my-service"})
tracer_provider = TracerProvider(resource=resource)
exporter = OTLPSpanExporter(endpoint="http://127.0.0.1:4317")
processor = BatchSpanProcessor(exporter)
tracer_provider.add_span_processor(processor)
trace.set_tracer_provider(tracer_provider)

instrument_llm_wrapper(tracer_provider=tracer_provider)

# Or with message capture enabled (for debugging)

instrument_llm_wrapper(tracer_provider=tracer_provider, capture_messages=True)
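For quick local debugging without an OTLP collector, a ConsoleSpanExporter can stand in for the OTLP exporter above (a sketch; any SpanExporter works):

from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))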

Source code in autogen/opentelemetry/instrumentators/llm_wrapper.py
@export_module("autogen.opentelemetry")
def instrument_llm_wrapper(*, tracer_provider: TracerProvider, capture_messages: bool = False) -> None:
    """Instrument OpenAIWrapper.create() to emit LLM spans.

    This creates 'chat' spans for each LLM API call, capturing:
    - Provider name (openai, anthropic, etc.)
    - Model name (gpt-4, claude-3, etc.)
    - Token usage (input/output)
    - Response metadata (finish reasons, cost)

    LLM spans automatically become children of agent invoke spans via
    OpenTelemetry's context propagation.

    Args:
        tracer_provider: The OpenTelemetry tracer provider
        capture_messages: If True, capture input/output messages in span attributes.
            Default is False since messages may contain sensitive data.

    Usage:
        from opentelemetry import trace
        from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
        from opentelemetry.sdk.resources import Resource
        from opentelemetry.sdk.trace import TracerProvider
        from opentelemetry.sdk.trace.export import BatchSpanProcessor
        from autogen.opentelemetry import instrument_llm_wrapper

        resource = Resource.create(attributes={"service.name": "my-service"})
        tracer_provider = TracerProvider(resource=resource)
        exporter = OTLPSpanExporter(endpoint="http://127.0.0.1:4317")
        processor = BatchSpanProcessor(exporter)
        tracer_provider.add_span_processor(processor)
        trace.set_tracer_provider(tracer_provider)

        instrument_llm_wrapper(tracer_provider=tracer_provider)

        # Or with message capture enabled (for debugging)
        instrument_llm_wrapper(tracer_provider=tracer_provider, capture_messages=True)
    """
    tracer = get_tracer(tracer_provider)
    original_create = OpenAIWrapper.create

    # Idempotency guard: if create() has already been wrapped, do nothing so
    # that calling instrument_llm_wrapper() twice does not stack wrappers.
    if hasattr(original_create, "__otel_wrapped__"):
        return

    def traced_create(self: OpenAIWrapper, **config: Any) -> Any:
        # Get model from config or wrapper's config_list
        model = config.get("model") or get_model_from_config_list(self._config_list)
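        # Span name follows the OTel GenAI convention: "{operation} {model}".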
        span_name = f"chat {model}" if model else "chat"

        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
            # Required attributes
            span.set_attribute("ag2.span.type", SpanType.LLM.value)
            span.set_attribute("gen_ai.operation.name", "chat")

            # Provider and model
            provider = get_provider_from_config_list(self._config_list)
            if provider:
                span.set_attribute("gen_ai.provider.name", provider)
            if model:
                span.set_attribute("gen_ai.request.model", model)

            # Agent name (from extra_kwargs passed by ConversableAgent)
            agent = config.get("agent")
            if agent and hasattr(agent, "name"):
                span.set_attribute("gen_ai.agent.name", agent.name)

            # Request parameters
            set_llm_request_params(span, config)

            # Input messages (opt-in)
            if capture_messages and "messages" in config:
                otel_msgs = messages_to_otel(config["messages"])
                span.set_attribute("gen_ai.input.messages", json.dumps(otel_msgs))

            try:
                response = original_create(self, **config)
            except Exception as e:
                span.set_attribute("error.type", type(e).__name__)
                raise

            # Response attributes
            _set_llm_response_attributes(span, response, capture_messages)

            return response

    # Mark the wrapper so the idempotency guard above detects repeat calls.
    traced_create.__otel_wrapped__ = True

    # Patch both the locally imported class and the module-level reference
    # (oai_client_module.OpenAIWrapper) so callers that reach the class via
    # either path get the instrumented create().
    OpenAIWrapper.create = traced_create
    oai_client_module.OpenAIWrapper.create = traced_create