Skip to main content

Observability Suite

PraisonAI provides a unified observability suite with support for 20+ observability providers. All integrations use lazy loading for zero performance impact when not in use.

Quick Start

from praisonai_tools.observability import obs

# Auto-detect provider from environment variables
obs.init()

# Or specify a provider explicitly
obs.init(provider="langfuse")

Supported Providers

| Provider | Environment Variables | Install |
|---|---|---|
| AgentOps | `AGENTOPS_API_KEY` | `pip install agentops` |
| Langfuse | `LANGFUSE_PUBLIC_KEY`, `LANGFUSE_SECRET_KEY` | `pip install opentelemetry-sdk opentelemetry-exporter-otlp` |
| LangSmith | `LANGSMITH_API_KEY` | `pip install opentelemetry-sdk opentelemetry-exporter-otlp` |
| Traceloop | `TRACELOOP_API_KEY` | `pip install traceloop-sdk` |
| Arize Phoenix | `PHOENIX_API_KEY` | `pip install arize-phoenix` |
| OpenLIT | - | `pip install openlit` |
| Langtrace | `LANGTRACE_API_KEY` | `pip install langtrace-python-sdk` |
| LangWatch | `LANGWATCH_API_KEY` | `pip install langwatch` |
| Datadog | `DD_API_KEY` | `pip install ddtrace` |
| MLflow | `MLFLOW_TRACKING_URI` | `pip install mlflow` |
| Opik | `OPIK_API_KEY` | `pip install opik` |
| Portkey | `PORTKEY_API_KEY` | `pip install portkey-ai` |
| Braintrust | `BRAINTRUST_API_KEY` | `pip install braintrust` |
| Maxim | `MAXIM_API_KEY` | `pip install maxim-py` |
| Weave | `WANDB_API_KEY` | `pip install weave` |
| Neatlogs | `NEATLOGS_API_KEY` | - |
| LangDB | `LANGDB_API_KEY` | - |
| Atla | `ATLA_API_KEY` | `pip install atla-insights` |
| Patronus | `PATRONUS_API_KEY` | `pip install patronus` |
| TrueFoundry | `TRUEFOUNDRY_API_KEY` | - |

Core Concepts

Traces and Spans

from praisonai_tools.observability import obs
from praisonai_tools.observability.base import SpanKind

obs.init(provider="langfuse")

# Create a trace for a workflow
with obs.trace("my-workflow", session_id="user-123"):
    # Create spans for individual operations
    with obs.span("llm-call", kind=SpanKind.LLM) as span:
        span.model = "gpt-4o-mini"
        span.input_tokens = 100
        span.output_tokens = 50
        # ... your LLM call
    
    with obs.span("tool-call", kind=SpanKind.TOOL) as span:
        span.tool_name = "search"
        # ... your tool call

Logging LLM Calls

obs.log_llm_call(
    model="gpt-4o-mini",
    input_messages="What is 2+2?",
    output="4",
    input_tokens=10,
    output_tokens=1,
)

Logging Tool Calls

obs.log_tool_call(
    tool_name="search",
    tool_input={"query": "PraisonAI"},
    tool_output={"results": [...]},
)

Decorators

@obs.decorator("my-function", SpanKind.CUSTOM)
def my_function():
    return "result"

Multi-Agent Tracing

from praisonai_tools.observability import obs
from praisonai_tools.observability.base import SpanKind

obs.init(provider="langfuse")

with obs.trace("multi-agent-workflow"):
    # Agent 1
    with obs.span("agent-1", kind=SpanKind.AGENT) as span:
        span.attributes["agent_name"] = "Researcher"
        # ... agent 1 work
    
    # Agent 2 (child of same trace)
    with obs.span("agent-2", kind=SpanKind.AGENT) as span:
        span.attributes["agent_name"] = "Writer"
        # ... agent 2 work

CLI Commands

# List available providers
praisonai obs list

# Check provider connectivity
praisonai obs doctor

# Initialize a provider
praisonai obs init langfuse

Diagnostics

# Check observability status
print(obs.doctor())
# Output: {'enabled': True, 'provider': 'langfuse', 'connection_status': True, ...}