Agent Performance Monitoring

Track latency, throughput, and errors across your agent deployments to identify bottlenecks and optimize performance.

Create Performance Monitor

import { createPerformanceMonitor } from 'praisonai';

const monitor = createPerformanceMonitor({
  id: 'production-agents'
});

// Record metrics
monitor.record('llm_latency', 150);
monitor.record('llm_latency', 180);
monitor.record('llm_latency', 120);

// Get statistics
const stats = monitor.getStats('llm_latency');
console.log('LLM Latency Stats:');
console.log(`  Average: ${stats.avg}ms`);
console.log(`  Min: ${stats.min}ms`);
console.log(`  Max: ${stats.max}ms`);
console.log(`  P95: ${stats.p95}ms`);
console.log(`  Count: ${stats.count}`);

Time Agent Operations

import { Agent, PerformanceMonitor } from 'praisonai';

const monitor = new PerformanceMonitor();

const agent = new Agent({
  name: 'Monitored Agent',
  instructions: 'You are a helpful assistant.'
});

async function timedChat(message: string) {
  const timer = monitor.startTimer('agent_response');
  
  try {
    const response = await agent.chat(message);
    timer.stop();
    
    monitor.record('response_length', response.length);
    return response;
  } catch (error) {
    timer.stop();
    monitor.recordError('agent_response', error);
    throw error;
  }
}

await timedChat('Hello!');
await timedChat('Tell me a joke');

console.log('Response Time:', monitor.getStats('agent_response'));
console.log('Response Length:', monitor.getStats('response_length'));
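
The try/catch-around-a-timer pattern above repeats for every operation you want to time. A small reusable wrapper keeps call sites clean; this is a minimal sketch that assumes only the startTimer, record, and recordError methods shown above, and the withTiming helper name is illustrative, not part of the library:

import { Agent, PerformanceMonitor } from 'praisonai';

const monitor = new PerformanceMonitor();
const agent = new Agent({
  name: 'Monitored Agent',
  instructions: 'You are a helpful assistant.'
});

// Hypothetical helper: times any async operation and records failures under the same metric name.
async function withTiming<T>(metric: string, fn: () => Promise<T>): Promise<T> {
  const timer = monitor.startTimer(metric);
  try {
    return await fn();
  } catch (error) {
    monitor.recordError(metric, error);
    throw error;
  } finally {
    timer.stop();
  }
}

// Same behavior as timedChat above, without repeating the try/catch.
const response = await withTiming('agent_response', () => agent.chat('Hello!'));
monitor.record('response_length', response.length);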

Track Tool Performance

import { Agent, createTool, PerformanceMonitor } from 'praisonai';

const monitor = new PerformanceMonitor();

const searchTool = createTool({
  name: 'search',
  description: 'Search the web',
  parameters: { type: 'object', properties: { query: { type: 'string' } } },
  execute: async ({ query }) => {
    const timer = monitor.startTimer('tool_search');
    try {
      // Simulated search
      const result = `Results for: ${query}`;
      timer.stop();
      return result;
    } catch (error) {
      timer.stop();
      monitor.recordError('tool_search', error);
      throw error;
    }
  }
});

const agent = new Agent({
  name: 'Search Agent',
  instructions: 'Search for information.',
  tools: [searchTool]
});

await agent.chat('Search for TypeScript tutorials');

console.log('Tool Performance:', monitor.getStats('tool_search'));

Multi-Agent Monitoring

import { Agent, Agents, PerformanceMonitor } from 'praisonai';

const monitor = new PerformanceMonitor({ id: 'multi-agent' });

const researcher = new Agent({ name: 'Researcher', instructions: 'Research topics.' });
const writer = new Agent({ name: 'Writer', instructions: 'Write summaries.' });

const agents = new Agents({
  agents: [researcher, writer],
  tasks: ['Research AI', 'Summarize findings']
});

// Time the entire pipeline
const pipelineTimer = monitor.startTimer('full_pipeline');
await agents.start();
pipelineTimer.stop();

// Get pipeline stats
console.log('Pipeline Performance:');
console.log(monitor.getStats('full_pipeline'));

Export Metrics

import { PerformanceMonitor } from 'praisonai';

const monitor = new PerformanceMonitor();

// Record various metrics
monitor.record('llm_calls', 100);
monitor.record('tool_calls', 50);
monitor.record('errors', 2);

// Export all metrics
const exported = monitor.export();
console.log(JSON.stringify(exported, null, 2));

// Export in Prometheus format
const prometheus = monitor.exportPrometheus();
console.log(prometheus);
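
If you scrape metrics with Prometheus, the exported text needs to be reachable over HTTP. Below is a minimal sketch of a scrape endpoint using Node's built-in http module; the /metrics path and port 9464 are conventional choices for this example, not requirements of the library:

import { createServer } from 'node:http';
import { PerformanceMonitor } from 'praisonai';

const monitor = new PerformanceMonitor();

// Serve the Prometheus text exposition so a Prometheus server can scrape it.
createServer((req, res) => {
  if (req.url === '/metrics') {
    res.writeHead(200, { 'Content-Type': 'text/plain' });
    res.end(monitor.exportPrometheus());
  } else {
    res.writeHead(404);
    res.end();
  }
}).listen(9464);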

Performance Dashboard

import { PerformanceMonitor } from 'praisonai';

const monitor = new PerformanceMonitor();

// After collecting metrics...
function printDashboard() {
  console.log('=== Performance Dashboard ===');
  
  const metrics = ['llm_latency', 'tool_latency', 'total_response_time'];
  
  for (const metric of metrics) {
    const stats = monitor.getStats(metric);
    if (stats && stats.count > 0) {
      console.log(`\n${metric}:`);
      console.log(`  Avg: ${stats.avg.toFixed(2)}ms`);
      console.log(`  P95: ${stats.p95.toFixed(2)}ms`);
      console.log(`  Errors: ${stats.errors}`);
    }
  }
}

printDashboard();
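
In a long-running service you would typically refresh the dashboard on an interval rather than printing it once. A minimal sketch, assuming the printDashboard function above:

// Print the dashboard every 60 seconds; stop the interval on shutdown.
const dashboardInterval = setInterval(printDashboard, 60_000);

process.on('SIGTERM', () => {
  clearInterval(dashboardInterval);
});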

Metric Types

Type          Description
latency       Response time in milliseconds
throughput    Requests per second
errors        Error count and rates
counters      Cumulative counts
gauges        Current values
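
These types map onto the monitor's API roughly as follows: timers produce latency samples, record captures counter and gauge values, and recordError feeds error counts. A minimal sketch; the metric names are illustrative, and throughput here is derived from a count over a window rather than recorded directly:

import { PerformanceMonitor } from 'praisonai';

const monitor = new PerformanceMonitor();

// Latency: sampled with a timer around each operation.
const timer = monitor.startTimer('llm_latency');
// ... perform the timed operation ...
timer.stop();

// Counters and gauges: recorded directly as numeric values
// (llm_calls used as a cumulative count, queue_depth as a current value).
monitor.record('llm_calls', 1);
monitor.record('queue_depth', 12);

// Errors: recorded against the operation that failed.
monitor.recordError('llm_calls', new Error('rate limited'));

// Throughput: requests per second, derived from a count over an elapsed window.
const elapsedSeconds = 60; // illustrative measurement window
console.log(`Throughput: ${monitor.getStats('llm_calls').count / elapsedSeconds} req/s`);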