Deploy AI agents as HTTP APIs or MCP servers for integration with external applications.
PraisonAI Agents can be deployed as HTTP APIs or MCP (Model Context Protocol) servers, enabling seamless integration with web applications, microservices, and other systems.
from praisonaiagents import Agent

# Define the agent that will be exposed over HTTP.
agent = Agent(
    name="API Assistant",
    role="API helper",
    goal="Answer questions and perform tasks via API",
    backstory="An AI assistant accessible through HTTP endpoints",
    llm="gpt-4o-mini",
)
Step 3
Launch as API
Deploy the agent as an HTTP API:
# Expose the agent as an HTTP API.
agent.launch(
    protocol="http",    # default protocol
    host="0.0.0.0",
    port=8000,
    path="/assistant",  # served at http://localhost:8000/assistant
)
Step 4
Test the API
Test your deployed agent API:
# Send a test message to the deployed agent endpoint.
curl -X POST http://localhost:8000/assistant \
  -H "Content-Type: application/json" \
  -d '{"message": "Hello, how can you help me?"}'
The most common deployment method is as an HTTP API server using FastAPI:
from praisonaiagents import Agent

# Build the support agent.
agent = Agent(
    name="Customer Support",
    role="Support specialist",
    goal="Help customers with their inquiries",
    tools=["web_search", "knowledge_base"],  # Optional tools
)

# Serve it over HTTP (FastAPI under the hood).
agent.launch(
    protocol="http",  # Protocol type (default: "http")
    host="0.0.0.0",   # Listen on all interfaces
    port=8000,        # Port number
    path="/support",  # API endpoint path
    debug=False,      # Debug mode for development
)

# API will be available at:
# POST http://localhost:8000/support
from praisonaiagents import Agent

# Build the search agent.
agent = Agent(
    name="SearchAgent",
    instructions="Search the internet for information",
    llm="gpt-4o-mini",
)

# Serve it using the MCP protocol instead of plain HTTP.
agent.launch(
    protocol="mcp",  # MCP protocol
    port=8080,       # Port number
    host="0.0.0.0",  # Host address
)

# MCP server will create SSE endpoints
# Tool will be named: execute_SearchAgent_task
from praisonaiagents import Agent

# An agent driven purely by instructions.
agent = Agent(
    name="Research Assistant",
    instructions=(
        "You are a helpful research assistant. "
        "Help users find and summarize information."
    ),
    llm="gpt-4o-mini",
)

# Launch with the default protocol (HTTP).
agent.launch(
    path="/research",
    port=3030,
    host="0.0.0.0",
)

# The agent is now available at http://localhost:3030/research
from praisonaiagents import Agent, AgentTeam

# Two specialized agents that will share one endpoint.
research_agent = Agent(
    name="Research",
    instructions="Research and gather information on topics",
)
summarize_agent = Agent(
    name="Summarize",
    instructions="Create concise summaries of provided information",
)

# Group them into a single collection.
agents = AgentTeam(
    name="ResearchTeam",
    agents=[research_agent, summarize_agent],
)

# One launch call serves the whole team.
agents.launch(
    path="/team",
    port=8000,
    host="0.0.0.0",
)

# All agents available at http://localhost:8000/team
from praisonaiagents import Agent

# Agents exposed over MCP can still carry tools.
agent = Agent(
    name="TweetAgent",
    instructions="Create engaging tweets based on the topic provided",
    tools=["web_search"],  # Can include tools
)

# Serve via MCP on the loopback interface only.
agent.launch(
    protocol="mcp",
    port=8080,
    host="127.0.0.1",
)

# MCP tool available as: execute_TweetAgent_task
from praisonaiagents import Agent

# Two independent agents sharing one port, on distinct paths.
sales_agent = Agent(
    name="Sales",
    instructions="Help with product information and sales",
)
support_agent = Agent(
    name="Support",
    instructions="Provide technical support",
)

# Each launch registers its own path on port 8000.
sales_agent.launch(path="/sales", port=8000)
support_agent.launch(path="/support", port=8000)

# Both available on port 8000:
# - http://localhost:8000/sales
# - http://localhost:8000/support
Multiple Agent / Agents instances may call .launch(port=N) concurrently from different threads — registration is atomic. If two launch calls use the same path on the same port, the second gets an auto-suffixed path (/path_abc123) and a warning is logged. Server readiness is signalled deterministically (no fixed sleep); .launch() returns only after the port is accepting connections (5s timeout).
import asyncio

from mcp import Client


async def main():
    """Connect to the MCP server and invoke the agent tool.

    The original snippet used ``await`` at module level, which is a
    SyntaxError outside an async context; wrapping the call in a
    coroutine and driving it with ``asyncio.run`` fixes that.
    """
    # Connect to MCP server
    client = Client("http://localhost:8080")

    # Call the agent tool exposed by the launched TweetAgent.
    result = await client.call_tool(
        "execute_TweetAgent_task",
        arguments={"input": "Create a tweet about AI"},
    )
    return result


if __name__ == "__main__":
    asyncio.run(main())