Deploy Agent Recipes as a Server
Run Agent Recipes as HTTP services for integration with web applications, microservices, and automation pipelines.

Quick Start
Basic Server Setup
```python
from flask import Flask, request, jsonify
from praisonai_tools.recipe_tools import LLMTool

app = Flask(__name__)
llm = LLMTool(provider="openai", model="gpt-4o-mini")

@app.route("/health", methods=["GET"])
def health():
    """Health check endpoint."""
    deps = llm.check_dependencies()
    return jsonify({
        "status": "healthy" if deps.get("api_key") else "unhealthy",
        "dependencies": deps
    })

@app.route("/recipes/blog", methods=["POST"])
def generate_blog():
    """Generate a blog post."""
    data = request.json
    topic = data.get("topic", "")
    response = llm.complete(
        f"Write a blog post about: {topic}",
        system="You are an expert blog writer.",
        max_tokens=2000
    )
    return jsonify({
        "content": response.content,
        "model": response.model,
        "tokens": response.usage
    })

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080)
```
Run the Server
```bash
# Set API key
export OPENAI_API_KEY=sk-...

# Install dependencies
pip install flask praisonai-tools

# Run server
python server.py
```
Test Endpoints
```bash
# Health check
curl http://localhost:8080/health

# Generate blog
curl -X POST http://localhost:8080/recipes/blog \
  -H "Content-Type: application/json" \
  -d '{"topic": "AI in Healthcare"}'
```
Production Server with FastAPI
```python
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from praisonai_tools.recipe_tools import LLMTool, VisionTool, ChartTool
import os

app = FastAPI(title="Agent Recipes API", version="1.0.0")

# Initialize tools
llm = LLMTool(provider="openai", model="gpt-4o-mini")
vision = VisionTool(provider="openai")
chart = ChartTool()

class BlogRequest(BaseModel):
    topic: str
    style: str = "professional"
    max_tokens: int = 2000

class SentimentRequest(BaseModel):
    text: str

class ChartRequest(BaseModel):
    data: dict
    chart_type: str = "bar"
    title: str = "Chart"

@app.get("/health")
async def health():
    """Health check with dependency status."""
    return {
        "status": "healthy",
        "llm": llm.check_dependencies(),
        "vision": vision.check_dependencies(),
        "chart": chart.check_dependencies()
    }

@app.post("/recipes/blog")
async def generate_blog(req: BlogRequest):
    """Generate a blog post."""
    try:
        response = llm.complete(
            f"Write a {req.style} blog post about: {req.topic}",
            system="You are an expert blog writer.",
            max_tokens=req.max_tokens
        )
        return {
            "content": response.content,
            "model": response.model,
            "tokens": response.usage
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/recipes/sentiment")
async def analyze_sentiment(req: SentimentRequest):
    """Analyze text sentiment."""
    try:
        response = llm.complete(
            f"Analyze the sentiment of this text and respond with JSON: {req.text}",
            max_tokens=100
        )
        return {"analysis": response.content}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/recipes/chart")
async def generate_chart(req: ChartRequest):
    """Generate a chart from data."""
    try:
        if req.chart_type == "bar":
            result = chart.bar(req.data, title=req.title)
        elif req.chart_type == "line":
            result = chart.line(req.data, title=req.title)
        elif req.chart_type == "pie":
            result = chart.pie(req.data, title=req.title)
        else:
            raise HTTPException(status_code=400, detail="Invalid chart type")
        return {"path": result.path, "type": req.chart_type}
    except HTTPException:
        # Re-raise the 400 above instead of masking it as a 500
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8080)
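```

The sentiment endpoint asks the model for JSON but returns its raw text. If you want structured output, one option (a sketch, not part of the recipe tools API) is to parse the completion as JSON and fall back to the raw text when parsing fails:

```python
import json

def parse_sentiment(raw: str) -> dict:
    """Best-effort parse of a model reply that was asked to return JSON."""
    try:
        return {"analysis": json.loads(raw), "structured": True}
    except json.JSONDecodeError:
        # Model replied with prose instead of JSON; return it unparsed
        return {"analysis": raw, "structured": False}
```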
Run FastAPI Server
```bash
# Install dependencies
pip install fastapi uvicorn praisonai-tools

# Run with auto-reload for development
uvicorn server:app --reload --host 0.0.0.0 --port 8080

# Production with multiple workers
uvicorn server:app --workers 4 --host 0.0.0.0 --port 8080
```
Docker Deployment
Dockerfile
```dockerfile
FROM python:3.11-slim

WORKDIR /app

# Install system dependencies (curl is required by the HEALTHCHECK below)
RUN apt-get update && apt-get install -y \
    curl \
    ffmpeg \
    tesseract-ocr \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application
COPY . .

# Expose port
EXPOSE 8080

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8080/health || exit 1

# Run server
CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "8080"]
```
requirements.txt
```text
fastapi>=0.100.0
uvicorn>=0.23.0
praisonai-tools>=0.0.1
python-multipart>=0.0.6
```
docker-compose.yml
```yaml
version: '3.8'

services:
  recipes-api:
    build: .
    ports:
      - "8080:8080"
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
    volumes:
      - ./output:/app/output
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 3
```
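Compose substitutes `${OPENAI_API_KEY}` and `${ANTHROPIC_API_KEY}` from your shell environment, or from a `.env` file placed next to `docker-compose.yml`.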
Build and Run
```bash
# Build image
docker build -t recipes-api .

# Run container
docker run -d \
  -p 8080:8080 \
  -e OPENAI_API_KEY=$OPENAI_API_KEY \
  --name recipes-api \
  recipes-api

# Or with docker-compose
docker-compose up -d
```
Configuration
Environment Variables
| Variable | Description | Required |
|---|---|---|
| `OPENAI_API_KEY` | OpenAI API key | Yes (for OpenAI) |
| `ANTHROPIC_API_KEY` | Anthropic API key | No |
| `GOOGLE_API_KEY` | Google API key | No |
| `RECIPES_LOG_LEVEL` | Logging level (DEBUG, INFO, WARNING) | No |
| `RECIPES_MAX_TOKENS` | Default max tokens | No |
| `RECIPES_TIMEOUT` | Request timeout in seconds | No |
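One way to consume the optional `RECIPES_*` settings in your own server code. This is a sketch; the defaults shown are assumptions for illustration, not library behavior:

```python
import logging
import os

# Optional settings with assumed defaults; adjust for your deployment
LOG_LEVEL = os.environ.get("RECIPES_LOG_LEVEL", "INFO")
MAX_TOKENS = int(os.environ.get("RECIPES_MAX_TOKENS", "2000"))
TIMEOUT = float(os.environ.get("RECIPES_TIMEOUT", "60"))

logging.basicConfig(level=getattr(logging, LOG_LEVEL, logging.INFO))
```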
Logging Configuration
```python
import logging
import os

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

# Mask API keys in logs
class SecretFilter(logging.Filter):
    def filter(self, record):
        key = os.environ.get('OPENAI_API_KEY')
        # Only rewrite string messages, and never replace an empty key
        # (str.replace('', '****') would mangle every message)
        if key and isinstance(record.msg, str):
            record.msg = record.msg.replace(key, '****')
        return True

logging.getLogger().addFilter(SecretFilter())
```
Monitoring
Prometheus Metrics
```python
from prometheus_client import Counter, Histogram, generate_latest
from fastapi import Response

REQUEST_COUNT = Counter(
    'recipes_requests_total',
    'Total recipe requests',
    ['recipe', 'status']
)

REQUEST_LATENCY = Histogram(
    'recipes_request_latency_seconds',
    'Request latency',
    ['recipe']
)

@app.get("/metrics")
async def metrics():
    return Response(
        generate_latest(),
        media_type="text/plain"
    )
```
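The metrics above are declared but must be updated by your handlers. One sketch, shown as a rewritten blog handler from the FastAPI server above, using the standard `prometheus_client` label APIs:

```python
import time

@app.post("/recipes/blog")
async def generate_blog(req: BlogRequest):
    start = time.perf_counter()
    status = "success"
    try:
        response = llm.complete(f"Write a blog post about: {req.topic}")
        return {"content": response.content}
    except Exception as e:
        status = "error"
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # Record one observation per request, labeled by recipe and outcome
        REQUEST_COUNT.labels(recipe="blog", status=status).inc()
        REQUEST_LATENCY.labels(recipe="blog").observe(time.perf_counter() - start)
```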
Health Check Endpoint
```python
from datetime import datetime

@app.get("/health")
async def health():
    # check_disk_space() and check_memory() are application-defined helpers
    checks = {
        "llm": llm.check_dependencies(),
        "disk_space": check_disk_space(),
        "memory": check_memory()
    }
    all_healthy = all(
        v.get("api_key", True) for v in checks.values()
        if isinstance(v, dict)
    )
    return {
        "status": "healthy" if all_healthy else "degraded",
        "checks": checks,
        "timestamp": datetime.utcnow().isoformat()
    }
```
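`check_disk_space()` and `check_memory()` are left to the application. Minimal standard-library sketches, with arbitrary thresholds, might look like this:

```python
import shutil

def check_disk_space(path: str = "/", min_free_gb: float = 1.0) -> dict:
    """Report free disk space and whether it clears a minimum threshold."""
    usage = shutil.disk_usage(path)
    free_gb = usage.free / (1024 ** 3)
    return {"free_gb": round(free_gb, 2), "ok": free_gb >= min_free_gb}

def check_memory() -> dict:
    """Report this process's peak memory usage (Unix-only via resource)."""
    import resource
    # ru_maxrss is kilobytes on Linux, bytes on macOS
    peak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    return {"peak_rss": peak}
```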
Security
API Key Authentication
```python
import os

from fastapi import Security, HTTPException
from fastapi.security import APIKeyHeader

API_KEY_HEADER = APIKeyHeader(name="X-API-Key")

async def verify_api_key(api_key: str = Security(API_KEY_HEADER)):
    if api_key != os.environ.get("RECIPES_API_KEY"):
        raise HTTPException(status_code=403, detail="Invalid API key")
    return api_key

@app.post("/recipes/blog")
async def generate_blog(req: BlogRequest, api_key: str = Security(verify_api_key)):
    # ... implementation
```
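Clients then send the key in the `X-API-Key` header, for example with `requests`:

```python
import os
import requests

resp = requests.post(
    "http://localhost:8080/recipes/blog",
    headers={"X-API-Key": os.environ["RECIPES_API_KEY"]},
    json={"topic": "AI in Healthcare"},
    timeout=120,
)
print(resp.status_code)  # 403 if the key is missing or wrong
```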
Rate Limiting
```python
from fastapi import Request
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address

limiter = Limiter(key_func=get_remote_address)
app.state.limiter = limiter
# Register slowapi's handler so limit violations become HTTP responses
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)

@app.post("/recipes/blog")
@limiter.limit("10/minute")
async def generate_blog(request: Request, req: BlogRequest):
    # ... implementation
```
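With the exception handler registered, requests beyond the limit receive an HTTP 429 response instead of surfacing as an unhandled server error.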
Next Steps
- CLI Reference - Command line usage
- Python Integration - Use in applications
- Troubleshooting - Common issues

