Agent Recipes Integration
Learn how to use Agent Recipes programmatically in your Python applications.
Installation
pip install praisonai agent-recipes praisonai-tools
Basic Usage
Using the LLM Tool Directly
import os
os.environ['OPENAI_API_KEY'] = 'your-api-key'
from praisonai_tools.recipe_tools import LLMTool
# Initialize LLM tool
llm = LLMTool(provider="openai", model="gpt-4o-mini")
# Simple completion
response = llm.complete("Write a haiku about coding")
print(response.content)
print(f"Tokens used: {response.usage['total_tokens']}")
Using Multiple Providers
from praisonai_tools.recipe_tools import LLMTool
# OpenAI
openai_llm = LLMTool(provider="openai", model="gpt-4o-mini")
# Anthropic
anthropic_llm = LLMTool(provider="anthropic", model="claude-3-haiku-20240307")
# Google
google_llm = LLMTool(provider="google", model="gemini-1.5-flash")
# Use any provider
response = openai_llm.complete("Hello!")
JSON Extraction
from praisonai_tools.recipe_tools import LLMTool
llm = LLMTool(provider="openai")
# Extract structured data
schema = {"name": "string", "age": "number", "skills": "array"}
result = llm.extract_json(
    "John is 30 years old and knows Python, JavaScript, and Go",
    schema=schema
)
print(result)
# {'name': 'John', 'age': 30, 'skills': ['Python', 'JavaScript', 'Go']}
Using Recipe Tools
Vision Tool
from praisonai_tools.recipe_tools import VisionTool
vision = VisionTool(provider="openai")
# Analyze an image
result = vision.analyze("photo.jpg", prompt="Describe this image")
print(result.description)
# Generate caption
caption = vision.caption("photo.jpg")
print(caption)
# Extract tags
tags = vision.tag("photo.jpg")
print(tags) # ['nature', 'landscape', 'mountains']
Chart Tool
from praisonai_tools.recipe_tools import ChartTool
chart = ChartTool()
# Create bar chart from data
data = {"Q1": 100, "Q2": 150, "Q3": 200, "Q4": 180}
result = chart.bar(data, title="Quarterly Sales", output="sales.png")
print(f"Chart saved to: {result.path}")
# Create from CSV
result = chart.line("data.csv", x="date", y="value", output="trend.png")
Email Tool
from praisonai_tools.recipe_tools import EmailTool
email = EmailTool()
# Parse email file
parsed = email.parse("message.eml")
print(f"From: {parsed.sender}")
print(f"Subject: {parsed.subject}")
print(f"Attachments: {len(parsed.attachments)}")
# Extract structured data with LLM
extracted = email.extract("message.eml", fields=["action_items", "deadlines"])
print(extracted)
Loading and Running Recipes
Load Recipe Configuration
import yaml
from pathlib import Path
def load_recipe(name: str):
    """Load a recipe's TEMPLATE.yaml configuration."""
    recipe_path = Path.home() / ".praison" / "templates" / name
    if not recipe_path.exists():
        # Fallback: a local Agent-Recipes checkout (adjust this path for your setup)
        recipe_path = Path("/Users/praison/Agent-Recipes/agent_recipes/templates") / name
    template_file = recipe_path / "TEMPLATE.yaml"
    with open(template_file) as f:
        return yaml.safe_load(f)
# Load recipe
recipe = load_recipe("ai-blog-generator")
print(f"Recipe: {recipe['name']}")
print(f"Description: {recipe['description']}")
print(f"Required tools: {recipe['requires']['tools']}")
Check Dependencies
import os
import shutil
def check_dependencies(recipe: dict) -> dict:
    """Check if recipe dependencies are satisfied."""
    results = {}
    requires = recipe.get("requires", {})

    # Check environment variables
    for env_var in requires.get("env", []):
        results[f"env:{env_var}"] = bool(os.environ.get(env_var))

    # Check Python packages
    for package in requires.get("packages", []):
        try:
            __import__(package.replace("-", "_").split("[")[0])
            results[f"package:{package}"] = True
        except ImportError:
            results[f"package:{package}"] = False

    # Check external tools
    for tool in requires.get("external", []):
        results[f"external:{tool}"] = shutil.which(tool) is not None

    return results

# Check dependencies
deps = check_dependencies(recipe)
for dep, status in deps.items():
    print(f"{'✓' if status else '✗'} {dep}")
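Putting the two helpers together, a recipe can be gated on its dependencies before anything is executed. This is a minimal sketch that reuses the load_recipe and check_dependencies functions defined above; the RuntimeError is just an illustrative way to abort, since no recipe execution API is assumed here.
# Load the recipe and refuse to proceed if anything is missing
recipe = load_recipe("ai-blog-generator")
missing = [dep for dep, ok in check_dependencies(recipe).items() if not ok]
if missing:
    raise RuntimeError(f"Cannot run '{recipe['name']}': missing {missing}")
print(f"All dependencies satisfied for {recipe['name']}")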
Building Custom Recipes
Create a Custom Recipe
from praisonai_tools.recipe_tools import LLMTool
class BlogGenerator:
    """Custom blog post generator recipe."""

    def __init__(self, provider="openai", model="gpt-4o-mini"):
        self.llm = LLMTool(provider=provider, model=model)

    def generate(self, topic: str, style: str = "professional") -> str:
        """Generate a blog post on the given topic."""
        prompt = f"""Write a blog post about: {topic}
Style: {style}
Include:
- Engaging introduction
- 3-4 main sections with headers
- Practical examples
- Call to action
Format as Markdown."""
        response = self.llm.complete(
            prompt,
            system="You are an expert blog writer.",
            max_tokens=2000
        )
        return response.content
# Use custom recipe
generator = BlogGenerator()
post = generator.generate("AI in Healthcare", style="conversational")
print(post)
Create a Multi-Tool Recipe
from praisonai_tools.recipe_tools import LLMTool, ChartTool
class ReportGenerator:
    """Generate reports with charts from data."""

    def __init__(self):
        self.llm = LLMTool(provider="openai")
        self.chart = ChartTool()

    def generate(self, data_file: str, output_dir: str = "./report"):
        import pandas as pd
        import os

        os.makedirs(output_dir, exist_ok=True)

        # Load data
        df = pd.read_csv(data_file)

        # Generate summary with LLM
        summary_prompt = f"""Analyze this data and provide key insights:
Columns: {list(df.columns)}
Sample: {df.head().to_string()}
Stats: {df.describe().to_string()}"""
        summary = self.llm.complete(summary_prompt)

        # Generate chart
        numeric_cols = df.select_dtypes(include=['number']).columns
        if len(numeric_cols) > 0:
            self.chart.bar(
                df[numeric_cols[0]].to_dict(),
                title=f"Distribution of {numeric_cols[0]}",
                output=f"{output_dir}/chart.png"
            )

        # Write report
        with open(f"{output_dir}/report.md", "w") as f:
            f.write(f"# Data Report\n\n{summary.content}\n")

        return {"summary": summary.content, "chart": f"{output_dir}/chart.png"}
# Use multi-tool recipe
reporter = ReportGenerator()
result = reporter.generate("sales.csv")
Error Handling
from praisonai_tools.recipe_tools import LLMTool
from praisonai_tools.recipe_tools.base import DependencyError
try:
    llm = LLMTool(provider="openai")
    deps = llm.check_dependencies()
    if not deps.get("api_key"):
        raise DependencyError("OPENAI_API_KEY not set")
    response = llm.complete("Hello")
except DependencyError as e:
    print(f"Missing dependency: {e}")
    print("Install with: pip install openai")
    print("Set: export OPENAI_API_KEY=your-key")
except Exception as e:
    print(f"Error: {e}")
Integration with PraisonAI Agents
from praisonaiagents import Agent
from praisonai_tools.recipe_tools import LLMTool
# Create agent with recipe tool
llm_tool = LLMTool(provider="openai")
agent = Agent(
    name="Content Creator",
    instructions="You create engaging content using AI tools.",
    tools=[llm_tool.complete]
)
# Run agent
result = agent.run("Create a social media post about our new product launch")
print(result)
Best Practices
- Always check dependencies before running recipes
- Use environment variables for API keys
- Handle errors gracefully with try/except
- Use dry-run mode for testing
- Cache results when appropriate (see the sketch after this list)
- Log execution metadata for debugging
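As a minimal sketch of the caching and logging practices above, the wrapper below memoizes LLMTool.complete calls and records timing metadata. The CachedLLM class and its in-memory cache are illustrative assumptions, not part of praisonai-tools.
import hashlib
import logging
import time

from praisonai_tools.recipe_tools import LLMTool

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("recipes")

class CachedLLM:
    """Hypothetical wrapper: cache completions and log execution metadata."""

    def __init__(self, provider="openai", model="gpt-4o-mini"):
        self.llm = LLMTool(provider=provider, model=model)
        self.cache = {}  # prompt hash -> completion text

    def complete(self, prompt: str) -> str:
        key = hashlib.sha256(prompt.encode()).hexdigest()
        if key in self.cache:
            logger.info("cache hit for %s", key[:8])
            return self.cache[key]
        start = time.time()
        response = self.llm.complete(prompt)
        logger.info("completion took %.2fs", time.time() - start)
        self.cache[key] = response.content
        return response.content

# The second call is served from the cache without another API request
llm = CachedLLM()
print(llm.complete("Summarize the benefits of agent recipes"))
print(llm.complete("Summarize the benefits of agent recipes"))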
Next Steps
- CLI Reference - Command line usage
- Deploy as Server - HTTP API deployment
- Troubleshooting - Common issues

