Skip to main content

App Developer Persona

Role: Build applications that use PraisonAI recipes to deliver AI-powered features to end users.

Primary Goals

  • Integrate recipes into applications with minimal friction
  • Handle errors gracefully to maintain good UX
  • Optimize latency for responsive user experiences
  • Stream results for real-time feedback

Typical Workflow

Step 1: Discover Available Recipes

from praisonai import recipe

# Enumerate every recipe registered with the local PraisonAI install and
# print a one-line "name: description" summary for each.
recipes = recipe.list_recipes()
for r in recipes:
    print(f"{r.name}: {r.description}")

# Fetch metadata for a single recipe; config_schema describes the inputs
# the recipe expects.
# NOTE(review): assumes "support-reply-drafter" is installed — describe()
# presumably raises if it is not; confirm against the recipe API.
info = recipe.describe("support-reply-drafter")
print(f"Inputs: {info.config_schema}")
Step 2: Integrate into Application

from praisonai import recipe
from praisonai.recipe import RecipeError

def handle_support_ticket(ticket_id: str, message: str) -> str | None:
    """Generate an AI-powered support reply for a ticket.

    Args:
        ticket_id: Identifier of the ticket being answered.
        message: The customer's message to respond to.

    Returns:
        The drafted reply text, or None when the recipe raises or reports
        a non-ok result (the caller falls back to manual handling).
        Bug fix: the original annotation claimed ``-> str`` while two
        paths returned None.
    """
    try:
        result = recipe.run(
            "support-reply-drafter",
            input={"ticket_id": ticket_id, "message": message},
            options={"timeout_sec": 30},  # bound user-facing latency
        )
    except RecipeError as e:
        # NOTE(review): `logger` must be defined at module level
        # (e.g. logging.getLogger(__name__)) — not shown in this snippet.
        logger.error("Recipe failed: %s", e)
        return None

    if result.ok:
        return result.output
    # Non-ok result: signal the caller to handle the ticket manually.
    return None
Step 3: Add Streaming for Better UX

async def stream_response(recipe_name: str, input_data: dict):
    """Translate recipe stream events into simple dicts for the frontend."""
    # Map each recipe event type to (outgoing type, outgoing key, data key).
    event_map = {
        "progress": ("progress", "message", "message"),
        "completed": ("complete", "output", "output"),
        "error": ("error", "message", "message"),
    }
    # NOTE(review): iterating a synchronous generator inside an async def
    # blocks the event loop between events — confirm run_stream yields
    # promptly, or switch to an async variant if the SDK offers one.
    for event in recipe.run_stream(recipe_name, input=input_data):
        mapped = event_map.get(event.event_type)
        if mapped is None:
            continue  # unknown event types are ignored, as before
        out_type, out_key, data_key = mapped
        yield {"type": out_type, out_key: event.data.get(data_key)}
Step 4: Handle Errors Gracefully

from praisonai.recipe import (
    RecipeError,
    RecipeNotFoundError,
    RecipeDependencyError,
    RecipeValidationError
)

def run_recipe_safely(name: str, input_data: dict) -> dict:
    """Run a recipe and map every outcome to a ``{"success": ...}`` dict.

    Args:
        name: Registered recipe name.
        input_data: Input payload passed straight through to ``recipe.run``.

    Returns:
        ``{"success": True, "output": ...}`` on success, otherwise
        ``{"success": False, "error": <reason>}`` — callers never see an
        exception.
    """
    try:
        result = recipe.run(name, input=input_data)
    except RecipeNotFoundError:
        return {"success": False, "error": "Recipe not available"}
    except RecipeDependencyError as e:
        return {"success": False, "error": f"Missing dependency: {e}"}
    except RecipeValidationError as e:
        return {"success": False, "error": f"Invalid input: {e}"}
    except RecipeError as e:
        # Base class last so the specific handlers above take precedence.
        return {"success": False, "error": f"Recipe failed: {e}"}

    if result.ok:
        return {"success": True, "output": result.output}
    # Bug fix: the original reported success without checking result.ok,
    # inconsistent with every other example in this file.
    return {"success": False, "error": "Recipe reported a non-ok result"}

Key Concerns

Latency Optimization

# Reuse TCP connections across recipe calls instead of opening a new one
# per request — a cheap, significant latency win for HTTP-backed recipes.
import requests
from requests.adapters import HTTPAdapter

session = requests.Session()
adapter = HTTPAdapter(pool_connections=10, pool_maxsize=10)
session.mount("http://", adapter)
# Bug fix: the pooled adapter was mounted only for plain HTTP; recipe
# servers are normally reached over TLS, so mount it for HTTPS as well.
session.mount("https://", adapter)

# Recipe metadata rarely changes, so cache describe() results in-process,
# bounded at 100 entries to keep memory flat.
from functools import lru_cache

@lru_cache(maxsize=100)
def get_recipe_info(name: str):
    """Return (cached) metadata for the named recipe."""
    info = recipe.describe(name)
    return info

Error Recovery

def run_with_fallback(primary: str, fallback: str, input_data: dict):
    """Try the primary recipe with a short timeout; on any failure, run the fallback."""
    try:
        primary_result = recipe.run(
            primary, input=input_data, options={"timeout_sec": 10}
        )
        if primary_result.ok:
            return primary_result.output
    except Exception:
        # Deliberate best-effort: any primary failure falls through to the
        # fallback recipe below.
        pass

    fallback_result = recipe.run(fallback, input=input_data)
    if fallback_result.ok:
        return fallback_result.output
    return None

User Experience

# Show progress to users
def run_with_progress(name: str, input_data: dict, progress_callback):
    """Run a recipe, forwarding progress events to ``progress_callback``.

    Returns the recipe output when a "completed" event arrives, or None if
    the stream ends without one (including on "error" events, which are
    ignored here — as in the original).
    """
    for event in recipe.run_stream(name, input=input_data):
        data = event.data
        if event.event_type == "completed":
            return data.get("output")
        if event.event_type == "progress":
            progress_callback(
                step=data.get("step"),
                message=data.get("message"),
                percent=data.get("percent", 0),
            )
    return None

Common Patterns

Request-Response Pattern

# Simple synchronous call: block until the recipe finishes, then read output.
# NOTE(review): `document` is assumed to be defined earlier in the caller's
# code — it is not part of this snippet.
result = recipe.run("summarizer", input={"text": document})
# Degrade gracefully when the recipe reports a non-ok result.
summary = result.output if result.ok else "Summary unavailable"

Fire-and-Forget Pattern

import threading

def run_async(name: str, input_data: dict, callback):
    """Run a recipe on a background thread, handing the result to ``callback``.

    Returns immediately; ``callback(result)`` fires on the worker thread
    when the recipe finishes.
    """
    # NOTE(review): if recipe.run raises, the worker thread dies silently
    # and callback never fires — consider adding error handling in
    # production code.
    def worker():
        callback(recipe.run(name, input=input_data))

    threading.Thread(target=worker).start()

Batch Processing Pattern

from concurrent.futures import ThreadPoolExecutor

def process_batch(items: list, recipe_name: str) -> list:
    """Run ``recipe_name`` over every item concurrently (5 worker threads).

    Returns outputs in input order; failed items map to None.
    """
    def run_one(payload):
        outcome = recipe.run(recipe_name, input=payload)
        if outcome.ok:
            return outcome.output
        return None

    with ThreadPoolExecutor(max_workers=5) as pool:
        return list(pool.map(run_one, items))

Troubleshooting

  • Check network latency to recipe server
  • Use streaming for perceived performance
  • Consider caching for repeated inputs
  • Profile to find bottlenecks
  • Implement retry logic with exponential backoff
  • Add circuit breaker for failing recipes
  • Log errors with context for debugging
  • Set appropriate timeouts
  • Stream large outputs instead of buffering
  • Process results incrementally
  • Clear caches periodically
  • Monitor memory usage

Security Checklist

  • Validate user input before passing to recipes
  • Don’t expose recipe errors directly to users
  • Store API keys securely (env vars, secrets manager)
  • Implement rate limiting on your API
  • Log recipe calls for audit trail
  • Sanitize recipe output before displaying

Next Steps