from praisonaiagents import Agent, GuardrailResult, LLMGuardrail, Task, TaskOutput
from pydantic import BaseModel
# Define output schema
class CustomerAnalysis(BaseModel):
    """Structured output produced for each customer analyzed by the loop task."""

    # Identifier of the customer this analysis refers to (filled from the
    # {customer_id} placeholder in the task description).
    customer_id: str
    # Satisfaction rating; presumably on a 0-10 scale, since the
    # score_validation guardrail rejects values above 10 — TODO confirm.
    satisfaction_score: float
    # Suggested follow-up actions for this customer.
    recommendations: list[str]
    # True when the customer requires a manual follow-up.
    follow_up_required: bool
# Set up the two agents: one to analyze customer data, one to write reports.
analyst = Agent(goal="Analyze customer data", role="Data Analyst")
writer = Agent(goal="Create clear reports", role="Report Writer")

# LLM-based guardrail that reviews each analysis output for accuracy.
accuracy_check = LLMGuardrail(
    description="Verify the analysis is accurate and complete",
    llm="gpt-4",
)
def score_validation(output: TaskOutput) -> GuardrailResult:
    """Guardrail: reject analyses whose satisfaction score is outside 0-10.

    Args:
        output: The task output whose ``json_output`` dict is validated.

    Returns:
        A failing GuardrailResult when the score is out of range,
        otherwise a successful one.
    """
    # Guard against a missing/None json_output; default score of 0 is in range.
    score = (output.json_output or {}).get("satisfaction_score", 0)
    # Original code only rejected scores above 10, letting negative scores
    # through despite the error message promising a 0-10 range.
    if not 0 <= score <= 10:
        return GuardrailResult(
            success=False,
            error="Satisfaction score must be between 0-10"
        )
    return GuardrailResult(success=True)
# Loop task: runs the analyst once per row of customers.csv, validating each
# result against the schema and both guardrails before accepting it.
analysis_task = Task(
    agent=analyst,
    description="Analyze customer feedback for customer {customer_id}",
    expected_output="Complete analysis with satisfaction score and recommendations",
    task_type="loop",
    input_file="customers.csv",
    output_json=CustomerAnalysis,
    guardrails=[accuracy_check, score_validation],
    max_retries=3,
    memory=True,
    quality_check=True,
    # Progress callback fired after each customer is processed.
    callback=lambda o: print(f"Analyzed: {o.json_output['customer_id']}"),
)
# Summary task: the writer condenses every per-customer analysis (provided via
# context) into a single markdown report on disk.
report_task = Task(
    agent=writer,
    description="Create executive summary of all customer analyses",
    expected_output="Executive report highlighting key findings and action items",
    context=[analysis_task],
    output_file="reports/customer_summary.md",
    create_directory=True,
)
# Execute the full workflow: the analysis loop first, then the summary report.
from praisonaiagents import AgentTeam

team = AgentTeam(
    tasks={"analyze": analysis_task, "report": report_task},
    agents=[analyst, writer],
)
team.start()