Quality checking in PraisonAI automatically evaluates task outputs, calculates quality scores, and stores high-quality results in long-term memory for future reference.
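Conceptually, the check is a gate: score each task output, compare the score to a threshold, and persist only what passes. The sketch below is a minimal illustration of that idea, not PraisonAI's internals; the toy scorer, the 0-1 scale, and the 0.7 threshold are all assumptions. The full end-to-end example follows.

```python
# Minimal sketch of the idea behind quality_check=True (illustrative only;
# the scorer, 0-1 scale, and 0.7 threshold are assumptions, not PraisonAI code).
def naive_score(output: str) -> float:
    """Toy scorer: longer, structured outputs score higher."""
    score = min(1.0, len(output) / 2000)
    if "\n\n" in output:  # rough proxy for structure
        score = min(1.0, score + 0.2)
    return score

long_term_memory: list[tuple[float, str]] = []

def quality_gate(output: str, threshold: float = 0.7) -> float:
    score = naive_score(output)
    if score >= threshold:  # store only high-quality results
        long_term_memory.append((score, output))
    return score
```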
```python
from praisonaiagents import Agent, Task, AgentTeam, Memory

# Create writer with memory for quality tracking
blog_writer = Agent(
    name="Blog Writer",
    role="Professional blogger",
    goal="Create engaging, high-quality blog posts",
    backstory="Expert writer with years of experience",
    memory=Memory(config={
        "provider": "rag",
        "use_embedding": True,
        "rag_db_path": ".praison/blog_memory"
    })
)

# Define quality task
blog_task = Task(
    name="Write blog post",
    description="Write a 1000-word blog post about 'The Future of Remote Work'",
    expected_output="""A complete blog post with:
    - Catchy title
    - Introduction (150-200 words)
    - 3-4 main sections with subheadings
    - Conclusion (150-200 words)
    - Call to action""",
    agent=blog_writer,
    quality_check=True
)

# Run task
workflow = AgentTeam(
    agents=[blog_writer],
    tasks=[blog_task]
)
result = workflow.start()

# Access quality information
task_result = result['task_results']['Write blog post']
print(f"Quality Score: {task_result.metadata.get('quality_score', 0):.2f}")
print(f"Stored in Memory: {task_result.metadata.get('stored_in_memory', False)}")

# High-quality outputs can be retrieved later
if task_result.metadata.get('stored_in_memory'):
    # Search for similar high-quality content
    similar_posts = blog_writer.memory.search_long_term(
        "blog posts about remote work",
        relevance_cutoff=0.7
    )
    print(f"Found {len(similar_posts)} similar high-quality posts")
```
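One way to put the stored posts to work is to feed retrieved high scorers back into the next task as style references. This is plain string assembly, not a dedicated PraisonAI API, and it assumes each hit returned by `search_long_term` is a dict with a `text` field; check your memory provider's actual return shape.

```python
# Hypothetical follow-up: reuse stored high-quality posts as style references.
# Assumes each hit from search_long_term() is a dict with a "text" key;
# adjust to your memory provider's actual return shape.
references = "\n\n---\n\n".join(hit.get("text", "") for hit in similar_posts[:2])

followup_task = Task(
    name="Write follow-up post",
    description=(
        "Write a 1000-word blog post about 'Hybrid Work Policies'. "
        "Match the tone and structure of these high-scoring examples:\n\n"
        + references
    ),
    expected_output="A complete blog post in the same style",
    agent=blog_writer,
    quality_check=True
)
```

The same pattern scales to multi-task runs, as in the code-generation example below.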
```python
from praisonaiagents import Agent, Task, AgentTeam, Memory

# Code generator with quality tracking
code_agent = Agent(
    name="Code Generator",
    role="Senior developer",
    goal="Write clean, efficient, well-documented code",
    llm="gpt-4",  # Better model for code generation
    memory=Memory(config={"provider": "chroma"})
)

# Multiple tasks with quality checking
tasks = [
    Task(
        name="API endpoint",
        description="Create a REST API endpoint for user authentication",
        expected_output="Complete Python code with Flask, including error handling and documentation",
        agent=code_agent,
        quality_check=True
    ),
    Task(
        name="Unit tests",
        description="Write comprehensive unit tests for the authentication endpoint",
        expected_output="Pytest test cases covering all scenarios",
        agent=code_agent,
        quality_check=True
    ),
    Task(
        name="Documentation",
        description="Write API documentation for the authentication endpoint",
        expected_output="OpenAPI/Swagger documentation with examples",
        agent=code_agent,
        quality_check=True
    )
]

# Run all tasks
workflow = AgentTeam(
    agents=[code_agent],
    tasks=tasks,
    process="sequential"
)
results = workflow.start()

# Analyze quality across tasks
quality_report = {}
for task_name, result in results['task_results'].items():
    quality_report[task_name] = {
        'score': result.metadata.get('quality_score', 0),
        'stored': result.metadata.get('stored_in_memory', False)
    }

print("Quality Report:")
for task, metrics in quality_report.items():
    print(f"- {task}: Score={metrics['score']:.2f}, Stored={metrics['stored']}")

# Calculate average quality
avg_quality = sum(m['score'] for m in quality_report.values()) / len(quality_report)
print(f"\nAverage Quality Score: {avg_quality:.2f}")
```
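Aggregate scores like these are also a convenient trigger for retries. The loop below re-runs only the tasks that fell under a chosen cutoff; this is an illustrative pattern rather than a built-in feature, the 0.7 threshold is arbitrary, and it assumes `Task` keeps the `name` passed to its constructor.

```python
# Illustrative retry pass (not a built-in feature): re-run tasks that scored
# below an arbitrary 0.7 cutoff. Assumes Task keeps the `name` it was given.
QUALITY_THRESHOLD = 0.7
retry_tasks = [
    t for t in tasks
    if quality_report.get(t.name, {}).get('score', 0) < QUALITY_THRESHOLD
]
if retry_tasks:
    retry_run = AgentTeam(agents=[code_agent], tasks=retry_tasks, process="sequential")
    retry_results = retry_run.start()
```

For finer control over what counts as high quality, you can supply your own scoring function, as the research example below shows.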
```python
from praisonaiagents import Agent, Task, AgentTeam, Memory

# Research agent with quality memory
researcher = Agent(
    name="Research Analyst",
    role="Research specialist",
    goal="Conduct thorough research with verified sources",
    tools=["web_search", "pdf_reader"],
    memory=Memory(config={"provider": "chroma"})
)

# Custom quality checker for research
def research_quality_check(output: str) -> float:
    """Custom quality scoring for research outputs"""
    score = 0.0

    # Check for citations
    if "References:" in output or "[1]" in output:
        score += 0.3

    # Check for multiple sources
    source_indicators = ["according to", "study shows", "research indicates"]
    sources_found = sum(1 for indicator in source_indicators if indicator in output.lower())
    score += min(0.3, sources_found * 0.1)

    # Check for balanced perspective
    if "however" in output.lower() or "on the other hand" in output.lower():
        score += 0.2

    # Check for conclusion
    if "conclusion" in output.lower() or "in summary" in output.lower():
        score += 0.2

    return min(1.0, score)

# Research task with custom quality
research_task = Task(
    name="Market research",
    description="Research the electric vehicle market trends for 2024-2025",
    expected_output="Comprehensive research report with sources",
    agent=researcher,
    quality_check=True,
    quality_checker=research_quality_check  # Custom quality function
)

# Execute research
workflow = AgentTeam(
    agents=[researcher],
    tasks=[research_task]
)
result = workflow.start()

# Only high-quality research is stored
if result['task_results']['Market research'].metadata.get('stored_in_memory'):
    print("High-quality research stored for future reference")

    # Retrieve high-quality research on similar topics
    related_research = researcher.memory.search_long_term(
        "electric vehicle market analysis",
        relevance_cutoff=0.8,
        limit=5
    )
    print(f"\nFound {len(related_research)} related high-quality research pieces")
```
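The keyword heuristic in `research_quality_check` is cheap and deterministic, but it gets unwieldy as criteria accumulate. A table-driven rubric keeps the same idea easier to extend and test; the weights and predicates below are illustrative rather than a recommended scoring scheme, and the function plugs into `quality_checker` exactly as above.

```python
# Same heuristic as research_quality_check, restructured as a table-driven
# rubric. Weights and predicates are illustrative only.
RUBRIC = [
    (0.3, lambda t: "references:" in t or "[1]" in t),             # citations
    (0.3, lambda t: sum(p in t for p in ("according to", "study shows",
                                         "research indicates")) >= 2),  # multiple sources
    (0.2, lambda t: "however" in t or "on the other hand" in t),   # balanced perspective
    (0.2, lambda t: "conclusion" in t or "in summary" in t),       # wrap-up
]

def rubric_quality_check(output: str) -> float:
    text = output.lower()
    return min(1.0, sum(weight for weight, passes in RUBRIC if passes(text)))
```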