Task

The Task class represents a unit of work to be executed by an agent, with support for multiple output formats, guardrails, validation feedback, and configurable retry mechanisms.

Overview

Tasks are the fundamental building blocks of agent workflows. They define what needs to be done, who should do it, and how the results should be validated and processed. Tasks support multiple types (regular, decision, loop), various output formats, and advanced features like guardrails and quality metrics.

Basic Usage

from praisonaiagents import Agent, Task

# Create an agent
researcher = Agent(role="Researcher", goal="Find accurate information")

# Create a simple task
task = Task(
    description="Research the latest AI developments",
    expected_output="A comprehensive summary of recent AI breakthroughs",
    agent=researcher
)

# Execute the task
result = task.execute()
print(result.raw)  # Access the raw output

Task Types

Regular Task

Standard tasks for single operations:
analysis_task = Task(
    description="Analyze sales data for Q4",
    expected_output="Detailed analysis with insights and recommendations",
    agent=analyst,
    task_type="task"  # Default type
)

Decision Task

Tasks that route workflow based on output:
review_task = Task(
    description="Review document quality and decide next action",
    expected_output="Decision: approve, revise, or reject",
    agent=reviewer,
    task_type="decision",
    condition={
        "approve": ["publish_task"],
        "revise": ["edit_task", "review_task"],  # Can route to multiple tasks
        "reject": ["archive_task"]
    }
)
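
Wiring the condition targets together is a Process-level concern. Below is a minimal sketch, continuing from review_task above and assuming the Process resolves condition strings against the task names registered with it (the same Process usage appears in the Complete Example at the end of this page; the extra agents and tasks are illustrative):
from praisonaiagents import Agent, Task, Process

editor = Agent(role="Editor", goal="Revise documents")         # illustrative
publisher = Agent(role="Publisher", goal="Publish documents")  # illustrative

publish_task = Task(description="Publish the approved document",
                    expected_output="Published document URL", agent=publisher)
edit_task = Task(description="Revise the document per review notes",
                 expected_output="Revised draft", agent=editor)
archive_task = Task(description="Archive the rejected document",
                    expected_output="Archive confirmation", agent=editor)

# Register each routing target under the name used in the condition map.
# reviewer and review_task come from the example above.
process = Process(
    tasks={
        "review_task": review_task,
        "publish_task": publish_task,
        "edit_task": edit_task,
        "archive_task": archive_task,
    },
    agents=[reviewer, editor, publisher],
)
process.workflow()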

Loop Task

Tasks that process batches of data:
# Process CSV file
batch_task = Task(
    description="Process customer: {name} with email: {email}",
    expected_output="Personalized message sent",
    agent=processor,
    task_type="loop",
    input_file="customers.csv"  # CSV file path
)

# Process text file (one item per line)
list_task = Task(
    description="Analyze item: {item}",
    expected_output="Analysis complete",
    agent=analyzer,
    task_type="loop",
    input_file="items.txt"
)

# Note: loop tasks read their items from input_file. For an in-memory
# list, write the items to a temporary file first, as sketched below.
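
A minimal sketch of that bridge using only the standard library, reusing the processor agent from above (the customer data is illustrative):
import csv
import tempfile

from praisonaiagents import Task

customers = [
    {"name": "Ada", "email": "ada@example.com"},
    {"name": "Grace", "email": "grace@example.com"},
]

# Write the list to a temporary CSV whose header row matches the
# {name} and {email} placeholders in the task description.
with tempfile.NamedTemporaryFile(
    mode="w", suffix=".csv", delete=False, newline=""
) as f:
    writer = csv.DictWriter(f, fieldnames=["name", "email"])
    writer.writeheader()
    writer.writerows(customers)
    temp_path = f.name

in_memory_task = Task(
    description="Process customer: {name} with email: {email}",
    expected_output="Personalized message sent",
    agent=processor,
    task_type="loop",
    input_file=temp_path,
)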

Output Formats

Raw Output

Default text-based output:
task = Task(
    description="Write a story",
    expected_output="A short story",
    agent=writer
)
result = task.execute()
print(result.raw)  # String output

JSON Output

Structured JSON with schema validation:
from pydantic import BaseModel

# Define schema
class AnalysisResult(BaseModel):
    summary: str
    score: float
    recommendations: list[str]

json_task = Task(
    description="Analyze performance",
    expected_output="Structured analysis results",
    agent=analyst,
    output_json=AnalysisResult  # Enforces JSON schema
)

result = json_task.execute()
print(result.json_output)  # Validated JSON object

Pydantic Output

Type-safe output with Pydantic models:
class Report(BaseModel):
    title: str
    sections: list[str]
    conclusion: str
    metadata: dict

pydantic_task = Task(
    description="Generate report",
    expected_output="Complete report with all sections",
    agent=reporter,
    output_pydantic=Report  # Returns Pydantic model instance
)

result = pydantic_task.execute()
report = result.pydantic_output  # Type: Report
print(report.title)  # Autocomplete and type checking

File Output

Save results directly to files:
file_task = Task(
    description="Generate CSV report",
    expected_output="Sales data in CSV format",
    agent=data_agent,
    output_file="reports/sales_q4.csv",
    create_directory=True  # Creates reports/ if it doesn't exist
)

Guardrails and Validation

Function-Based Guardrails

from praisonaiagents import TaskOutput
from praisonaiagents.guardrails import GuardrailResult

def quality_check(output: TaskOutput) -> GuardrailResult:
    """Ensure output meets quality standards"""
    if len(output.raw) < 100:
        return GuardrailResult(
            success=False,
            error="Output too short, needs more detail"
        )
    if "error" in output.raw.lower():
        return GuardrailResult(
            success=False,
            error="Output contains errors"
        )
    return GuardrailResult(success=True)

validated_task = Task(
    description="Write detailed analysis",
    expected_output="Comprehensive analysis document",
    agent=analyst,
    guardrails=[quality_check]
)

LLM-Based Guardrails

from praisonaiagents.guardrails import LLMGuardrail

# Create LLM guardrail
tone_guardrail = LLMGuardrail(
    llm="gpt-4o-mini",
    instructions="Check if the output is professional and formal in tone"
)

accuracy_guardrail = LLMGuardrail(
    llm="gpt-4o",
    instructions="Verify all facts and figures are accurate"
)

task = Task(
    description="Write business proposal",
    expected_output="Professional proposal document",
    agent=writer,
    guardrails=[tone_guardrail, accuracy_guardrail]
)

Validation Feedback and Retry

# Define the expected schema
from pydantic import BaseModel

class ConfigSchema(BaseModel):
    api_key: str
    endpoint: str
    timeout: int
    settings: dict

# Task with validation retry
validated_task = Task(
    description="Generate valid JSON configuration",
    expected_output="Valid JSON with all required fields",
    agent=developer,
    output_json=ConfigSchema,  # Enforces schema validation
    max_retries=3  # Max retry attempts
)

# Custom validation with feedback
def validate_with_feedback(output: TaskOutput) -> GuardrailResult:
    issues = []
    if "api_key" not in output.raw:
        issues.append("Missing api_key field")
    if "endpoint" not in output.raw:
        issues.append("Missing endpoint URL")
    
    if issues:
        return GuardrailResult(
            success=False,
            error=f"Please fix: {', '.join(issues)}"
        )
    return GuardrailResult(success=True)

validated_task.guardrails = [validate_with_feedback]

Memory Integration

Automatic Memory Storage

# Task results automatically stored in memory
memory_task = Task(
    description="Research customer preferences",
    expected_output="Customer insights and preferences",
    agent=researcher,
    memory=True  # Enable memory storage
    # Memory backend is configured at the agent or global level
)

# Results are stored with:
# - Task description as context
# - Output as memory content
# - Quality score for retrieval ranking
# - Timestamp and metadata

Quality Metrics

# Task with quality metrics
quality_task = Task(
    description="Write high-quality content",
    expected_output="Engaging article",
    agent=writer,
    quality_check=True  # Enable quality scoring
    # Quality threshold is handled by the Process or guardrails
)

# Quality metrics include:
# - Completeness score
# - Relevance to expected output
# - Coherence and clarity
# - Task-specific criteria
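
The Task class itself does not take a threshold parameter, so one way to enforce a minimum bar, per the note above, is a function-based guardrail. A minimal sketch with illustrative completeness criteria (the required sections are placeholders):
from praisonaiagents import TaskOutput
from praisonaiagents.guardrails import GuardrailResult

REQUIRED_SECTIONS = ["introduction", "analysis", "conclusion"]  # illustrative criteria

def enforce_quality_threshold(output: TaskOutput) -> GuardrailResult:
    """Fail the task when the output misses the illustrative completeness bar."""
    text = output.raw.lower()
    missing = [s for s in REQUIRED_SECTIONS if s not in text]
    if missing:
        return GuardrailResult(
            success=False,
            error=f"Output is missing sections: {', '.join(missing)}"
        )
    return GuardrailResult(success=True)

quality_task.guardrails = [enforce_quality_threshold]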

Context Management

Basic Context

# Chain tasks with context
research = Task(description="Research topic", agent=researcher)
write = Task(
    description="Write article based on research",
    agent=writer,
    context=[research]  # Access research results
)

Selective Context

# Control what context is retained
analysis = Task(
    description="Analyze data",
    agent=analyst,
    retain_full_context=True  # Keep all output for future tasks
    # By default, only the task output is retained, not full context
)

report = Task(
    description="Generate report",
    agent=reporter,
    context=[analysis]
    # Context size is managed automatically
)

Callbacks and Metadata

Sync Callbacks

def task_callback(output: TaskOutput):
    """Process task output"""
    print(f"Task completed: {output.task_id}")
    print(f"Execution time: {output.metadata['execution_time']}")
    log_to_database(output)  # stand-in for your own persistence hook

task = Task(
    description="Process data",
    agent=processor,
    callback=task_callback
)

Async Callbacks

async def async_callback(output: TaskOutput):
    """Async processing of output"""
    # send_notification / update_dashboard stand in for your own async hooks
    await send_notification(output.task_id)
    await update_dashboard(output.metadata)
    
    # Access metadata
    print(f"Agent used: {output.metadata['agent_role']}")
    print(f"Tools used: {output.metadata['tools_used']}")
    print(f"Retry count: {output.metadata['retry_count']}")

async_task = Task(
    description="Async operation",
    agent=agent,
    async_execution=True,
    callback=async_callback
)

Callback Metadata

Callbacks receive rich metadata including:
  • task_id: Unique task identifier
  • agent_role: Agent that executed the task
  • execution_time: Time taken to complete
  • tools_used: Tools utilized during execution
  • retry_count: Number of retry attempts
  • validation_results: Guardrail check results
  • quality_score: Task quality metrics
  • context_used: Context from previous tasks
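
Which keys are present can vary with configuration, so callbacks should read metadata defensively. A minimal sketch, assuming output.metadata behaves like a plain dict:
from praisonaiagents import TaskOutput

def metadata_logger(output: TaskOutput):
    """Log common metadata fields, tolerating any that are absent."""
    meta = output.metadata or {}
    print(f"Task {output.task_id} finished by {meta.get('agent_role', 'unknown')}")
    print(f"  execution_time: {meta.get('execution_time', 'n/a')}")
    print(f"  retry_count:    {meta.get('retry_count', 0)}")
    if meta.get('quality_score') is not None:
        print(f"  quality_score:  {meta['quality_score']}")

Attach it with callback=metadata_logger, exactly as in the sync example above.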

Advanced Features

Multimodal Tasks

# Task with image inputs
vision_task = Task(
    description="Analyze these charts and provide insights",
    expected_output="Detailed analysis of trends",
    agent=analyst,
    images=["chart1.png", "chart2.png", "data_viz.jpg"]
)

Task Configuration

# Advanced configuration
configured_task = Task(
    description="Complex operation",
    agent=specialist,
    config={
        "temperature": 0.7,
        "max_tokens": 2000,
        "tools_enabled": ["calculator", "file_reader"],
        "memory_query_limit": 10,
        "parallel_tool_calls": True
    }
)

Error Handling

# Robust error handling
resilient_task = Task(
    description="Critical operation",
    agent=primary_agent,
    max_retries=5  # Retry on failure
    # Error handling and fallback logic should be implemented
    # at the Process or workflow level
)
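
For workflow-level fallback, one common pattern is to catch the failure after retries are exhausted and re-run the work with a different agent. A minimal sketch, assuming execute() raises once max_retries is exceeded (fallback_agent is illustrative):
from praisonaiagents import Agent, Task

fallback_agent = Agent(role="Generalist", goal="Handle failed operations")  # illustrative

try:
    result = resilient_task.execute()
except Exception as exc:
    # All retries exhausted; hand the same work to a fallback agent
    print(f"Primary agent failed after retries: {exc}")
    fallback_task = Task(
        description="Critical operation",
        expected_output="Operation completed successfully",
        agent=fallback_agent,
    )
    result = fallback_task.execute()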

Best Practices

  1. Clear Descriptions: Be specific about what the task should accomplish
    # Good
    description="Analyze Q4 sales data and identify top 3 growth opportunities"
    
    # Less effective
    description="Analyze data"
    
  2. Expected Output: Define clear success criteria
    expected_output="""
    A report containing:
    1. Executive summary (200 words)
    2. Key findings (3-5 bullet points)
    3. Recommendations with priority levels
    """
    
  3. Appropriate Guardrails: Use guardrails for critical tasks
    # Financial task needs strict validation
    guardrails=[
        validate_numbers,
        check_compliance,
        LLMGuardrail(llm="gpt-4", instructions="Verify all calculations")
    ]
    
  4. Context Management: Only pass necessary context
    # Don't pass entire history
    context=[relevant_task1, relevant_task2]  # Not all_previous_tasks
    

Complete Example

from praisonaiagents import Agent, Task, TaskOutput
from praisonaiagents.guardrails import LLMGuardrail, GuardrailResult
from pydantic import BaseModel

# Define output schema
class CustomerAnalysis(BaseModel):
    customer_id: str
    satisfaction_score: float
    recommendations: list[str]
    follow_up_required: bool

# Create agents
analyst = Agent(role="Data Analyst", goal="Analyze customer data")
writer = Agent(role="Report Writer", goal="Create clear reports")

# Create guardrails
accuracy_check = LLMGuardrail(
    llm="gpt-4",
    instructions="Verify the analysis is accurate and complete"
)

def score_validation(output: TaskOutput) -> GuardrailResult:
    score = output.json_output.get("satisfaction_score", 0)
    if not 0 <= score <= 10:
        return GuardrailResult(
            success=False,
            error="Satisfaction score must be between 0 and 10"
        )
    return GuardrailResult(success=True)

# Create tasks
analysis_task = Task(
    description="Analyze customer feedback for customer {customer_id}",
    expected_output="Complete analysis with satisfaction score and recommendations",
    agent=analyst,
    task_type="loop",
    input_file="customers.csv",
    output_json=CustomerAnalysis,
    guardrails=[accuracy_check, score_validation],
    max_retries=3,
    memory=True,
    quality_check=True,
    callback=lambda o: print(f"Analyzed: {o.json_output['customer_id']}")
)

report_task = Task(
    description="Create executive summary of all customer analyses",
    expected_output="Executive report highlighting key findings and action items",
    agent=writer,
    context=[analysis_task],
    output_file="reports/customer_summary.md",
    create_directory=True
)

# Execute workflow
from praisonaiagents import Process

process = Process(
    tasks={"analyze": analysis_task, "report": report_task},
    agents=[analyst, writer]
)

process.workflow()

See Also