## Setup
```bash
# Create environment
python3 -m venv venv && source venv/bin/activate
# Install packages
pip install praisonaiagents praisonai
# Set API key
export OPENAI_API_KEY="your-key"
```
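To confirm the environment is ready before moving on, here is a minimal check (assuming the package and environment variable names above):

```python
# Fails fast if the install or the API key is missing.
import os

import praisonaiagents  # an ImportError here means the pip install failed

assert os.environ.get("OPENAI_API_KEY"), "OPENAI_API_KEY is not set"
print("Environment ready")
```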
## Create Sample Data
```bash
# Create project submissions
cat > submissions.json << 'EOF'
[
{"id": "PROJ001", "name": "AI Health Monitor", "team": "HealthTech", "description": "Real-time health monitoring using wearables and AI", "tech_stack": ["Python", "TensorFlow", "React Native"], "demo_url": "https://demo.example.com/health"},
{"id": "PROJ002", "name": "EcoTrack", "team": "GreenCode", "description": "Carbon footprint tracker with gamification", "tech_stack": ["Node.js", "MongoDB", "Vue.js"], "demo_url": "https://demo.example.com/eco"},
{"id": "PROJ003", "name": "StudyBuddy", "team": "EduAI", "description": "AI-powered study assistant with adaptive learning", "tech_stack": ["Python", "OpenAI", "Next.js"], "demo_url": "https://demo.example.com/study"}
]
EOF
# Create judging criteria
cat > criteria.json << 'EOF'
{
"innovation": {"weight": 0.25, "description": "Originality and creativity"},
"technical": {"weight": 0.25, "description": "Technical complexity and implementation"},
"presentation": {"weight": 0.20, "description": "Demo quality and clarity"},
"impact": {"weight": 0.20, "description": "Potential real-world impact"},
"completeness": {"weight": 0.10, "description": "Project completion level"}
}
EOF
```
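Because the weights sum to 1.0, the weighted total stays on the same 0-100 scale as the individual scores. A quick sanity check of the arithmetic, in plain Python against the file created above:

```python
# Verify the weights form a convex combination and preview a weighted total.
import json

with open("criteria.json") as f:
    criteria = json.load(f)

assert abs(sum(c["weight"] for c in criteria.values()) - 1.0) < 1e-9

# Example scores: 85/90/80/75/70 across the five criteria.
sample = {"innovation": 85, "technical": 90, "presentation": 80,
          "impact": 75, "completeness": 70}
total = sum(sample[name] * c["weight"] for name, c in criteria.items())
print(f"Sample weighted total: {total:.2f}")  # 81.75
```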
## Run: Python Code
```python
from praisonaiagents import Agent, Agents, Task, tool, db
from pydantic import BaseModel
from typing import List
import json
# Structured output
class ProjectScore(BaseModel):
    project_id: str
    project_name: str
    innovation_score: int  # 0-100
    technical_score: int
    presentation_score: int
    impact_score: int
    completeness_score: int
    weighted_total: float
    strengths: List[str]
    improvements: List[str]
    recommendation: str
# Database persistence
db_instance = db(database_url="sqlite:///hackathon.db")
# Tools
@tool
def get_submission(project_id: str) -> str:
    """Get project submission details."""
    with open("submissions.json") as f:
        submissions = json.load(f)
    for s in submissions:
        if s["id"] == project_id:
            return json.dumps(s)
    return json.dumps({"error": "Project not found"})

@tool
def get_criteria() -> str:
    """Get judging criteria and weights."""
    with open("criteria.json") as f:
        return f.read()

@tool
def calculate_weighted_score(innovation: int, technical: int, presentation: int, impact: int, completeness: int) -> str:
    """Calculate weighted total score."""
    weights = {"innovation": 0.25, "technical": 0.25, "presentation": 0.20, "impact": 0.20, "completeness": 0.10}
    total = (innovation * weights["innovation"] + technical * weights["technical"] +
             presentation * weights["presentation"] + impact * weights["impact"] +
             completeness * weights["completeness"])
    return json.dumps({"weighted_total": round(total, 2)})
# Agents
reviewer = Agent(
    name="ProjectReviewer",
    instructions="Review project submissions. Use the get_submission tool.",
    tools=[get_submission],
    db=db_instance,
    session_id="hackathon-judge"
)

technical_judge = Agent(
    name="TechnicalJudge",
    instructions="""Evaluate technical aspects:
    - Code quality and architecture
    - Technology choices
    - Scalability potential
    Score 0-100 for technical complexity."""
)

innovation_judge = Agent(
    name="InnovationJudge",
    instructions="""Evaluate innovation:
    - Originality of idea
    - Creative problem solving
    - Market differentiation
    Score 0-100 for innovation."""
)

scorer = Agent(
    name="FinalScorer",
    instructions="Calculate final scores. Use the calculate_weighted_score and get_criteria tools.",
    tools=[calculate_weighted_score, get_criteria]
)
# Tasks
review_task = Task(
    description="Review project: {project_id}",
    agent=reviewer,
    expected_output="Project details and initial assessment"
)

technical_task = Task(
    description="Evaluate technical implementation",
    agent=technical_judge,
    expected_output="Technical score with justification"
)

innovation_task = Task(
    description="Evaluate innovation and creativity",
    agent=innovation_judge,
    expected_output="Innovation score with justification"
)

score_task = Task(
    description="Calculate final weighted score and generate feedback",
    agent=scorer,
    expected_output="Complete project evaluation",
    output_pydantic=ProjectScore
)
# Run for each project
with open("submissions.json") as f:
projects = json.load(f)
results = []
for proj in projects:
agents = Agents(
agents=[reviewer, technical_judge, innovation_judge, scorer],
tasks=[review_task, technical_task, innovation_task, score_task]
)
result = agents.start(project_id=proj["id"])
results.append(result)
print(f"{proj['name']}: {result}")
# Rank projects by weighted total. This assumes each result parses as a
# ProjectScore (via output_pydantic); unparsed string results sort last.
print("\n=== FINAL RANKINGS ===")
ranked = sorted(results, key=lambda r: getattr(r, "weighted_total", 0), reverse=True)
for rank, r in enumerate(ranked, start=1):
    print(f"{rank}. {getattr(r, 'project_name', r)}")
```
## Run: CLI
```bash
# Judge single project
praisonai "Evaluate hackathon project: AI Health Monitor" --verbose
# With persistence
praisonai "Score all hackathon submissions" --memory --user-id judge_panel
# Interactive judging
praisonai --chat-mode --memory --user-id hackathon_judges
```
## Run: agents.yaml
Create `agents.yaml`:
```yaml
framework: praisonai
topic: "hackathon project judging"
roles:
  reviewer:
    role: Project Reviewer
    goal: Understand project scope and implementation
    backstory: Experienced hackathon organizer
    tasks:
      review:
        description: |
          Review submission:
          - Project description
          - Tech stack analysis
          - Demo evaluation
        expected_output: Project overview
  technical:
    role: Technical Judge
    goal: Evaluate technical excellence
    backstory: Senior software architect
    tasks:
      evaluate:
        description: |
          Score (0-100):
          - Code quality
          - Architecture design
          - Technology choices
          - Scalability
        expected_output: Technical score with feedback
  innovation:
    role: Innovation Judge
    goal: Assess creativity and originality
    backstory: Startup founder and investor
    tasks:
      assess:
        description: |
          Score (0-100):
          - Originality
          - Problem-solution fit
          - Market potential
        expected_output: Innovation score with feedback
```
Run it:

```bash
praisonai agents.yaml --verbose
```
Monitor & Verify
```bash
# View judging history
praisonai --history 10 --user-id judge_panel
# Check metrics
praisonai --metrics
# Export scores
praisonai --save hackathon_results
```
## Serve API
```python
from praisonaiagents import Agent, tool
import json
@tool
def quick_score(innovation: int, technical: int, presentation: int, impact: int) -> str:
    """Quick weighted score calculation."""
    weights = {"innovation": 0.25, "technical": 0.25, "presentation": 0.25, "impact": 0.25}
    total = (innovation * weights["innovation"] + technical * weights["technical"] +
             presentation * weights["presentation"] + impact * weights["impact"])
    if total >= 85:
        tier = "Winner"
    elif total >= 75:
        tier = "Finalist"
    elif total >= 60:
        tier = "Honorable Mention"
    else:
        tier = "Participant"
    return json.dumps({"total_score": round(total, 1), "tier": tier})

agent = Agent(
    name="JudgingAPI",
    instructions="Calculate hackathon project scores.",
    tools=[quick_score]
)

agent.launch(path="/score", port=8000)
```
```bash
curl -X POST http://localhost:8000/score \
  -H "Content-Type: application/json" \
  -d '{"message": "Score project: innovation 85, technical 90, presentation 80, impact 75"}'
```
## Cleanup
```bash
rm -f hackathon.db submissions.json criteria.json
deactivate
```
## Features Demonstrated
| Feature | Implementation |
|---|---|
| Multi-agent | Reviewer → Technical → Innovation → Scorer |
| Structured output | Pydantic `ProjectScore` model |
| Weighted scoring | Configurable criteria weights |
| Batch processing | Loop through all submissions |
| DB persistence | SQLite via `db()` |
| CLI | `--chat-mode` for interactive judging |
| YAML config | Three-agent judging pipeline |
| API endpoint | `agent.launch()` |

