Skip to main content

Hooks

Intercept and modify agent behavior at various lifecycle points. Unlike callbacks (which are for UI events), hooks can intercept, modify, or block tool execution.

Quick Start

Simplest Usage

from praisonaiagents import Agent
from praisonaiagents.hooks import add_hook

# Register hooks with simple string events
@add_hook('before_tool')
def log_tools(event_data):
    print(f"Tool: {event_data.tool_name}")
    # No return needed - defaults to allow

@add_hook('before_tool')
def security_check(event_data):
    if "delete" in event_data.tool_name.lower():
        return "Delete operations blocked"  # String = deny with reason
    # No return = allow

# Agent automatically uses registered hooks
agent = Agent(
    name="SecureAssistant",
    instructions="You are a helpful assistant."
)

agent.start("Help me organize my files")
Tip: Hook returns are simple:
  • None or no return → Allow
  • False → Deny
  • "reason" → Deny with custom message

Agent-Centric Usage

from praisonaiagents import Agent
from praisonaiagents.hooks import HookRegistry, HookEvent, HookResult, BeforeToolInput

# Create a hook registry
registry = HookRegistry()

# Log all tool calls
@registry.on(HookEvent.BEFORE_TOOL)
def log_tools(event_data: BeforeToolInput) -> HookResult:
    print(f"Tool: {event_data.tool_name}")
    return HookResult.allow()

# Block dangerous operations
@registry.on(HookEvent.BEFORE_TOOL)
def security_check(event_data: BeforeToolInput) -> HookResult:
    if "delete" in event_data.tool_name.lower():
        return HookResult.deny("Delete operations blocked")
    return HookResult.allow()

# Agent with hooks - intercepts tool calls
agent = Agent(
    name="SecureAssistant",
    instructions="You are a helpful assistant.",
    hooks=registry
)

agent.start("Help me organize my files")

Hook Events

Core Events

| Event | Trigger | Use Case |
| --- | --- | --- |
| BEFORE_TOOL | Before tool execution | Security checks, logging |
| AFTER_TOOL | After tool execution | Result logging, validation |
| BEFORE_AGENT | Before agent runs | Setup, initialization |
| AFTER_AGENT | After agent completes | Cleanup, reporting |
| BEFORE_LLM | Before LLM API call | Request modification, logging |
| AFTER_LLM | After LLM API response | Response logging, validation |
| SESSION_START | When session starts | Session initialization |
| SESSION_END | When session ends | Session cleanup |
| ON_ERROR | When an error occurs | Error handling, recovery |
| ON_RETRY | Before a retry attempt | Retry logic, backoff |

Extended Events

| Event | Trigger | Use Case |
| --- | --- | --- |
| USER_PROMPT_SUBMIT | When user submits a prompt | Input validation, logging |
| NOTIFICATION | When notification is sent | Alert routing, logging |
| SUBAGENT_STOP | When subagent completes | Subagent result handling |
| SETUP | On initialization/maintenance | System setup, config loading |
| BEFORE_COMPACTION | Before context compaction | Pre-compaction hooks |
| AFTER_COMPACTION | After context compaction | Post-compaction validation |
| MESSAGE_RECEIVED | When message is received | Message preprocessing |
| MESSAGE_SENDING | Before message is sent | Message modification |
| MESSAGE_SENT | After message is sent | Delivery confirmation |
| GATEWAY_START | When gateway starts | Gateway initialization |
| GATEWAY_STOP | When gateway stops | Gateway cleanup |
| TOOL_RESULT_PERSIST | Before tool result storage | Result modification |

Hook Decisions

| Decision | Description |
| --- | --- |
| allow | Allow the operation to proceed |
| deny | Deny the operation with a reason |
| block | Block the operation silently |
| ask | Prompt for user confirmation |

CLI Commands

praisonai hooks list                    # List registered hooks
praisonai hooks test before_tool        # Test hooks for an event
praisonai hooks run "echo test"         # Run a command hook
praisonai hooks validate hooks.json     # Validate configuration

Low-level API Reference

HookRegistry Direct Usage

from praisonaiagents.hooks import (
    HookRegistry, HookRunner, HookEvent, HookResult,
    BeforeToolInput
)

# Create a hook registry
registry = HookRegistry()

# Log all tool calls
@registry.on(HookEvent.BEFORE_TOOL)
def log_tools(event_data: BeforeToolInput) -> HookResult:
    print(f"Tool: {event_data.tool_name}")
    return HookResult.allow()

# Block dangerous operations
@registry.on(HookEvent.BEFORE_TOOL)
def security_check(event_data: BeforeToolInput) -> HookResult:
    if "delete" in event_data.tool_name.lower():
        return HookResult.deny("Delete operations blocked")
    return HookResult.allow()

# Execute hooks
runner = HookRunner(registry)
result = runner.run(HookEvent.BEFORE_TOOL, event_data)

Shell Command Hooks

Register external scripts as hooks:
from praisonaiagents.hooks import HookRegistry, HookEvent

registry = HookRegistry()

# Run external validator before file writes
registry.register_command_hook(
    event=HookEvent.BEFORE_TOOL,
    command="python /path/to/file_validator.py",
    matcher="write_*"  # Only for tools starting with write_
)

Matcher Patterns

from praisonaiagents.hooks import HookRegistry, HookEvent, HookResult

registry = HookRegistry()

# Match specific tools
registry.register_function_hook(
    event=HookEvent.BEFORE_TOOL,
    func=my_hook,
    matcher="write_file"  # Exact match
)

# Match with wildcard
registry.register_function_hook(
    event=HookEvent.BEFORE_TOOL,
    func=my_hook,
    matcher="file_*"  # Matches file_read, file_write, etc.
)

# Match multiple patterns
registry.register_function_hook(
    event=HookEvent.BEFORE_TOOL,
    func=my_hook,
    matcher=["read_*", "write_*"]  # Multiple patterns
)

Configuration File

Create .praison/hooks.json in your project:
{
  "enabled": true,
  "timeout": 30,
  "hooks": {
    "pre_write_code": "./scripts/lint.sh",
    "post_write_code": [
      "./scripts/format.sh",
      "./scripts/git-add.sh"
    ],
    "pre_run_command": {
      "command": "./scripts/validate-command.sh",
      "timeout": 60,
      "enabled": true,
      "block_on_failure": true,
      "pass_input": true
    }
  }
}

LLM Hooks

Intercept LLM API calls for logging, modification, or validation:
from praisonaiagents import Agent
from praisonaiagents.hooks import HookRegistry, HookEvent, HookResult, BeforeLLMInput, AfterLLMInput

registry = HookRegistry()

@registry.on(HookEvent.BEFORE_LLM)
def log_llm_request(event_data: BeforeLLMInput) -> HookResult:
    """Log LLM requests before they are sent."""
    print(f"LLM Request: {len(event_data.messages)} messages")
    print(f"Model: {event_data.model}")
    return HookResult.allow()

@registry.on(HookEvent.AFTER_LLM)
def log_llm_response(event_data: AfterLLMInput) -> HookResult:
    """Log LLM responses after they are received."""
    print(f"LLM Response: {len(event_data.response)} chars")
    print(f"Tokens used: {event_data.usage}")
    return HookResult.allow()

agent = Agent(
    name="MonitoredAgent",
    instructions="You are helpful.",
    hooks=registry
)

Error and Retry Hooks

Handle errors and customize retry behavior:
from praisonaiagents.hooks import OnErrorInput, OnRetryInput

@registry.on(HookEvent.ON_ERROR)
def handle_error(event_data: OnErrorInput) -> HookResult:
    """Handle errors during agent execution."""
    print(f"Error occurred: {event_data.error}")
    print(f"Context: {event_data.context}")
    # Log to external service, send alert, etc.
    return HookResult.allow()

@registry.on(HookEvent.ON_RETRY)
def handle_retry(event_data: OnRetryInput) -> HookResult:
    """Customize retry behavior."""
    print(f"Retry attempt {event_data.attempt} of {event_data.max_retries}")
    if event_data.attempt > 2:
        return HookResult.deny("Too many retries")
    return HookResult.allow()

Best Practices

  1. Keep hooks lightweight - Hooks run synchronously, avoid heavy operations
  2. Use matchers - Only run hooks for relevant tools
  3. Return early - Return allow quickly for non-matching cases
  4. Log decisions - Log why hooks deny operations for debugging
  5. Handle errors - Wrap hook logic in try/except to avoid breaking agents

See Also