Guardrails validate and filter agent outputs to ensure safety, quality, and compliance.

Quick Start

1. Create a Guardrail

use praisonai::{Guardrail, GuardrailResult};

struct ContentFilter;

impl Guardrail for ContentFilter {
    fn name(&self) -> &str { "content_filter" }
    fn description(&self) -> &str { "Filters inappropriate content" }
    
    fn validate(&self, content: &str) -> GuardrailResult {
        if content.contains("inappropriate") {
            GuardrailResult::failure("Content contains inappropriate material")
        } else {
            GuardrailResult::pass()
        }
    }
}
2. Async Guardrail

use praisonai::{AsyncGuardrail, GuardrailResult, async_trait};

struct LlmFilter;

#[async_trait]
impl AsyncGuardrail for LlmFilter {
    fn name(&self) -> &str { "llm_filter" }
    fn description(&self) -> &str { "LLM-based content moderation" }
    
    async fn validate(&self, content: &str) -> GuardrailResult {
        // A real implementation would call a moderation API here and map its
        // verdict to pass() or failure(); this stub always passes.
        GuardrailResult::pass()
    }
}

GuardrailResult

Result of guardrail validation.
pub struct GuardrailResult {
    pub success: bool,
    pub result: Option<String>,
    pub error: Option<String>,
    pub metadata: HashMap<String, String>,
}

Factory Methods

| Method | Signature | Description |
|---|---|---|
| success(result) | fn success(impl Into<String>) -> Self | Pass with modified content |
| pass() | fn pass() -> Self | Pass without modification |
| failure(error) | fn failure(impl Into<String>) -> Self | Fail with error message |
| from_tuple(success, data) | fn from_tuple(bool, impl Into<String>) -> Self | Create from tuple |

Instance Methods

| Method | Signature | Description |
|---|---|---|
| with_metadata(key, value) | fn with_metadata(self, impl Into<String>, impl Into<String>) -> Self | Add metadata |
| is_success() | fn is_success(&self) -> bool | Check if passed |
| is_failure() | fn is_failure(&self) -> bool | Check if failed |
| get_result_or(original) | fn get_result_or(&self, &str) -> String | Get result or fallback |
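
A minimal sketch of how these pieces fit together, assuming the types above are in scope. The helper name, the 280-character limit, and the metadata key are illustrative, not part of the crate:

use praisonai::GuardrailResult;

fn check_length(content: &str) -> String {
    // Truncate overly long content; otherwise pass it through unchanged.
    let result = if content.chars().count() > 280 {
        let truncated: String = content.chars().take(280).collect();
        GuardrailResult::success(truncated).with_metadata("reason", "truncated")
    } else {
        GuardrailResult::pass()
    };

    if result.is_failure() {
        // failure(...) would carry an error message instead of a result.
        return String::new();
    }

    // Fall back to the original content when the guardrail did not rewrite it.
    result.get_result_or(content)
}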

Guardrail Trait

Synchronous validation interface.
pub trait Guardrail: Send + Sync {
    fn name(&self) -> &str;
    fn description(&self) -> &str;
    fn validate(&self, content: &str) -> GuardrailResult;
}

AsyncGuardrail Trait

Asynchronous validation interface.
#[async_trait]
pub trait AsyncGuardrail: Send + Sync {
    fn name(&self) -> &str;
    fn description(&self) -> &str;
    async fn validate(&self, content: &str) -> GuardrailResult;
}

GuardrailAction

Action to take when validation fails.
pub enum GuardrailAction {
    Stop,       // Stop execution (default)
    Retry,      // Retry the task
    Warn,       // Continue with warning
    Skip,       // Skip and continue
    Fallback,   // Use fallback response
}

GuardrailConfig

Configuration for guardrail behavior.
pub struct GuardrailConfig {
    pub on_failure: GuardrailAction,
    pub max_retries: usize,
    pub fallback_response: Option<String>,
    pub log_results: bool,
    pub error_template: Option<String>,
}

Builder Methods

| Method | Signature | Default | Description |
|---|---|---|---|
| new() | fn new() -> Self | - | Create config |
| on_failure(action) | fn on_failure(self, GuardrailAction) -> Self | Stop | Set failure action |
| max_retries(n) | fn max_retries(self, usize) -> Self | 3 | Set retry limit |
| fallback_response(msg) | fn fallback_response(self, impl Into<String>) -> Self | None | Set fallback |
| log_results(b) | fn log_results(self, bool) -> Self | true | Enable logging |
| error_template(t) | fn error_template(self, impl Into<String>) -> Self | None | Custom error format |
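
For example, a config that retries transient failures, and another that degrades gracefully with a canned reply, could be built like this (a sketch using only the builder methods above; the specific values are illustrative):

use praisonai::{GuardrailAction, GuardrailConfig};

fn main() {
    // Retry failed validations up to 2 times before giving up.
    let retrying = GuardrailConfig::new()
        .on_failure(GuardrailAction::Retry)
        .max_retries(2)
        .log_results(true);

    // Or degrade gracefully: replace rejected output with a canned response.
    let graceful = GuardrailConfig::new()
        .on_failure(GuardrailAction::Fallback)
        .fallback_response("Sorry, I can't help with that request.");
}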

Common Guardrail Patterns

Length Limiter

use praisonai::{Guardrail, GuardrailResult};

struct LengthGuardrail { max_length: usize }

impl Guardrail for LengthGuardrail {
    fn name(&self) -> &str { "length_limit" }
    fn description(&self) -> &str { "Limits response length" }
    
    fn validate(&self, content: &str) -> GuardrailResult {
        if content.len() > self.max_length {
            GuardrailResult::success(
                content.chars().take(self.max_length).collect::<String>()
            )
        } else {
            GuardrailResult::pass()
        }
    }
}

Profanity Filter

use praisonai::{Guardrail, GuardrailResult};

struct ProfanityFilter { blocked_words: Vec<String> }

impl Guardrail for ProfanityFilter {
    fn name(&self) -> &str { "profanity_filter" }
    fn description(&self) -> &str { "Blocks profane content" }
    
    fn validate(&self, content: &str) -> GuardrailResult {
        let lower = content.to_lowercase();
        for word in &self.blocked_words {
            if lower.contains(word) {
                return GuardrailResult::failure("Content contains blocked words");
            }
        }
        GuardrailResult::pass()
    }
}

Best Practices

Use different guardrails for different concerns (length, content, format); a composite that chains them is sketched below.
When calling moderation APIs, use AsyncGuardrail to avoid blocking.
Clear error messages help users understand why content was blocked.
Use Retry for transient issues, Stop for hard violations, and Fallback for graceful degradation.
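
These practices often combine into a small composite that runs several guardrails in order, stops at the first failure, and threads any rewritten content into the next check. A minimal sketch using only the Guardrail trait and GuardrailResult documented above (GuardrailChain is an illustrative application-level type, not part of the crate):

use praisonai::{Guardrail, GuardrailResult};

// Illustrative application-level composite; not provided by the crate.
struct GuardrailChain { guards: Vec<Box<dyn Guardrail>> }

impl Guardrail for GuardrailChain {
    fn name(&self) -> &str { "guardrail_chain" }
    fn description(&self) -> &str { "Runs several guardrails in sequence" }

    fn validate(&self, content: &str) -> GuardrailResult {
        let mut current = content.to_string();
        for guard in &self.guards {
            let result = guard.validate(&current);
            if result.is_failure() {
                // First hard failure wins; the configured GuardrailAction decides what happens next.
                return result;
            }
            // Carry forward any rewritten content (e.g. a truncation) to the next check.
            current = result.get_result_or(&current);
        }
        GuardrailResult::success(current)
    }
}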