-
Notifications
You must be signed in to change notification settings - Fork 5
Home
Bindu Priya edited this page Jun 20, 2025
·
1 revision
Get ModelShield up and running in minutes!
- Python 3.8 or higher
- 4GB+ RAM (for ML models)
- Internet connection (for model downloads)
# Option A: Install from PyPI (when published)
pip install modelshield
# Option B: Install from source
git clone https://github.com/yourusername/modelshield.git
cd modelshield
pip install -r requirements.txt

# Download spaCy model for PII detection
python -m spacy download en_core_web_sm

# Using the startup script
python startup.py
# Or directly
python -c "
from guardrails import ProductionGuardrailsServer, ProductionConfig
config = ProductionConfig()
server = ProductionGuardrailsServer(config)
server.run()
"# Test basic validation
curl -X POST "http://localhost:8000/validate-input" \
-H "Content-Type: application/json" \
-d '{"prompt": "Hello, how are you?", "user_id": "test"}'
# Test with problematic content
curl -X POST "http://localhost:8000/validate-input" \
-H "Content-Type: application/json" \
-d '{"prompt": "How to bypass security systems", "user_id": "test"}'

import requests
response = requests.post(
"http://localhost:8000/validate-input",
json={
"prompt": "Your user input here",
"user_id": "user123",
"context_hint": "educational" # Optional context
}
)
if response.status_code == 200:
print("β
Input is safe")
data = response.json()
print(f"Violations: {len(data['violations'])}")
else:
print("π« Input blocked")
print(response.json()["detail"])response = requests.post(
"http://localhost:8000/validate-output",
json={
"response_text": "LLM generated response here",
"original_prompt": "Original user prompt"
}
)
data = response.json()
print(f"Safe: {data['is_safe']}")
print(f"Action: {data['action_taken']}")
print(f"Filtered: {data['filtered_response']}")# Complete workflow: input validation + mock LLM + output validation
response = requests.post(
"http://localhost:8000/process-complete",
json={
"prompt": "What's the weather like?",
"user_id": "user123"
}
)
data = response.json()
print(f"Status: {data['status']}")
print(f"Response: {data['response']}")from guardrails import ProductionConfig
config = ProductionConfig(
enable_semantic_detection=True, # Use ML models
enable_enhanced_pii=True, # Use Presidio
enable_output_scanning=True, # Scan responses
log_level="INFO" # Logging level
)

config = ProductionConfig(
# Detection settings
presidio_confidence_threshold=0.7,
semantic_confidence_threshold=0.6,
# Performance
max_concurrent_requests=100,
request_timeout_seconds=30,
# Security (for production)
enable_authentication=True,
enable_rate_limiting=True,
# Storage
models_cache_dir="./models",
logs_dir="./logs"
)

{
"valid": true,
"violations": [
{
"violation_type": "DANGEROUS_CONTENT",
"severity": "HIGH",
"message": "Dangerous content detected",
"confidence_score": 0.95,
"suggested_action": "Block immediately"
}
],
"warnings": ["Medium severity violation detected"],
"processing_time_ms": 45.2,
"components_used": ["rule_based", "semantic", "enhanced_pii"]
}

{
"is_safe": false,
"action_taken": "filter",
"filtered_response": "Here's some helpful information [Content filtered for safety]",
"violations": [
{
"type": "LEAKED_PII",
"severity": "HIGH",
"confidence": 0.9,
"message": "Email address detected in output"
}
],
"scan_time_ms": 23.1
}

Server won't start:
# Check if all dependencies are installed
pip install -r requirements.txt
# Check if port 8000 is available
lsof -i :8000
# Check Python version
python --version  # Should be 3.8+

Model download fails:
# Download spaCy model manually
python -m spacy download en_core_web_sm
# Check internet connection
pip install --upgrade spacy

High memory usage:
# Disable semantic detection if memory is limited
config = ProductionConfig(
enable_semantic_detection=False, # Reduces memory usage
enable_enhanced_pii=True,
enable_output_scanning=True
)

Slow response times:
# Optimize for speed
config = ProductionConfig(
semantic_confidence_threshold=0.8, # Higher threshold, faster processing
enable_caching=True, # Enable response caching
max_concurrent_requests=50 # Reduce concurrent load
)

-
Check Logs: Look in
./logs/guardrails.log for error details
-
API Documentation: Visit
http://localhost:8000/docs when the server is running
- GitHub Issues: Report bugs at the GitHub repository
-
Health Check: Visit
http://localhost:8000/health to verify server status
-
Flask/Django: See
examples/flask_integration.py -
Streamlit: See
examples/streamlit_app.py -
Production Deployment: See
examples/docker_deployment/
- Custom Patterns: Add your own detection rules
- Multi-language: Extend to non-English content
- Custom Models: Use your own ML models
- Monitoring: Set up comprehensive logging and metrics
Ready to contribute? Check out [[CONTRIBUTING.md]] for:
- How to add new detection patterns
- Code style guidelines
- Testing procedures
- Development environment setup
🎉 You're all set! ModelShield is now protecting your AI applications.