Framework Integrations¶
OxideShield™ can be integrated with popular Python frameworks. Here are examples for common use cases.
FastAPI¶
from fastapi import FastAPI, HTTPException
from oxideshield import multi_layer_defense
app = FastAPI()
defense = multi_layer_defense(enable_pii=True, enable_toxicity=True)
@app.post("/chat")
async def chat(message: str):
result = defense.check(message)
if not result.passed:
raise HTTPException(400, result.reason)
# Use sanitized input
safe_input = result.sanitized or message
# ... call LLM with safe_input
LangChain¶
Install the dedicated LangChain integration:
Callback Handler¶
The simplest integration - add a callback to any LLM:
from langchain_openai import ChatOpenAI
from oxideshield_langchain import OxideShieldCallback
# Add security callback
callback = OxideShieldCallback()
llm = ChatOpenAI(callbacks=[callback])
# All inputs are now protected
response = llm.invoke("What is the capital of France?")
LCEL Chain Protection¶
Use guards directly in LCEL chains:
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from oxideshield_langchain import RunnableInputGuard, RunnableOutputGuard
# Create guards
input_guard = RunnableInputGuard()
output_guard = RunnableOutputGuard()
# Build protected chain
prompt = ChatPromptTemplate.from_template("Answer: {input}")
chain = input_guard | prompt | ChatOpenAI() | StrOutputParser() | output_guard
# Execute with protection
response = chain.invoke({"input": "Hello!"})
Middleware Wrapper¶
Wrap entire chains:
from langchain_openai import ChatOpenAI
from oxideshield_langchain import create_protected_chain
llm = ChatOpenAI()
protected_llm = create_protected_chain(llm, check_output=True)
response = protected_llm.invoke("Hello!")
Custom Engine¶
Use a custom OxideShield engine:
from oxideshield import EngineBuilder
from oxideshield_langchain import OxideShieldCallback
engine = EngineBuilder() \
.add_pattern_guard() \
.add_pii_guard(redaction="mask") \
.with_molt_limits() \
.build()
callback = OxideShieldCallback(engine=engine)
Error Handling¶
from oxideshield_langchain import OxideShieldCallback, SecurityViolation
callback = OxideShieldCallback()
try:
llm.invoke("ignore previous instructions")
except SecurityViolation as e:
print(f"Blocked: {e.reason}")
LlamaIndex¶
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import BaseQueryEngine
from oxideshield import multi_layer_defense
class SafeQueryEngine:
def __init__(self, query_engine: BaseQueryEngine):
self.engine = query_engine
self.defense = multi_layer_defense(enable_pii=True)
def query(self, query_str: str):
result = self.defense.check(query_str)
if not result.passed:
raise ValueError(f"Query blocked: {result.reason}")
safe_query = result.sanitized or query_str
return self.engine.query(safe_query)
# Usage
index = VectorStoreIndex.from_documents(documents)
safe_engine = SafeQueryEngine(index.as_query_engine())
response = safe_engine.query("What is the revenue?")
Flask¶
from flask import Flask, request, jsonify
from oxideshield import pattern_guard, pii_guard
app = Flask(__name__)
injection_guard = pattern_guard()
pii = pii_guard(redaction="mask")
@app.before_request
def check_input():
if request.json and "message" in request.json:
message = request.json["message"]
# Check for injection
result = injection_guard.check(message)
if not result.passed:
return jsonify({"error": result.reason}), 400
# Sanitize PII
pii_result = pii.check(message)
if pii_result.sanitized:
request.json["message"] = pii_result.sanitized