@morganmcg1
Created February 20, 2025 14:49
import weave
from weave.scorers import WeaveToxicityScorerV1

toxicity_scorer = WeaveToxicityScorerV1()

@weave.op
def call_llm(prompt: str) -> str:
    """Generate text using an LLM."""
    # Your LLM generation logic here
    return prompt.upper()

# Define a guardrailed wrapper around the LLM call
async def generate_safe_response(prompt: str) -> str:
    # Call the op and get back both the result and the Call object
    result, call = call_llm.call(prompt)

    # Check toxicity before returning the result
    safety = await call.apply_scorer(toxicity_scorer)
    if not safety.result.passed:
        return f"Sorry, but I cannot respond. Guardrail triggered:\n{safety.result.metadata}"

    return result

response = await generate_safe_response("Hey, how is it going?")
print(response)
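
The top-level await above assumes an async context such as a Jupyter notebook. Below is a minimal sketch of driving the same guardrail from a plain Python script; the project name passed to weave.init is a hypothetical placeholder, and weave.init is assumed to have been configured with your own W&B project so calls and scorer results are logged.

import asyncio
import weave

# Hypothetical project name; replace with your own W&B project
weave.init("guardrails-demo")

async def main() -> None:
    # Reuses generate_safe_response defined above
    response = await generate_safe_response("Hey, how is it going?")
    print(response)

if __name__ == "__main__":
    # asyncio.run provides the event loop that the notebook supplied implicitly
    asyncio.run(main())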