Skip to content

Instantly share code, notes, and snippets.

@phillco
Created August 3, 2024 19:40
Show Gist options
  • Save phillco/644d9470edd61923a81f20bf19125cf9 to your computer and use it in GitHub Desktop.
ollama.py
import ollama
from talon.types import Span
# Model tag passed to ollama.generate() by process_text().
OLLAMA_MODEL = "gemma2:2b"
def process_text(text, template):
    """Run *text* through the local Ollama model and return the response.

    Args:
        text: The input text to process.
        template: Prompt template containing a ``$text`` placeholder, which
            is replaced with ``text`` before sending.

    Returns:
        The model's response string with surrounding whitespace stripped,
        or ``None`` if the Ollama request fails.
    """
    print(f"Sending text to model: {text[:50]}...")
    prompt = template.replace("$text", text)
    # Log the full prompt BEFORE the model call so it is visible for
    # debugging even when the request fails or hangs.
    print(f"Using prompt:\n{prompt}\n...")
    try:
        # keep_alive="24h" keeps the model loaded in memory between calls,
        # avoiding reload latency on subsequent requests.
        response = ollama.generate(model=OLLAMA_MODEL, prompt=prompt, keep_alive="24h")
        processed_text = response["response"].strip()
        print(f"Model returned processed text:\n{processed_text}\n...")
        return processed_text
    except ollama.ResponseError as e:
        # Bug fix: the ollama package raises ResponseError; there is no
        # ollama.OllamaError, so the original except clause would itself
        # have raised AttributeError when a request failed.
        print(f"Error processing text: {e}")
        return None
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment