Skip to content

Instantly share code, notes, and snippets.

@prokizzle
Last active March 26, 2026 22:39
Show Gist options
  • Select an option

  • Save prokizzle/ee6b45ce0c1c7ba76ecd131f6fbf467c to your computer and use it in GitHub Desktop.

Select an option

Save prokizzle/ee6b45ce0c1c7ba76ecd131f6fbf467c to your computer and use it in GitHub Desktop.
OmniLottie RunPod serverless handler
"""
RunPod serverless handler for OmniLottie — text-to-Lottie animation generation.
Runs inference_hf.py as a subprocess to avoid import issues.
"""
import json
import os
import subprocess
import runpod
import tempfile
def handler(job):
    """RunPod serverless handler: generate a Lottie animation from a text prompt.

    Expects ``job["input"]["prompt"]`` to be a non-empty string. Runs
    ``inference_hf.py`` as a subprocess (per the module docstring, this avoids
    import issues) and returns a dict containing either an ``"error"`` key or
    the generated Lottie JSON plus summary metadata.

    Returns:
        dict: on success — ``lottie_json`` (serialized JSON string), ``prompt``,
        ``layers`` (layer count), ``size`` ("WxH"); on failure — ``error`` and,
        when a prompt was given, ``prompt``. Never raises: all failures are
        reported via the ``"error"`` key (serverless boundary contract).
    """
    job_input = job["input"]
    prompt = job_input.get("prompt", "")
    if not prompt:
        return {"error": "No prompt provided"}

    # MODEL_PATH may be overridden per-deployment; falls back to the HF repo id.
    model_path = os.environ.get("MODEL_PATH", "OmniLottie/OmniLottie")
    omnilottie_dir = "/app/OmniLottie"

    # Reserve a temp-file path for the subprocess to write its output into.
    # delete=False so the file survives the `with`; cleaned up in `finally`.
    with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as tmp:
        output_path = tmp.name

    try:
        cmd = [
            "python3", f"{omnilottie_dir}/inference_hf.py",
            "--model_path", model_path,
            "--text", prompt,
            "--output", output_path,
        ]
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=300,
            cwd=omnilottie_dir,
        )
        if result.returncode != 0:
            # Surface only the tail of stderr to keep the response payload small.
            return {
                "error": f"Inference failed: {result.stderr[-500:]}",
                "prompt": prompt,
            }
        with open(output_path, "r") as f:
            lottie_json = json.load(f)
        return {
            "lottie_json": json.dumps(lottie_json),
            "prompt": prompt,
            "layers": len(lottie_json.get("layers", [])),
            "size": f"{lottie_json.get('w', 0)}x{lottie_json.get('h', 0)}",
        }
    except subprocess.TimeoutExpired:
        return {"error": "Inference timed out (300s)", "prompt": prompt}
    except Exception as e:
        # Broad catch is deliberate at this top-level serverless boundary:
        # any unexpected failure is reported to the caller, not raised.
        return {"error": str(e), "prompt": prompt}
    finally:
        # Fixed: original used a conditional expression as a statement
        # (`os.unlink(p) if os.path.exists(p) else None`); a plain guard
        # is the idiomatic form and behaves identically.
        if os.path.exists(output_path):
            os.unlink(output_path)
runpod.serverless.start({"handler": handler})
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment