Last active
March 30, 2025 22:10
-
-
Save ochafik/0e0d350344a5f503274d9909c9fe5569 to your computer and use it in GitHub Desktop.
Script to get the GGUF file from an Ollama image for use with llama.cpp's llama-server
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env node
/*
  Resolves the GGUF blob file stored by Ollama for a given model and prints
  its absolute path on stdout, for use with llama.cpp's llama-server.

  How it works:
  - Ollama stores per-model manifests under
    $OLLAMA_HOME/models/manifests/registry.ollama.ai/library/${modelId}/${modelTag}
  - The manifest layer whose mediaType is "application/vnd.ollama.image.model"
    points (via its digest) at the GGUF blob under $OLLAMA_HOME/models/blobs/.
  - You'll need to get the Jinja template from the original model using
    llama.cpp's scripts/get_chat_template.py script.

  Example:
    ollama pull qwen2.5-coder:7b
    llama-server -m $( ./get_ollama_gguf.js qwen2.5-coder:7b ) -fa --jinja \
      --chat-template-file <( ./scripts/get_chat_template.py Qwen/Qwen2.5-Coder-7B-Instruct-GGUF tool_use )

  Initially shared here: https://github.com/ggml-org/llama.cpp/pull/9639#issuecomment-2704208342
*/
const fs = require('fs');
const os = require('os');
const path = require('path');

// Fall back to os.homedir() when $HOME is unset (e.g. Windows, where the
// variable is USERPROFILE); otherwise path.join(undefined, ...) throws.
const OLLAMA_HOME = process.env.OLLAMA_HOME || path.join(process.env.HOME || os.homedir(), '.ollama');

const [model] = process.argv.slice(2);
if (!model) {
  console.error('Usage: node get_ollama_gguf.js <modelId[:modelTag]>');
  process.exit(1);
}

// Ollama treats a missing tag as "latest" (`ollama pull qwen2.5-coder` ==
// `ollama pull qwen2.5-coder:latest`); default it here so a bare model id
// resolves instead of crashing on an undefined path segment.
const [modelId, modelTag = 'latest'] = model.split(':');

const manifestFile = path.join(OLLAMA_HOME, 'models', 'manifests', 'registry.ollama.ai', 'library', modelId, modelTag);
if (!fs.existsSync(manifestFile)) {
  console.error(`Manifest file not found for ${modelId}:${modelTag}`);
  process.exit(1);
}

const manifest = JSON.parse(fs.readFileSync(manifestFile, 'utf8'));
const modelLayer = manifest.layers.find((l) => l.mediaType === 'application/vnd.ollama.image.model');
if (!modelLayer) {
  console.error('Model layer not found');
  process.exit(1);
}

// Blob files are named after the layer digest with ':' replaced by '-'
// (e.g. "sha256:abcd..." -> "sha256-abcd...").
const modelFileName = modelLayer.digest.split(':').join('-');
const modelFile = path.join(OLLAMA_HOME, 'models', 'blobs', modelFileName);
if (!fs.existsSync(modelFile)) {
  console.error(`Model file not found for ${modelId}:${modelTag}`);
  process.exit(1);
}

console.log(modelFile);
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment