@bsharper
Last active April 8, 2025 03:56
Map ollama models to normal filenames using symbolic links (Linux / macOS / Windows)
# Run without arguments to see what is found.
# Run with a path as an argument to create links to ollama models there.
# This will remove any existing file with the same name as a new link, so use with caution!
import os
import sys
import json
import platform

def get_ollama_model_path():
    # Check if the OLLAMA_MODELS environment variable is set
    env_model_path = os.environ.get('OLLAMA_MODELS')
    if env_model_path:
        return env_model_path
    # Determine the default model path based on the operating system
    system = platform.system()
    if system == 'Darwin':  # macOS
        return os.path.join(os.path.expanduser('~'), '.ollama', 'models')
    elif system == 'Linux':
        return '/usr/share/ollama/.ollama/models'
    elif system == 'Windows':
        return os.path.join(os.environ['USERPROFILE'], '.ollama', 'models')
    else:
        raise Exception('Unsupported Operating System')

def extract_digests(json_record):
    # Pull the model (GGUF) and projector digests out of an ollama manifest
    result = {}
    for layer in json_record.get('layers', []):
        media_type = layer.get('mediaType')
        digest = layer.get('digest')
        if media_type == "application/vnd.ollama.image.model":
            result['gguf'] = digest
        elif media_type == "application/vnd.ollama.image.projector":
            result['mmproj'] = digest
    return result

def search_for_models(directory):
    models = {}
    models_map = {}
    blobs = {}
    for root, dirs, files in os.walk(directory):
        for file in files:
            full_path = os.path.join(root, file)
            sz = os.path.getsize(full_path)
            if root.endswith("blobs"):
                # Blob filenames use "sha256-<hash>"; manifests reference "sha256:<hash>"
                blobs[file.replace("sha256-", "sha256:")] = full_path
            if (file == "latest" or ("registry.ollama.ai" in full_path and "library" in full_path)) and sz < 2048:
                els = full_path.split(os.sep)
                model_name = f"{els[-2]}-{els[-1]}"
                with open(full_path) as f:
                    j = json.load(f)
                models[model_name] = extract_digests(j)
    # Resolve each model's digests to the blob files on disk
    for model_name in models:
        model = models[model_name]
        obj = model
        try:
            obj = {x: blobs[model[x]] for x in model}
        except KeyError as ex:
            print(ex)
            print("This likely means that a manifest is pointing to a file that does not exist")
        models_map[model_name] = obj
    return models_map

def generate_link_pairs(models_map, target_path=""):
    if target_path:
        target_path = os.path.expanduser(target_path)
    links = []
    for model_name in models_map:
        model = models_map[model_name]
        for file_type in model:
            file_path = model[file_type]
            filename = f"{model_name}.{file_type}"
            if target_path:
                filename = os.path.join(target_path, filename)
            links.append({'target': file_path, 'linkpath': filename})
    return links

def print_link_script(links):
    is_windows = platform.system() == "Windows"
    for link in links:
        if is_windows:
            # cmd.exe's mklink takes the link name first and expects double quotes
            print(f"mklink \"{link['linkpath']}\" \"{link['target']}\"")
        else:
            print(f"ln -s '{link['target']}' '{link['linkpath']}'")

def create_links(links):
    for link in links:
        linkpath = link['linkpath']
        target = link['target']
        print(f'Creating link "{linkpath}" => "{target}"')
        if not os.path.exists(target):
            print(f'Skipping "{target}", file does not exist')
            continue
        # Remove any existing file or stale link before creating the new one
        if os.path.exists(linkpath) or os.path.islink(linkpath):
            os.unlink(linkpath)
        os.symlink(target, linkpath)

def header(ollama_path, link_path=""):
    width = 60
    print("=" * width)
    print(f"Ollama models path : {ollama_path}")
    if link_path:
        link_path = os.path.expanduser(link_path)
        print(f"Link path          : {link_path}")
    print("=" * width)

if __name__ == "__main__":
    args = sys.argv[1:]
    link_path = ""
    if len(args) > 0:
        if args[0] == "-h" or args[0] == "--help":
            bn = os.path.basename(sys.argv[0])
            print(f"Usage: python {bn} ../some_path")
            print("")
            print("Creates symbolic links to the models downloaded by ollama")
            print("Run without any arguments to see the models it will process")
            sys.exit(0)
        link_path = args[0]
        if not os.path.exists(link_path):
            print(f'Error: provided path "{link_path}" does not exist')
            sys.exit(1)
    ollama_path = get_ollama_model_path()
    header(ollama_path, link_path)
    models_map = search_for_models(ollama_path)
    links = generate_link_pairs(models_map, link_path)
    if link_path:
        create_links(links)
    else:
        print_link_script(links)
@bsharper (Author)

I use this to test-run llama.cpp against models downloaded with ollama.

Example usage:

  1. Create a subdirectory under llama.cpp called ollama_models.
  2. Run the script, pointing it at the new subdirectory: python map_models.py ~/llama.cpp/ollama_models
  3. Test normal llama.cpp: ./main -m ollama_models/gemma-2b-instruct.gguf -p 'What do you do with a drunken sailor?'
  4. Or image description: ./llava-cli -m ollama_models/bakllava-latest.gguf --mmproj ollama_models/bakllava-latest.mmproj --image ./some_image_file.jpg

map_models.py is simple but works for me and is easy to re-run to add any new models.
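
If you'd rather drive it from Python than from the shell, here's a minimal sketch (assuming the gist is saved as map_models.py in the working directory, so its functions can be imported; the main block is guarded by __name__, so importing is safe) that previews the links without creating anything:

# Hypothetical usage sketch: assumes the gist was saved as map_models.py
# alongside this file so its functions can be imported.
from map_models import get_ollama_model_path, search_for_models, generate_link_pairs

models_map = search_for_models(get_ollama_model_path())
for link in generate_link_pairs(models_map, "~/llama.cpp/ollama_models"):
    print(f"{link['linkpath']} -> {link['target']}")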

@bsharper (Author)

Updated to follow the new ollama file-name schema.

@i486 commented Jul 6, 2024

Hi.

Do you know if anyone has developed something that does the opposite of this?

I have hundreds of GGUF files on my hard drive and would like to import (symlink) them automatically into Ollama.

@bsharper (Author)

I don't know of anything, but I think ollama model files just need a FROM field followed by the path to the GGUF file. I just played around with it: you can create a file called Modelfile that contains just FROM YourModelFileName.gguf, then run ollama create YourModelName.

But... it will create a copy of the model using ollama's storage scheme, so it might not be ideal if you have a lot of files.
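
For hundreds of files you could script that loop. A rough sketch of the idea (the directory path and the naming rule are assumptions, and the copy-on-create caveat above still applies):

# Hypothetical batch import: write a Modelfile per GGUF and run
# "ollama create" on it. Assumes ollama is on PATH and that model
# names derived from filenames are acceptable.
import os
import subprocess
import tempfile

gguf_dir = os.path.expanduser("~/gguf")  # assumed location of your GGUF files

for fname in sorted(os.listdir(gguf_dir)):
    if not fname.endswith(".gguf"):
        continue
    model_name = os.path.splitext(fname)[0].lower().replace(" ", "-")
    # Write a throwaway Modelfile containing only the FROM line
    with tempfile.NamedTemporaryFile("w", suffix=".Modelfile", delete=False) as mf:
        mf.write(f"FROM {os.path.join(gguf_dir, fname)}\n")
        modelfile = mf.name
    subprocess.run(["ollama", "create", model_name, "-f", modelfile], check=True)
    os.unlink(modelfile)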

@ChadwickHill
Thank you! This script helped me recover a significant amount of storage from failed ollama create runs with various GGUF files.
