Using local LLMs anywhere (in a text editor) - example below with Obsidian
Inspired by and adapted from LLM-automator.
Code example with
Mixtral
from fasthtml import common as fh


def df2fhtml(df: pd.DataFrame, with_headers: bool = True, **kwargs):
    """Render a pandas DataFrame as a fasthtml ``Table`` component.

    Parameters
    ----------
    df : pd.DataFrame
        The frame to render; column labels become the header cells.
    with_headers : bool, default True
        When True, a ``Tr`` of ``Th(Label(col))`` header cells is prepended;
        when False an empty string takes its slot.
    **kwargs
        Forwarded verbatim to ``fh.Table`` (e.g. cls/id attributes).

    Returns
    -------
    A ``fh.Table`` component: one ``Tr`` per DataFrame row, one ``Td`` per cell.
    """
    cols = df.columns
    header = fh.Tr(*[fh.Th(fh.Label(col)) for col in cols])
    # itertuples(index=False) yields cells positionally, so this also works
    # for DataFrames whose index is not the default 0..n-1 RangeIndex
    # (the original df[col][i] lookup raised KeyError in that case).
    rows = [fh.Tr(*[fh.Td(val) for val in row]) for row in df.itertuples(index=False)]
    return fh.Table(header if with_headers else '', *rows, **kwargs)
import torch | |
from langchain_community.embeddings import ( | |
OpenAIEmbeddings, | |
OllamaEmbeddings, | |
HuggingFaceEmbeddings, | |
HuggingFaceBgeEmbeddings | |
) | |
def embedding_func(selected_embedding: str = "HuggingFaceEmbeddings"): | |
""" |
# Snippet: offline code-completion UI built with Streamlit + local Ollama models.
# (Extraction residue "| |" removed from every line; statements otherwise unchanged.)
import os
import json
import datetime
import streamlit as st
from llama_index.llms import Ollama
from llama_index.llms import ChatMessage
# https://docs.llamaindex.ai/en/stable/examples/llm/ollama.html
import sys
from io import StringIO
import streamlit as st  # pip install streamlit
from code_editor import code_editor  # pip install streamlit_code_editor
import ollama as ol  # pip install ollama

# Page-level Streamlit setup: wide layout, then the app title.
st.set_page_config(layout='wide')
st.title('`Offline code completion`')
""" To use: install Ollama (or LLM studio), clone OpenVoice, run this script in the OpenVoice directory | |
git clone https://github.com/myshell-ai/OpenVoice | |
cd OpenVoice | |
git clone https://huggingface.co/myshell-ai/OpenVoice | |
cp -r OpenVoice/* . | |
pip install whisper pynput pyaudio streamlit ollama | |
script source: https://x.com/Thom_Wolf/status/1758140066285658351?s=20 | |
""" |
Using local LLMs anywhere (in a text editor) - example below with Obsidian
Inspired by and adapted from LLM-automator.
Code example with
Mixtral