Created
February 12, 2025 21:15
-
-
Save hoonsubin/1a9347eccaa30940cf92d317ef04df9f to your computer and use it in GitHub Desktop.
A simple Langfuse integration script to work with OpenAI-compatible LLM APIs (I'm using a local LM Studio dev server in this case)
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from langfuse.openai import openai | |
import os | |
def init_connection(
    public_key: str = "..",
    private_key: str = "..",
    host: str = "http://localhost:3000",
) -> None:
    """Configure the Langfuse credentials via environment variables.

    The ``langfuse.openai`` wrapper reads these variables when the OpenAI
    client is instantiated, so this must run before any traced LLM call.

    Args:
        public_key: Langfuse project public key (placeholder by default).
        private_key: Langfuse project secret key (placeholder by default).
        host: Langfuse server URL; defaults to a self-hosted instance.
    """
    os.environ["LANGFUSE_PUBLIC_KEY"] = public_key
    os.environ["LANGFUSE_SECRET_KEY"] = private_key
    os.environ["LANGFUSE_HOST"] = host  # For self-hosted
def init_llm(
    model: str = "claudegpt-code-logic-debugger-v0.1",
    base_url: str = "http://127.0.0.1:1234/v1",
    api_key: str = "lm-studio",
    prompt: str = "What is the origin of hello world?",
    temperature: float = 0.7,
) -> str:
    """Run one chat completion through the Langfuse-instrumented OpenAI client.

    Uses an OpenAI-compatible local endpoint (LM Studio by default). The
    previously hard-coded model, endpoint, key, prompt, and temperature are
    now parameters whose defaults reproduce the original behavior.

    Args:
        model: Identifier of the model loaded in the local server.
        base_url: OpenAI-compatible API endpoint.
        api_key: API key; LM Studio accepts the dummy value "lm-studio".
        prompt: User message to send.
        temperature: Sampling temperature for the completion.

    Returns:
        The assistant's reply text (also printed to stdout).
    """
    # openai here is langfuse.openai, so this call is traced to Langfuse.
    llm_client = openai.OpenAI(base_url=base_url, api_key=api_key)
    completion = llm_client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ],
        temperature=temperature,
    )
    content = completion.choices[0].message.content
    print(content)
    return content
def main() -> None:
    """Set up Langfuse credentials, then run the demo completion."""
    init_connection()
    init_llm()


if __name__ == '__main__':
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment