Created
September 15, 2023 11:16
-
-
Save jfjensen/1d5ba4de77f35891fab5f568f7c9e194 to your computer and use it in GitHub Desktop.
Using LangChain to access an LLM on a vLLM server
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Query an LLM served by a vLLM OpenAI-compatible server through LangChain.

Discovers the first model exposed at ``base_url`` via the ``/v1/models``
endpoint, builds an ``LLMChain`` around it, and asks a sample question.
"""
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
import requests

# vLLM serves an OpenAI-compatible REST API; adjust host/port to your server.
base_url = "http://localhost:9999/v1"

# Ask the server which models it serves and pick the first one.
# timeout prevents an indefinite hang if the server is unreachable;
# raise_for_status fails fast instead of a cryptic KeyError on the JSON below.
response = requests.get(base_url + "/models", timeout=10)
response.raise_for_status()
model = str(response.json()["data"][0]["id"])
print(f"model: {model}")

# vLLM ignores the API key, but the OpenAI client requires a non-empty value.
llm = OpenAI(
    temperature=0.7,
    frequency_penalty=1.1,
    openai_api_key="***",
    verbose=True,
    openai_api_base=base_url,
    model_name=model,
    max_tokens=1024,
    logit_bias=None,
)

# One-variable prompt: fill in {country}, ask for its capital.
prompt = PromptTemplate(
    input_variables=["country"],
    template="What is the capital of {country}?",
)

chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run("France"))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment