Created
September 15, 2023 11:25
-
-
Save jfjensen/07cd0b8f0a922114c14c50dc9b91ddb6 to your computer and use it in GitHub Desktop.
Using Langchain to access an LLM on a vLLM server
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Query an LLM served by vLLM through LangChain's OpenAI-compatible client.

Discovers the served model's name from the server's ``/v1/models`` endpoint,
builds a one-variable prompt-template chain, and runs it once.
"""
from langchain import PromptTemplate
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
import requests

# vLLM exposes an OpenAI-compatible REST API; /v1/models lists what is served.
base_url = "http://localhost:9999/v1"

# Ask the server which model it serves instead of hard-coding the name.
# timeout= prevents hanging forever if the server is down; raise_for_status()
# turns an HTTP error into a clear exception instead of a KeyError below.
response = requests.get(base_url + "/models", timeout=10)
response.raise_for_status()
model = str(response.json()["data"][0]["id"])
print(f"model: {model}")

# vLLM does not check the API key, but the OpenAI client requires a value.
llm = ChatOpenAI(
    temperature=0.7,
    openai_api_key="***",
    verbose=True,
    openai_api_base=base_url,
    model_name=model,
    max_tokens=1024,
)

# Single-variable prompt, wrapped as a human chat message for the chat model.
prompt = PromptTemplate(
    input_variables=["country"],
    template="What is the capital of {country}?",
)
message_prompt = HumanMessagePromptTemplate(prompt=prompt)
chat_prompt = ChatPromptTemplate.from_messages([message_prompt])

chain = LLMChain(llm=llm, prompt=chat_prompt)
print(chain.run("France"))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment