llama and OpenAI Pydantic Output Parser example
# %%
from pprint import pp
from pydantic import BaseModel, Field
from langchain_core.prompts import PromptTemplate
from langchain_ollama import ChatOllama
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import PydanticOutputParser, StrOutputParser
from typing import Optional
import json
from dotenv import load_dotenv
# %%
load_dotenv("../.env", override=True)
# %%
class Movie(BaseModel):
    title: str = Field(description="The title of the movie.")
    year: Optional[int] = Field(None, description="The year the movie was released (optional).")
    director: Optional[str] = Field(None, description="The name of the director (optional).")
    # rating: Optional[float] = Field(default=None, ge=0, le=10, description="Optional rating of the movie.")
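# %%
# Optional sanity check: the JSON schema Pydantic generates for Movie is what
# PydanticOutputParser derives its format instructions from.
# (Sketch assuming Pydantic v2, which provides model_json_schema().)
# print(json.dumps(Movie.model_json_schema(), indent=2))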
# %%
# OpenAI works reliably
# llm = ChatOpenAI()
# llama3.1 does not always work
# llm = ChatOllama(model="llama3.1", format="json")
# llama3.2 does not always work
# llm = ChatOllama(model="llama3.2", format="json")
# llama3.3 works reliably
llm = ChatOllama(model="llama3.3", format="json")
# %%
output_parser = PydanticOutputParser(pydantic_object=Movie)
# %%
# print(output_parser.get_format_instructions())
# %%
# a query intended to prompt an LLM to populate the model
movie_query = PromptTemplate(
    input_variables=["title"],
    template="Answer the user query.\n{format_instructions}\nYou must return the JSON structure described above.\nWhat are the details of the movie with the title: {title}?\n",
    partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
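# Note: partial_variables pre-fills {format_instructions} from the parser, so
# only {title} needs to be supplied when the chain is invoked.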
# %%
# print(movie_query.format(title="The Matrix"))
# %%
# pp(movie_query.dict())
# %%
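# Compose the pipeline with LCEL: the prompt fills in {title}, the LLM
# generates JSON, and the Pydantic parser validates it into a Movie instance.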
chain = movie_query | llm | output_parser
# %%
# invoke the chain with a movie title
response = chain.invoke({"title": "The Matrix"})
# %%
print(response)
print(type(response))
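# %%
# The parser returns a validated Movie instance, so fields can be used directly.
# (Sketch assuming Pydantic v2, which provides model_dump_json().)
print(response.title, response.year, response.director)
print(response.model_dump_json(indent=2))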