This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Patch the OpenAI client with `instructor` so that
# `client.chat.completions.create` accepts a `response_model` keyword
# and returns a validated Pydantic object instead of raw JSON.
import instructor
from openai import OpenAI
from pydantic import BaseModel

client = instructor.patch(OpenAI())


class UserDetail(BaseModel):
    # Structured-output schema used as the `response_model`.
    name: str
    # NOTE(review): the pasted snippet appears truncated here — the
    # original example likely declares more fields (e.g. `age: int`);
    # confirm against the source gist.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# LangChain PydanticOutputParser example: the parser turns the `Action`
# schema into natural-language formatting instructions for the LLM and
# parses the model's reply back into a validated `Action` instance.
# NOTE(review): `OpenAI`, `BaseModel`, `Field`, and `PydanticOutputParser`
# must be imported earlier in the original example — confirm.
llm = OpenAI(temperature=0)


class Action(BaseModel):
    # Which tool/operation the model chose.
    action: str = Field(description="action to take")
    # The argument string to feed that action.
    action_input: str = Field(description="input to the action")


parser = PydanticOutputParser(pydantic_object=Action)
# The parser provides natural-language formatting instructions for use in the LLM.
# The process is centered around prompts.
prompt = PromptTemplate(template="Answer the user query. |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Marvin `ai_fn` example: the decorator turns a typed function stub into
# an LLM-backed function whose signature and docstring act as the prompt.
@ai_fn
def generate_recipe(ingredients: list[str]) -> list[str]:
    """From a list of `ingredients`, generates a
    complete instruction set to cook a recipe.
    """


# Call with structured arguments...
generate_recipe(["lemon", "chicken", "olives", "couscous"])
# ...or drive the underlying prompt with free-form natural language.
generate_recipe.prompt(
    "I need a recipe using Lemon Chicken Olives and Couscous"
)
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""

    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")


# Define the predictor from the signature.
generate_answer = dspy.Predict(BasicQA)

# Call the predictor on a particular input.
# NOTE(review): `dev_example` is defined elsewhere in the original
# notebook (a dataset example exposing a `.question` attribute) — confirm.
pred = generate_answer(question=dev_example.question)
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Define the predictor. Notice we're just changing the class from
# `dspy.Predict` to `dspy.ChainOfThought` — the signature BasicQA is unchanged.
generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA)

# Call the predictor on the same input.
pred = generate_answer_with_chain_of_thought(question=dev_example.question)

# Print the input, the chain of thought, and the prediction.
print(f"Question: {dev_example.question}")
# Drop the rationale's lead-in sentence (everything up to the first '.')
# and show only the reasoning that follows.
# NOTE(review): this raises IndexError if the rationale contains no '.' —
# acceptable in a demo, but worth guarding in real code.
print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}")
print(f"Predicted Answer: {pred.answer}")
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from dspy.teleprompt import BootstrapFewShot


class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""

    # Retrieved passages the model may ground its answer in.
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class RAG(dspy.Module): |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# The prompt is pulled from the LangSmith Hub, which hosts many different prompts.
prompt = hub.pull("hwchase17/self-ask-with-search")
llm = OpenAI(temperature=0)

# Provide the LM with useful tools. The self-ask-with-search agent expects
# a list containing exactly one tool named "Intermediate Answer".
# BUG FIX: the pasted snippet was missing the opening '[' of the list.
search = SerpAPIWrapper()
tools = [
    Tool(
        name="Intermediate Answer",
        func=search.run,
        description="useful for when you need to ask with search",
    )
]
# Stop generation as soon as the model starts to answer its own
# sub-question, so the agent loop can invoke the search tool instead.
llm_with_stop = llm.bind(stop=["\nIntermediate answer:"])
agent = ( |
OlderNewer