Created November 6, 2023 21:39
-
-
Save rbur0425/8b8041baa84a86f40bc3dd602549de22 to your computer and use it in GitHub Desktop.
Langchain Autonomous Agent Searches Google And Writes Blog Post
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Standard library
import argparse
import os

# Third-party
from dotenv import load_dotenv

# LangChain: agents, models, tools, and shared memory
from langchain import hub  # currently unused at runtime; kept for optional prompt pulling
from langchain.agents import AgentType, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.llms.openai import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.tools import Tool
from langchain.utilities import GoogleSearchAPIWrapper
# Two-agent pipeline: agent 1 researches the topic with Google Search,
# agent 2 writes a blog post from the research.

# Read the research topic from the command line (-q/--query is mandatory).
parser = argparse.ArgumentParser(
    description='Run a query using Self Ask With Search agent and generate a blog post.')
parser.add_argument('-q', '--query', required=True, help='The topic to research.')
args = parser.parse_args()

# Load API keys and other settings from a local .env file into the environment.
load_dotenv()
def is_output_complete(output, marker='Finished Chain', min_length=1500):
    """Return True when generated text looks finished.

    The text counts as complete when it contains the sentinel *marker*
    (the prompts ask the model to append "Finished Chain." at the end)
    or when it has reached *min_length* characters.

    Args:
        output: Accumulated model output to inspect.
        marker: Sentinel substring that signals completion.
        min_length: Character count at which the output is treated as long
            enough even without the marker.

    Returns:
        bool: True if the output is considered complete.
    """
    return marker in output or len(output) >= min_length
# Credentials for Google Programmable Search. GoogleSearchAPIWrapper reads
# these from the environment itself; they are fetched here so a missing key
# is easy to spot while debugging.
GOOGLE_CSE_ID = os.getenv('GOOGLE_CSE_ID')
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')

# Deterministic (temperature=0) completion model for the research agent.
research_llm = OpenAI(temperature=0, verbose=True)

# The single tool both agents may invoke: a Google web search.
search = GoogleSearchAPIWrapper()
tools = [
    Tool(
        func=search.run,
        name="Google Search",
        description="Useful for research and when you need to ask with search. Search Google for recent results.",
    )
]
# Conversation memory shared by both agents so the writer can see the
# researcher's findings.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

# Agent 1: zero-shot ReAct researcher that answers the query via Google Search.
search_agent_executor = initialize_agent(
    tools,
    research_llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    memory=memory,
    verbose=True,
    handle_parsing_errors=True,  # recover gracefully from malformed LLM tool calls
)
# Agent 2: conversational writer backed by GPT-4, deterministic output.
# NOTE: a hub prompt (e.g. hub.pull("hwchase17/react-chat-json")) could be
# substituted here if a custom chat prompt is ever needed.
blog_post_chat_model = ChatOpenAI(temperature=0, model="gpt-4", verbose=True)

write_blog_agent_executor = initialize_agent(
    tools,
    blog_post_chat_model,
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
    memory=memory,
    verbose=True,
    handle_parsing_errors=True,
)
# --- Run the pipeline --------------------------------------------------------

# Step 1: the research agent gathers information on the requested topic.
search_results = search_agent_executor.invoke({"input": args.query})["output"]

# Step 2: the writer agent turns the research into a blog post. The prompt
# asks for a "Finished Chain." sentinel so is_output_complete() can detect
# the end of the post.
input_for_blog_post = f"Write a 1000-word blog post reacting to the following information: {search_results}. When you are done writing the post put Finished Chain. at the end."

blog_post = ""
current_input = input_for_blog_post
# Safety cap: without a bound, a model that never emits the sentinel and
# never pushes the text past the length threshold would loop forever.
_MAX_ROUNDS = 10
for _round in range(_MAX_ROUNDS):
    output = write_blog_agent_executor.invoke({"input": current_input})["output"]
    # Accumulate and test the ENTIRE post so far, not just the newest chunk.
    blog_post += output
    if is_output_complete(blog_post):
        break
    # Not done yet: ask the writer to continue from where it left off.
    current_input = f"Continue writing the 1000-word blog post from the last sentence: {blog_post}. When you are done writing the post put Finished Chain. at the end."

# Emit the finished (or best-effort) blog post.
print(blog_post)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.