Created
June 9, 2023 14:28
-
-
Save LeninGangwal/f46d0cf5f3b96c4be7b3100c835dc714 to your computer and use it in GitHub Desktop.
Shell GPT using Langchain
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from langchain.llms import OpenAI | |
import os | |
from langchain import LLMChain, PromptTemplate | |
import sys | |
import platform | |
import distro | |
from langchain.memory import ConversationBufferMemory | |
def get_llm():
    """Build and return the OpenAI completion LLM used by the command chain.

    Returns:
        OpenAI: a langchain LLM wrapper configured for deterministic output
        (temperature=0).

    Raises:
        EnvironmentError: if OPENAI_API_KEY is not set in the environment.

    Note: the original version hard-coded a literal API key into source
    (``os.environ["OPENAI_API_KEY"] = "sk-abc"``). Secrets must never be
    committed; the key is now required to come from the environment.
    """
    if not os.environ.get("OPENAI_API_KEY"):
        raise EnvironmentError(
            "OPENAI_API_KEY is not set; export it before running this tool."
        )
    # temperature=0 keeps suggested shell commands as deterministic as possible.
    return OpenAI(model_name="gpt-3.5-turbo", temperature=0)
def get_shell():
    """Best-effort detection of the user's shell from the environment.

    Returns the value of COMSPEC on Windows ('nt'), SHELL on POSIX systems,
    'Unknown shell' when the relevant variable is absent, and 'Bash' on any
    other platform family.
    """
    # Map the OS family reported by os.name to the env var that names the shell.
    shell_var_for_family = {'nt': 'COMSPEC', 'posix': 'SHELL'}
    env_var = shell_var_for_family.get(os.name)
    if env_var is None:
        # Unrecognized platform family: assume a Bourne-compatible shell.
        return 'Bash'
    return os.environ.get(env_var, 'Unknown shell')
def get_os():
    """Return a human-readable operating-system name for the prompt.

    'Windows' and 'Apple macOS' are returned verbatim for those platforms;
    on Linux the pretty distribution string from the ``distro`` package is
    used (e.g. "Ubuntu 22.04 LTS"); anything else falls back to 'Linux'.
    """
    system = platform.system()
    if system == 'Darwin':
        return 'Apple macOS'
    if system == 'Windows':
        return 'Windows'
    if system == 'Linux':
        # e.g. "Debian GNU/Linux 12 (bookworm)" — nicer than the bare kernel name.
        return distro.name(pretty=True)
    # Unknown platform: keep the original catch-all answer.
    return 'Linux'
llm = get_llm()

# Few-shot prompt: demonstrates the expected step-by-step reasoning followed by
# a fenced block of shell commands, parameterized by OS/shell and chat history.
PROMPT_TEMPLATE = """If someone asks you to perform a task, your job is to come up with a series of shell commands that will perform the task.
Ensure the commands are for the OS {os_name} and the shell {shell_name}
Make sure to reason step by step, using this format:
Question: "copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'"
I need to take the following actions:
- List all files in the directory
- Create a new directory
- Copy the files from the first directory into the second directory
```Command:
ls
mkdir myNewDirectory
cp -r target/* myNewDirectory
```
That is the format. Begin!
{chat_history}
Question: {question}
"""

PROMPT = PromptTemplate(
    input_variables=["question", "os_name", "shell_name", "chat_history"],
    template=PROMPT_TEMPLATE,
)

# Buffer memory feeds prior turns back into {chat_history} so follow-up
# questions ("now delete them") have context; input_key ties it to {question}.
memory = ConversationBufferMemory(memory_key="chat_history", input_key="question")
llm_chain = LLMChain(prompt=PROMPT, llm=llm, verbose=False, memory=memory)

# Detected once up front; these do not change while the REPL runs.
shell_name = get_shell()
os_name = get_os()

# REPL: ask for a task, print the model's suggested commands. The original
# looped forever and dumped a traceback on Ctrl-C / Ctrl-D; exit cleanly instead.
while True:
    try:
        question = input("Please provide a question: ")
    except (EOFError, KeyboardInterrupt):
        break
    print(llm_chain.run({"question": question, "shell_name": shell_name, "os_name": os_name}))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment