Created
April 26, 2023 16:15
-
-
Save bmorphism/7ed07d941b889b32c92eba907cd6537a to your computer and use it in GitHub Desktop.
Y SO SRS π
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#π¦ | |
from typing import List | |
from langchain.chat_models import ChatOpenAI | |
from langchain.prompts.chat import ( | |
SystemMessagePromptTemplate, | |
HumanMessagePromptTemplate, | |
) | |
from langchain.schema import ( | |
AIMessage, | |
HumanMessage, | |
BaseMessage, | |
) | |
from y_py import YDoc, YText | |
class Message:
    """A chat message whose text lives in a y_py CRDT document.

    Wraps a ``YDoc`` holding a single shared ``YText`` so the message body
    can be edited collaboratively and observed for changes.
    """

    def __init__(self):
        self._doc = YDoc()
        # ypy exposes shared text directly via YDoc.get_text(); the original
        # `get_map("text").to_type(YText)` is not part of the ypy API (compare
        # the commented reference implementation elsewhere in this file).
        self._text = self._doc.get_text("text")

    @property
    def content(self) -> str:
        """Current text of the message.

        The rest of this script reads ``.content`` (mirroring langchain
        message objects); the original class never defined it.
        """
        return str(self._text)

    def insert(self, index: int, text: str) -> None:
        """Insert ``text`` at character offset ``index``."""
        # ypy mutations happen inside a transaction obtained from the doc;
        # YDoc has no `transaction(description)` context manager.
        with self._doc.begin_transaction() as txn:
            self._text.insert(txn, index, text)

    def delete(self, index: int, length: int) -> None:
        """Delete ``length`` characters starting at ``index``."""
        with self._doc.begin_transaction() as txn:
            self._text.delete_range(txn, index, length)

    def observe_changes(self, callback):
        """Subscribe ``callback`` to text changes; returns a subscription id."""
        return self._text.observe(callback)

    def unobserve_changes(self, subscription_id) -> None:
        """Cancel a subscription previously returned by observe_changes()."""
        self._text.unobserve(subscription_id)

    def __str__(self) -> str:
        return str(self._text)
# class CRDTSystemMessage(BaseMessage): | |
# def __init__(self, content: str) -> None: | |
# super().__init__() | |
# self.doc = Y.YDoc() | |
# self.text = self.doc.get_text('system_message') | |
# self.set_content(content) | |
# def set_content(self, content: str) -> None: | |
# with self.doc.begin_transaction() as txn: | |
# self.text.delete_range(txn, 0, self.text.length(txn)) | |
# self.text.extend(txn, content) | |
# def get_content(self) -> str: | |
# return str(self.text) | |
# def apply_update(self, update: bytes) -> None: | |
# Y.apply_update(self.doc, update) | |
# def get_update(self) -> bytes: | |
# state_vector = Y.encode_state_vector(self.doc) | |
# return Y.encode_state_as_update(self.doc, state_vector) | |
# def type(self): | |
# return "system_message" | |
# class CAMELAgent: | |
# def __init__( | |
# self, | |
# system_message: Message, | |
# model: ChatOpenAI, | |
# ) -> None: | |
# self.system_message = system_message | |
# self.model = model | |
# self.init_messages() | |
# def reset(self) -> None: | |
# self.init_messages() | |
# return self.stored_messages | |
# def init_messages(self) -> None: | |
# self.stored_messages = [self.system_message] | |
# def update_messages(self, message: BaseMessage) -> List[BaseMessage]: | |
# self.stored_messages.append(message) | |
# return self.stored_messages | |
# def step( | |
# self, | |
# input_message: Message, | |
# ) -> Message: | |
# messages = self.update_messages(input_message) | |
# output_message = self.model(messages) | |
# self.update_messages(output_message) | |
# return output_message | |
class CAMELAgent:
    """One side of a CAMEL role-playing dialogue.

    Keeps a running transcript (``stored_messages``) seeded with a system
    message, and advances the conversation one model call at a time.
    """

    def __init__(
        self,
        system_message: Message,
        model: ChatOpenAI,
    ) -> None:
        self.system_message = system_message
        self.model = model
        self.init_messages()

    def reset(self) -> List[Message]:
        """Reset the transcript to just the system message and return it."""
        # Original annotated this `-> None` while returning the list.
        self.init_messages()
        return self.stored_messages

    def init_messages(self) -> None:
        """(Re)initialize the transcript with only the system message."""
        self.stored_messages = [self.system_message]

    def update_messages(self, message: Message) -> List[Message]:
        """Append ``message`` to the transcript and return the transcript."""
        self.stored_messages.append(message)
        return self.stored_messages

    def step(
        self,
        input_message: Message,
    ) -> Message:
        """Record ``input_message``, query the model, record and return its reply.

        NOTE(review): the transcript holds local ``Message`` objects, but
        ``ChatOpenAI`` expects langchain ``BaseMessage`` instances — confirm
        the model call accepts these.
        """
        messages = self.update_messages(input_message)
        output_message = self.model(messages)
        self.update_messages(output_message)
        return output_message
# ---- Role-playing configuration -------------------------------------------
assistant_role_name = "Coplay (Researcher)"
user_role_name = "Play (Decider)"
# High-level task the two agents will first refine, then solve together.
task = '''
. I want to sustainably electrify > 774 million people worldwide who lack access to basic electricity.
'''
word_limit = 111  # word cap requested for the refined ("specified") task

# CRDT-backed system message for the task-specifier agent.
task_specifier_sys_msg = Message()
task_specifier_sys_msg.insert(0, "You can make a task more specific.")
task_specifier_prompt = (
    """Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.
Please make it more specific. Be creative and imaginative.
Please reply with the specified task in {word_limit} words or less. Do not add anything else."""
)
task_specifier_template = HumanMessagePromptTemplate.from_template(
    template=task_specifier_prompt)
# High temperature (1.0): encourage a creative task refinement.
task_specify_agent = CAMELAgent(task_specifier_sys_msg,
                                ChatOpenAI(temperature=1.0))
task_specifier_msg = Message()
# Render the prompt template and copy its text into a CRDT Message.
task_specifier_msg.insert(
    0,
    task_specifier_template.format_messages(
        assistant_role_name=assistant_role_name,
        user_role_name=user_role_name,
        task=task,
        word_limit=word_limit)[0].content)
# NOTE(review): step() forwards local Message objects straight to ChatOpenAI,
# which expects langchain BaseMessage instances — confirm this call works.
# `.content` below presumably comes from the model's AIMessage reply; the
# local Message class defines no `content` attribute.
specified_task_msg = task_specify_agent.step(task_specifier_msg)
print(f"Specified task: {specified_task_msg.content}")
specified_task = specified_task_msg.content
# System prompt for the assistant agent: answer-only role, fixed
# "Solution: ..." reply format, never instructs, never asks questions.
assistant_inception_prompt = (
    """Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!
We share a common interest in collaborating to successfully complete a task.
You must help me to complete the task.
Here is the task: {task}. Never forget our task!
I must instruct you based on your expertise and my needs to complete the task.
I must give you one instruction at a time.
You must write a specific solution that appropriately completes the requested instruction.
You must decline my instruction honestly if you cannot perform the instruction due to physical and explain the reasons.
Do not add anything else other than your solution to my instruction.
You are never supposed to ask me any questions you only answer questions.
You are never supposed to reply with a flake solution. Explain your solutions.
Your solution must be declarative sentences and simple present tense.
Unless I say the task is completed, you should always start with:
Solution: <YOUR_SOLUTION>
<YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.
Always end <YOUR_SOLUTION> with: Next request.""")
# System prompt for the user agent: instruction-only role, fixed
# "Instruction:/Input:" reply format, terminates with <GRID_TASK_DONE>
# (the sentinel the chat loop below watches for).
user_inception_prompt = (
    """Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.
We share a common interest in collaborating to successfully complete a task.
I must help you to complete the task.
Here is the task: {task}. Never forget our task!
You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:
1. Instruct with a necessary input:
Instruction: <YOUR_INSTRUCTION>
Input: <YOUR_INPUT>
(in which case you must absolutely ignore other "never" clauses and just imagine the artifact and show an excerpt)
2. Instruct without any input:
Instruction: <YOUR_INSTRUCTION>
Input: None
The "Instruction" describes a task or question. The paired "Input" provides further context or information for the requested "Instruction".
You must give me one instruction at a time.
I must write a response that appropriately completes the requested instruction.
You should instruct me not ask me questions.
Now you must start to instruct me using the two ways described above.
Do not add anything else other than your instruction and the optional corresponding input!
Keep giving me instructions and necessary inputs until you think the task is completed.
When the task is completed, you must only reply with a single word <GRID_TASK_DONE>.
Never say <GRID_TASK_DONE> unless my responses have solved your task.""")
def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str):
    """Render the assistant and user inception prompts into system messages.

    Returns a ``(assistant_sys_msg, user_sys_msg)`` pair produced by
    formatting the module-level inception prompt templates with the given
    role names and task.
    """
    fmt_kwargs = {
        "assistant_role_name": assistant_role_name,
        "user_role_name": user_role_name,
        "task": task,
    }

    def render(prompt_text):
        # format_messages returns a one-element list; [0] unwraps it.
        template = SystemMessagePromptTemplate.from_template(template=prompt_text)
        return template.format_messages(**fmt_kwargs)[0]

    return render(assistant_inception_prompt), render(user_inception_prompt)
# Render the inception prompts, then wrap them in CRDT-backed Messages.
assistant_sys_msg, user_sys_msg = get_sys_msgs(assistant_role_name,
                                               user_role_name, specified_task)
# BUG FIX: the original rebound assistant_sys_msg/user_sys_msg to fresh,
# empty Message objects and then read `.content` from those same empty
# objects, discarding the prompts rendered by get_sys_msgs (and crashing,
# since Message defines no `.content`). Capture the rendered text first.
assistant_sys_text = assistant_sys_msg.content
user_sys_text = user_sys_msg.content
assistant_sys_msg = Message()
assistant_sys_msg.insert(0, assistant_sys_text)
user_sys_msg = Message()
user_sys_msg.insert(0, user_sys_text)

# Low temperature (0.2): keep both agents focused and deterministic-ish.
assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2))
user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2))

# Reset agents
assistant_agent.reset()
user_agent.reset()

# Initialize chats: prime each agent with the other's system prompt text.
assistant_msg = Message()
assistant_msg.insert(0, (f"{user_sys_text}. "
                         "Now start to give me introductions one by one. "
                         "Only reply with Instruction and Input."))
user_msg = Message()
user_msg.insert(0, f"{assistant_sys_text}")
user_msg = assistant_agent.step(user_msg)

# Start role-playing session to solve the task
print(f"Original task prompt:\n{task}\n")
print(f"Specified task prompt:\n{specified_task}\n")
chat_turn_limit, n = 666, 0
while n < chat_turn_limit:
    n += 1
    # NOTE(review): *_ai_msg presumably is the model's AIMessage reply,
    # which does expose `.content` — confirm against CAMELAgent.step().
    user_ai_msg = user_agent.step(assistant_msg)
    user_msg = Message()
    user_msg.insert(0, user_ai_msg.content)
    # Message defines __str__ (not `.content`), so format via str().
    print(f"AI User ({user_role_name}):\n\n{user_msg}\n\n")
    assistant_ai_msg = assistant_agent.step(user_msg)
    assistant_msg = Message()
    assistant_msg.insert(0, assistant_ai_msg.content)
    print(
        f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg}\n\n")
    # The user agent signals completion with this sentinel (see its prompt).
    if "<GRID_TASK_DONE>" in str(user_msg):
        break
# assistant_sys_msg, user_sys_msg = get_sys_msgs(assistant_role_name, | |
# user_role_name, specified_task) | |
# assistant_sys_msg = Message(content=assistant_sys_msg.content) | |
# user_sys_msg = Message(content=user_sys_msg.content) | |
# assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2)) | |
# user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2)) | |
# # Reset agents | |
# assistant_agent.reset() | |
# user_agent.reset() | |
# # Initialize chats | |
# assistant_msg = Message( | |
# content=(f"{user_sys_msg.content}. " | |
# "Now start to give me introductions one by one. " | |
# "Only reply with Instruction and Input.")) | |
# user_msg = Message(content=f"{assistant_sys_msg.content}") | |
# user_msg = assistant_agent.step(user_msg) | |
# # Start role-playing session to solve the task | |
# print(f"Original task prompt:\n{task}\n") | |
# print(f"Specified task prompt:\n{specified_task}\n") | |
# chat_turn_limit, n = 666, 0 | |
# while n < chat_turn_limit: | |
# n += 1 | |
# user_ai_msg = user_agent.step(assistant_msg) | |
# user_msg = Message(content=user_ai_msg.content) | |
# print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n") | |
# assistant_ai_msg = assistant_agent.step(user_msg) | |
# assistant_msg = Message(content=assistant_ai_msg.content) | |
# print( | |
# f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n") | |
# if "<GRID_TASK_DONE>" in user_msg.content: | |
# break |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment