OpernRouter-Langchain-Palm2.ipynb
{
"cells": [
{
"metadata": {
"ExecuteTime": {
"start_time": "2023-07-18T08:42:35.338211Z",
"end_time": "2023-07-18T08:42:35.351674Z"
},
"trusted": true
},
"id": "5cba39c2",
"cell_type": "code",
"source": "%load_ext autoreload\n%autoreload 2",
"execution_count": 1,
"outputs": []
},
{
"metadata": {
"ExecuteTime": {
"start_time": "2023-07-18T08:42:35.924920Z",
"end_time": "2023-07-18T08:42:35.951924Z"
},
"trusted": true
},
"id": "8f128ebc",
"cell_type": "code",
"source": "# get environment variable: OPENAI_API_KEY\nfrom dotenv import load_dotenv, find_dotenv\nload_dotenv(find_dotenv())",
"execution_count": 2,
"outputs": [
{
"output_type": "execute_result",
"execution_count": 2,
"data": {
"text/plain": "True"
},
"metadata": {}
}
]
},
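{
"metadata": {},
"cell_type": "markdown",
"source": "Note: the variable is named `OPENAI_API_KEY` because that is what the `openai` client reads, but it must hold your **OpenRouter** API key, since every request below goes to `openrouter.ai` rather than to OpenAI."
},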
{
"metadata": {
"ExecuteTime": {
"start_time": "2023-07-18T08:42:36.464296Z",
"end_time": "2023-07-18T08:42:36.561677Z"
},
"trusted": true
},
"id": "c9b1c45e",
"cell_type": "code",
"source": "import os\nimport openai\n\n# Point the OpenAI client at OpenRouter's OpenAI-compatible endpoint\nopenai.api_base = \"https://openrouter.ai/api/v1\"\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n# OpenRouter uses the HTTP-Referer header to identify the calling app\nOPENROUTER_REFERRER = \"https://github.com/alexanderatallah/openrouter-streamlit\"",
"execution_count": 3,
"outputs": []
},
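{
"metadata": {},
"cell_type": "markdown",
"source": "Since OpenRouter exposes an OpenAI-compatible API, the stock `openai` client works once `api_base` is redirected. As a quick sanity check, the available model ids can be listed. The cell below is a sketch, not part of the original run: it assumes OpenRouter serves the standard `/models` route that `openai.Model.list()` calls."
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "# Sketch (assumes OpenRouter serves the standard /models endpoint):\n# list a few of the model ids available through this api_base.\nmodels = openai.Model.list()\nprint([m[\"id\"] for m in models[\"data\"]][:10])",
"execution_count": null,
"outputs": []
},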
{
"metadata": {
"ExecuteTime": {
"start_time": "2023-07-18T08:42:37.119244Z",
"end_time": "2023-07-18T08:42:40.032169Z"
},
"trusted": true
},
"id": "a2914371",
"cell_type": "code",
"source": "output = openai.ChatCompletion.create(\n    model='google/palm-2-chat-bison',\n    headers={\"HTTP-Referer\": OPENROUTER_REFERRER},\n    messages=[{\n        \"role\": \"system\",\n        \"content\": \"You are a helpful translator from English to Spanish.\"\n    }, {\n        \"role\": \"user\",\n        \"content\": \"Translate the following text to Spanish: Hola. Please add emojis related to the text at the end\"\n    }],\n    temperature=0)\noutput",
"execution_count": 4,
"outputs": [
{
"output_type": "execute_result",
"execution_count": 4,
"data": {
"text/plain": "<OpenAIObject at 0x7fec98095250> JSON: {\n \"model\": \"chat-bison@001\",\n \"choices\": [\n {\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hola \\ud83d\\udc4b\\ud83c\\udffb\"\n }\n }\n ]\n}"
},
"metadata": {}
}
]
},
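{
"metadata": {},
"cell_type": "markdown",
"source": "Note that the PaLM 2 response above carries only `model` and `choices`: there is no `usage` block and no `finish_reason`. That missing `usage` field is what causes the failures below."
},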
{
"metadata": {
"ExecuteTime": {
"start_time": "2023-07-18T08:42:45.132017Z",
"end_time": "2023-07-18T08:42:46.452174Z"
},
"trusted": true
},
"id": "0fb56eac",
"cell_type": "code",
"source": "output_gpt = openai.ChatCompletion.create(\n    model='openai/gpt-3.5-turbo',\n    headers={\"HTTP-Referer\": OPENROUTER_REFERRER},\n    messages=[{\n        \"role\": \"system\",\n        \"content\": \"You are a helpful translator from English to Spanish.\"\n    }, {\n        \"role\": \"user\",\n        \"content\": \"Translate the following text to Spanish: Hola.\"\n    }],\n    temperature=0)\noutput_gpt",
"execution_count": 5,
"outputs": [
{
"output_type": "execute_result",
"execution_count": 5,
"data": {
"text/plain": "<OpenAIObject at 0x7febaac01970> JSON: {\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hola.\"\n },\n \"finish_reason\": \"stop\"\n }\n ],\n \"model\": \"gpt-3.5-turbo-0613\",\n \"usage\": {\n \"prompt_tokens\": 31,\n \"completion_tokens\": 2,\n \"total_tokens\": 33\n }\n}"
},
"metadata": {}
}
]
},
{
"metadata": {
"ExecuteTime": {
"start_time": "2023-07-18T08:42:47.498022Z",
"end_time": "2023-07-18T08:42:47.517576Z"
},
"trusted": true
},
"id": "ec816a7d",
"cell_type": "code",
"source": "output_gpt[\"usage\"]",
"execution_count": 6,
"outputs": [
{
"output_type": "execute_result",
"execution_count": 6,
"data": {
"text/plain": "<OpenAIObject at 0x7febaac01490> JSON: {\n \"prompt_tokens\": 31,\n \"completion_tokens\": 2,\n \"total_tokens\": 33\n}"
},
"metadata": {}
}
]
},
{
"metadata": {
"ExecuteTime": {
"start_time": "2023-07-18T08:42:51.296951Z",
"end_time": "2023-07-18T08:42:51.329960Z"
},
"trusted": true
},
"id": "f78db741",
"cell_type": "code",
"source": "output[\"usage\"]",
"execution_count": 7,
"outputs": [
{
"output_type": "error",
"ename": "KeyError",
"evalue": "'usage'",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[7], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43moutput\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43musage\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\n",
"\u001b[0;31mKeyError\u001b[0m: 'usage'"
]
}
]
},
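{
"metadata": {},
"cell_type": "markdown",
"source": "The raw response object is dict-like (`OpenAIObject` subclasses `dict`), so a guarded lookup avoids the `KeyError`. The cell below is an illustrative sketch, not part of the original run:"
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "# OpenAIObject subclasses dict, so .get() returns None instead of raising\n# when a provider (here PaLM 2 via OpenRouter) reports no token usage.\nusage = output.get(\"usage\")\nprint(usage if usage is not None else \"No token usage reported for this model.\")",
"execution_count": null,
"outputs": []
},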
{
"metadata": {
"ExecuteTime": {
"start_time": "2023-07-18T08:43:27.676716Z",
"end_time": "2023-07-18T08:43:29.657346Z"
},
"trusted": true
},
"cell_type": "code",
"source": "from langchain.chat_models import ChatOpenAI\n\nchat = ChatOpenAI(\n    model_name=\"openai/gpt-3.5-turbo\",\n    temperature=2,\n    headers={\"HTTP-Referer\": OPENROUTER_REFERRER})\nchat.predict(\"Tell me a joke\")",
"execution_count": 9,
"outputs": [
{
"output_type": "stream",
"text": "WARNING! headers is not default parameter.\n headers was transferred to model_kwargs.\n Please confirm that headers is what you intended.\n",
"name": "stderr"
},
{
"output_type": "execute_result",
"execution_count": 9,
"data": {
"text/plain": "\"Sure, here's a classic LativiaQ think:\\nWhy don't scientists trust atoms?\\n Because they make up everything! Laughter\""
},
"metadata": {}
}
]
},
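{
"metadata": {},
"cell_type": "markdown",
"source": "Two things to note above: `temperature=2` is the maximum the OpenAI-style API accepts, which is why the joke comes back with garbled tokens such as \"LativiaQ\"; and the warning shows that `headers` is not a declared `ChatOpenAI` parameter, so LangChain forwards it through `model_kwargs`."
},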
{
"metadata": {
"ExecuteTime": {
"start_time": "2023-07-18T08:43:37.482302Z",
"end_time": "2023-07-18T08:43:40.285012Z"
},
"trusted": true
},
"id": "b82e29ba",
"cell_type": "code",
"source": "from langchain.chat_models import ChatOpenAI\n\nchat = ChatOpenAI(\n    model_name=\"google/palm-2-chat-bison\",\n    temperature=2,\n    headers={\"HTTP-Referer\": OPENROUTER_REFERRER}\n)\n\nchat.predict(\"Tell me a joke\")",
"execution_count": 10,
"outputs": [
{
"output_type": "stream",
"text": "WARNING! headers is not default parameter.\n headers was transferred to model_kwargs.\n Please confirm that headers is what you intended.\n",
"name": "stderr"
},
{
"output_type": "error",
"ename": "KeyError",
"evalue": "'usage'",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[10], line 9\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mchat_models\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m ChatOpenAI\n\u001b[1;32m 3\u001b[0m chat \u001b[38;5;241m=\u001b[39m ChatOpenAI(\n\u001b[1;32m 4\u001b[0m model_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgoogle/palm-2-chat-bison\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 5\u001b[0m temperature\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m2\u001b[39m,\n\u001b[1;32m 6\u001b[0m headers\u001b[38;5;241m=\u001b[39m{\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mHTTP-Referer\u001b[39m\u001b[38;5;124m\"\u001b[39m: OPENROUTER_REFERRER}\n\u001b[1;32m 7\u001b[0m )\n\u001b[0;32m----> 9\u001b[0m \u001b[43mchat\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpredict\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mTell me a joke\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chat_models/base.py:385\u001b[0m, in \u001b[0;36mBaseChatModel.predict\u001b[0;34m(self, text, stop, **kwargs)\u001b[0m\n\u001b[1;32m 383\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 384\u001b[0m _stop \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(stop)\n\u001b[0;32m--> 385\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m[\u001b[49m\u001b[43mHumanMessage\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcontent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtext\u001b[49m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstop\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_stop\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 386\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m result\u001b[38;5;241m.\u001b[39mcontent\n",
"File \u001b[0;32m~/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chat_models/base.py:349\u001b[0m, in \u001b[0;36mBaseChatModel.__call__\u001b[0;34m(self, messages, stop, callbacks, **kwargs)\u001b[0m\n\u001b[1;32m 342\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__call__\u001b[39m(\n\u001b[1;32m 343\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 344\u001b[0m messages: List[BaseMessage],\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 347\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 348\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m BaseMessage:\n\u001b[0;32m--> 349\u001b[0m generation \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 350\u001b[0m \u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[43mmessages\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstop\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstop\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcallbacks\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 351\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mgenerations[\u001b[38;5;241m0\u001b[39m][\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 352\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(generation, ChatGeneration):\n\u001b[1;32m 353\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m generation\u001b[38;5;241m.\u001b[39mmessage\n",
"File \u001b[0;32m~/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chat_models/base.py:125\u001b[0m, in \u001b[0;36mBaseChatModel.generate\u001b[0;34m(self, messages, stop, callbacks, tags, metadata, **kwargs)\u001b[0m\n\u001b[1;32m 123\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_managers:\n\u001b[1;32m 124\u001b[0m run_managers[i]\u001b[38;5;241m.\u001b[39mon_llm_error(e)\n\u001b[0;32m--> 125\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 126\u001b[0m flattened_outputs \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 127\u001b[0m LLMResult(generations\u001b[38;5;241m=\u001b[39m[res\u001b[38;5;241m.\u001b[39mgenerations], llm_output\u001b[38;5;241m=\u001b[39mres\u001b[38;5;241m.\u001b[39mllm_output)\n\u001b[1;32m 128\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m res \u001b[38;5;129;01min\u001b[39;00m results\n\u001b[1;32m 129\u001b[0m ]\n\u001b[1;32m 130\u001b[0m llm_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_combine_llm_outputs([res\u001b[38;5;241m.\u001b[39mllm_output \u001b[38;5;28;01mfor\u001b[39;00m res \u001b[38;5;129;01min\u001b[39;00m results])\n",
"File \u001b[0;32m~/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chat_models/base.py:115\u001b[0m, in \u001b[0;36mBaseChatModel.generate\u001b[0;34m(self, messages, stop, callbacks, tags, metadata, **kwargs)\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(messages):\n\u001b[1;32m 113\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 114\u001b[0m results\u001b[38;5;241m.\u001b[39mappend(\n\u001b[0;32m--> 115\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_generate_with_cache\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 116\u001b[0m \u001b[43m \u001b[49m\u001b[43mm\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 117\u001b[0m \u001b[43m \u001b[49m\u001b[43mstop\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstop\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 118\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_managers\u001b[49m\u001b[43m[\u001b[49m\u001b[43mi\u001b[49m\u001b[43m]\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mrun_managers\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 119\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 120\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 121\u001b[0m )\n\u001b[1;32m 122\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m, \u001b[38;5;167;01mException\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 123\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_managers:\n",
"File \u001b[0;32m~/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chat_models/base.py:262\u001b[0m, in \u001b[0;36mBaseChatModel._generate_with_cache\u001b[0;34m(self, messages, stop, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 259\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAsked to cache, but no cache found at `langchain.cache`.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 260\u001b[0m )\n\u001b[1;32m 261\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported:\n\u001b[0;32m--> 262\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_generate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 263\u001b[0m \u001b[43m \u001b[49m\u001b[43mmessages\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstop\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstop\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 264\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 265\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 266\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_generate(messages, stop\u001b[38;5;241m=\u001b[39mstop, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
"File \u001b[0;32m~/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chat_models/openai.py:372\u001b[0m, in \u001b[0;36mChatOpenAI._generate\u001b[0;34m(self, messages, stop, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 370\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m ChatResult(generations\u001b[38;5;241m=\u001b[39m[ChatGeneration(message\u001b[38;5;241m=\u001b[39mmessage)])\n\u001b[1;32m 371\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcompletion_with_retry(messages\u001b[38;5;241m=\u001b[39mmessage_dicts, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mparams)\n\u001b[0;32m--> 372\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_create_chat_result\u001b[49m\u001b[43m(\u001b[49m\u001b[43mresponse\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chat_models/openai.py:394\u001b[0m, in \u001b[0;36mChatOpenAI._create_chat_result\u001b[0;34m(self, response)\u001b[0m\n\u001b[1;32m 389\u001b[0m gen \u001b[38;5;241m=\u001b[39m ChatGeneration(\n\u001b[1;32m 390\u001b[0m message\u001b[38;5;241m=\u001b[39mmessage,\n\u001b[1;32m 391\u001b[0m generation_info\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mdict\u001b[39m(finish_reason\u001b[38;5;241m=\u001b[39mres\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfinish_reason\u001b[39m\u001b[38;5;124m\"\u001b[39m)),\n\u001b[1;32m 392\u001b[0m )\n\u001b[1;32m 393\u001b[0m generations\u001b[38;5;241m.\u001b[39mappend(gen)\n\u001b[0;32m--> 394\u001b[0m llm_output \u001b[38;5;241m=\u001b[39m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtoken_usage\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[43mresponse\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43musage\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel_name\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmodel_name}\n\u001b[1;32m 395\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m ChatResult(generations\u001b[38;5;241m=\u001b[39mgenerations, llm_output\u001b[38;5;241m=\u001b[39mllm_output)\n",
"\u001b[0;31mKeyError\u001b[0m: 'usage'"
]
}
]
},
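{
"metadata": {},
"cell_type": "markdown",
"source": "The traceback pinpoints the failure: `ChatOpenAI._create_chat_result` reads `response[\"usage\"]` unconditionally, and the PaLM 2 response has no such key. One possible workaround, sketched below against the LangChain version used here (it overrides a private method, so treat it as a stopgap rather than an official API), is to fill in an empty `usage` block before delegating to the parent:"
},
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "from langchain.chat_models import ChatOpenAI\n\nclass ChatOpenRouter(ChatOpenAI):\n    \"\"\"ChatOpenAI variant that tolerates responses without a 'usage' block.\"\"\"\n\n    def _create_chat_result(self, response):\n        # Copy to a plain dict and provide an empty usage block so the\n        # parent's response[\"usage\"] lookup no longer raises KeyError.\n        response = dict(response)\n        response.setdefault(\"usage\", {})\n        return super()._create_chat_result(response)\n\nchat = ChatOpenRouter(\n    model_name=\"google/palm-2-chat-bison\",\n    temperature=0,\n    headers={\"HTTP-Referer\": OPENROUTER_REFERRER})\nchat.predict(\"Tell me a joke\")",
"execution_count": null,
"outputs": []
},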
{
"metadata": {
"trusted": true
},
"cell_type": "code",
"source": "",
"execution_count": null,
"outputs": []
}
],
"metadata": {
"_draft": {
"nbviewer_url": "https://gist.github.com/alonsosilvaallende/7fa2eea59d4d79c1974804baba9a9921"
},
"gist": {
"id": "7fa2eea59d4d79c1974804baba9a9921",
"data": {
"description": "OpernRouter-Langchain-Palm2.ipynb",
"public": true
}
},
"kernelspec": {
"name": "conda-env-langchain-py",
"display_name": "Python [conda env:langchain]",
"language": "python"
},
"language_info": {
"name": "python",
"version": "3.11.3",
"mimetype": "text/x-python",
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"pygments_lexer": "ipython3",
"nbconvert_exporter": "python",
"file_extension": ".py"
},
"varInspector": {
"window_display": false,
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"library": "var_list.py",
"delete_cmd_prefix": "del ",
"delete_cmd_postfix": "",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"library": "var_list.r",
"delete_cmd_prefix": "rm(",
"delete_cmd_postfix": ") ",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
]
}
},
"nbformat": 4,
"nbformat_minor": 5
} |