Skip to content

Instantly share code, notes, and snippets.

@thoraxe
Created October 26, 2023 19:03
Show Gist options
  • Save thoraxe/51334046ee80f7ea5d5de7e5da0dc057 to your computer and use it in GitHub Desktop.
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType, ZeroShotAgent, AgentExecutor
from langchain.llms import OpenAI
from model_context import get_watsonx_predictor
from langchain.llms import Ollama
from langchain.llms import HuggingFaceTextGenInference
import langchain
#langchain.debug = True
##### temporary tools
from langchain.tools.base import BaseTool
class YAMLGeneratorTool(BaseTool):
    """Stub tool that returns a canned ClusterAutoscaler manifest.

    Registered with the agent as ``yaml_generator_tool``; the agent picks it
    based on ``description``. The YAML is hard-coded — ``tool_input`` is
    ignored — so this is a placeholder for a real generator.
    """

    # Explicit ``str`` annotations so these are recognized as pydantic field
    # overrides of BaseTool.name / BaseTool.description (required once
    # LangChain moved to strict pydantic field declaration).
    name: str = "yaml_generator_tool"
    description: str = "Useful for generating YAML for kubernetes and openshift"

    def _run(self, tool_input: str) -> str:
        """Return a fixed ClusterAutoscaler YAML document (input ignored)."""
        # Nesting restored: ``name`` belongs under ``metadata`` and
        # ``maxNodesTotal`` under ``spec.resourceLimits`` — the flat form
        # is not a valid ClusterAutoscaler manifest.
        return '''apiVersion: "autoscaling.openshift.io/v1"
kind: "ClusterAutoscaler"
metadata:
  name: "default"
spec:
  resourceLimits:
    maxNodesTotal: 24
'''
class DocsSummaryTool(BaseTool):
    """Stub RAG tool that returns a canned documentation summary.

    Registered with the agent as ``docs_summary_tool``; the agent picks it
    based on ``description``. The summary is hard-coded — ``tool_input`` is
    ignored — standing in for a real retrieval step.
    """

    # Explicit ``str`` annotations so these are recognized as pydantic field
    # overrides of BaseTool.name / BaseTool.description (required once
    # LangChain moved to strict pydantic field declaration).
    name: str = "docs_summary_tool"
    description: str = "Documentation useful for determining what steps are required to perform an OpenShift or Kubernetes task"

    def _run(self, tool_input: str) -> str:
        """Return the fixed one-step summary (input ignored)."""
        return "1. Create a ClusterAutoscaler YAML. The cluster maximum size is required."
####
#### model setup
# Alternative backends kept for experimentation; exactly one ``llm`` line is
# active at a time.
# llm = get_watsonx_predictor(model="tiiuae/falcon-180b", min_new_tokens=5, verbose=True)
# Active LLM: watsonx-hosted CodeLlama 34B Instruct.
llm = get_watsonx_predictor(
    model="codellama/codellama-34b-instruct", min_new_tokens=5, verbose=True
)
# llm = Ollama(
#     model="mistral:7b-instruct",
#     base_url="http://ollama-rcs5.apps.rhods-internal.61tk.p1.openshiftapps.com",
#     verbose=True,
#     temperature=0.05
# )
# llm = OpenAI(temperature=0, verbose=True)
# llm = HuggingFaceTextGenInference(
#     inference_server_url="http://tgis-deployment-rcs5.apps.rhods-internal.61tk.p1.openshiftapps.com",
#     max_new_tokens=200,
#     temperature=0.01,
#     repetition_penalty=1.03,
# )
####
#### tool setup
# load_tools() only resolves built-in tool names, so the custom tools defined
# above are passed as instances instead.
# tools = load_tools(["yaml_generator_tool", "llm-math"], llm=llm)
tools = [DocsSummaryTool(),YAMLGeneratorTool()]
# ReAct-style zero-shot agent: the LLM chooses tools from their descriptions.
agent_executor = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
# Single agent run; verbose=True prints the Thought/Action/Observation trace.
agent_executor.invoke(
    {"input": "How can I configure my cluster for autoscaling up to 10 nodes?"}
)
####
# prefix = """
# [INST]
# Answer the following questions as best you can. You have access to the following tools:
#
# """
#
# format_instructions = """
# Use the following format:
#
# Thought: you should always think about what to do
# Action: the action to take, should be one of [{tool_names}]
# Action Input: the input to the action
# Observation: the result of the action
# ... (this Thought/Action/Action Input/Observation can repeat N times)
# Thought: I now know the final answer
# Final Answer: the final answer to the original input question
# [/INST]
#
# """
#
# suffix = """
# Question: {input}
# Thought:{agent_scratchpad}
# """
# agent = ZeroShotAgent.from_llm_and_tools(llm=llm, tools=tools, prefix=prefix, format_instructions=format_instructions, suffix=suffix, verbose=True)
# agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment