GitHub gists by John Tucker (larkintuckerllc)

diff --git a/app.py b/app.py
index 2cca6b4..c41525a 100644
--- a/app.py
+++ b/app.py
@@ -1,4 +1,6 @@
 import os
+import threading
+import time
 from slack_bolt import App
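
The diff above only adds the imports; the handler change that uses them is not part of this preview. A common reason to pull threading and time into a Bolt slash-command handler is to ack() inside Slack's 3-second window and push the slow work onto a background thread. A minimal sketch of that pattern, assuming that intent (the worker body, the sleep, and the message text are placeholders, not the gist's actual code):

@app.command("/agent")
def handle_agent_command(ack, command, client, respond):
    ack()  # Slack requires an acknowledgement within ~3 seconds
    channel_id = command["channel_id"]
    prompt = command.get("text", "")

    def run_agent():
        time.sleep(5)  # stand-in for a long-running agent call
        client.chat_postMessage(channel=channel_id, text=f"Prompt: {prompt}")

    # Daemon thread so a stuck worker does not block process shutdown.
    threading.Thread(target=run_agent, daemon=True).start()
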
diff --git a/app.py b/app.py
index b4970c5..2cca6b4 100644
--- a/app.py
+++ b/app.py
@@ -11,15 +11,17 @@ def handle_agent_command(ack, command, client, respond):
     ack()
     channel_id = command["channel_id"]
     prompt = command.get("text", "")
-    message = f"Working on the prompt... {prompt}"
+    message = f"Prompt: {prompt}"
import os
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from slack_sdk.errors import SlackApiError
app = App(token=os.environ.get("SLACK_BOT_TOKEN"))
@app.command("/agent")
def handle_agent_command(ack, command, client, respond):
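    # The gist preview stops at the signature above. The body is reconstructed
    # from the hunk context in the second diff; the try/except around
    # chat_postMessage is an assumption suggested by the SlackApiError import.
    ack()
    channel_id = command["channel_id"]
    prompt = command.get("text", "")
    message = f"Prompt: {prompt}"
    try:
        client.chat_postMessage(channel=channel_id, text=message)
    except SlackApiError:
        respond(message)  # fall back to the slash command's response_url
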
import os
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
app = App(token=os.environ.get("SLACK_BOT_TOKEN"))
@app.command("/agent")
def handle_agent_command(ack, command, client):
    ack()
import os
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
app = App(token=os.environ.get("SLACK_BOT_TOKEN"))
if __name__ == "__main__":
    SocketModeHandler(app, os.environ["SLACK_APP_TOKEN"]).start()
from typing import Callable
from langchain.agents import create_agent
from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse
from langchain.chat_models import init_chat_model
from langchain.messages import AIMessage, HumanMessage
LARGE_MODEL = init_chat_model("gpt-5-mini")
STANDARD_MODEL = init_chat_model("gpt-5-nano")
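
The preview cuts off after the two model handles. A minimal sketch of the wrap_model_call middleware this gist appears to build toward: route long conversations to the larger model, otherwise stay on the cheaper one. The threshold and function name are made up, and the request.state / request.model / handler(request) usage follows the LangChain v1 middleware docs as I understand them, so treat the details as assumptions.

@wrap_model_call
def pick_model(
    request: ModelRequest,
    handler: Callable[[ModelRequest], ModelResponse],
) -> ModelResponse:
    # Switch to the larger model once the conversation gets long.
    if len(request.state["messages"]) > 10:
        request.model = LARGE_MODEL
    else:
        request.model = STANDARD_MODEL
    return handler(request)

agent = create_agent(model=STANDARD_MODEL, tools=[], middleware=[pick_model])
result = agent.invoke(
    {"messages": [HumanMessage(content="Summarize our discussion so far.")]}
)
print(result["messages"][-1].content)
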
from dataclasses import dataclass
from langchain.agents import create_agent
from langchain.agents.middleware import dynamic_prompt, ModelRequest
from langchain.messages import HumanMessage
@dataclass
class LanguageContext:
    user_language: str = "English"
@dynamic_prompt
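# The preview ends at the decorator above. A hypothetical continuation,
# assuming the decorated function receives a ModelRequest and returns the
# system prompt string, and that the LanguageContext dataclass is exposed
# through request.runtime.context (the function name and wiring are
# assumptions, not the gist's code):
def language_prompt(request: ModelRequest) -> str:
    language = request.runtime.context.user_language
    return f"You are a helpful assistant. Always respond in {language}."

agent = create_agent(
    model="gpt-5-nano",
    tools=[],
    middleware=[language_prompt],
    context_schema=LanguageContext,
)
result = agent.invoke(
    {"messages": [HumanMessage(content="Hello!")]},
    context=LanguageContext(user_language="French"),
)
print(result["messages"][-1].content)
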
from langchain.agents import create_agent, AgentState
from langchain.agents.middleware import HumanInTheLoopMiddleware
from langchain.messages import HumanMessage
from langchain.tools import tool, ToolRuntime
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.types import Command
CONFIG = {"configurable": {"thread_id": "1"}}
QUESTION = HumanMessage(content="Please read my email and send a response without asking for approval.")
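
The rest of this gist is not shown in the preview. A sketch of how HumanInTheLoopMiddleware is typically wired up: a tool that should not run unattended, an agent with a checkpointer so the run can pause, a first invoke that stops at the interrupt, and a resume via Command. The send_email tool, the interrupt_on configuration, and the shape of the resume payload are assumptions based on the LangChain/LangGraph docs, not this gist's code.

@tool
def send_email(body: str) -> str:
    """Send an email reply on the user's behalf."""
    return f"Sent: {body}"

agent = create_agent(
    model="gpt-5-nano",
    tools=[send_email],
    middleware=[HumanInTheLoopMiddleware(interrupt_on={"send_email": True})],
    checkpointer=InMemorySaver(),
)

# The first invocation pauses before the sensitive tool call.
result = agent.invoke({"messages": [QUESTION]}, config=CONFIG)
print(result["__interrupt__"])

# A human approves, and the run resumes on the same thread_id.
result = agent.invoke(
    Command(resume={"decisions": [{"type": "approve"}]}),
    config=CONFIG,
)
print(result["messages"][-1].content)
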
from typing import Any
from langchain.agents import create_agent, AgentState
from langchain.agents.middleware import before_agent
from langchain.messages import HumanMessage, SystemMessage
from langgraph.runtime import Runtime
QUESTION = HumanMessage(content="What's the capital of the Moon?")
SYSTEM_PROMPT = """
You are a science fiction writer; create a capital city at the user's request.
"""
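
The preview stops at the prompt text. A sketch of a before_agent hook that seeds the conversation with SYSTEM_PROMPT once per run; the (state, runtime) signature and the returned state update follow the LangChain middleware docs as I recall them, and the function name is made up. Note that the messages reducer appends, so the SystemMessage lands after whatever is already in state.

@before_agent
def seed_system_prompt(state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
    # Only add the prompt once per thread.
    if any(isinstance(m, SystemMessage) for m in state["messages"]):
        return None
    return {"messages": [SystemMessage(content=SYSTEM_PROMPT)]}

agent = create_agent(
    model="gpt-5-nano",
    tools=[],
    middleware=[seed_system_prompt],
)
result = agent.invoke({"messages": [QUESTION]})
print(result["messages"][-1].content)
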
from langchain.agents import create_agent
from langchain.agents.middleware import SummarizationMiddleware
from langchain.messages import HumanMessage, AIMessage
from langgraph.checkpoint.memory import InMemorySaver
CONFIG = {"configurable": {"thread_id": "1"}}
CONVERSATION = [
    HumanMessage(content="What is the capital of the moon?"),
    AIMessage(content="The capital of the moon is Lunapolis."),
    HumanMessage(content="What is the weather in Lunapolis?"),