Skip to content

Instantly share code, notes, and snippets.

@boxabirds
Created September 14, 2025 10:25
Show Gist options
  • Save boxabirds/cf4c4c2f467d0902f22fa7aab8cec38a to your computer and use it in GitHub Desktop.
Compressed context ReAct
# sketch of what history compression would look like
# if using approach in https://arxiv.org/abs/2509.06283
# source https://x.com/omarsar0/status/1966900691009720455?s=46
import dspy
from typing import Dict, Callable
class CompressedReAct(dspy.ReAct):
    """ReAct agent that collapses its tool history into a compressed summary.

    Instead of accumulating the full trajectory, the agent keeps one
    ``evolving_prompt`` string and, when it grows past a threshold, replaces
    the accumulated tool history with an LM-generated summary
    (approach sketched in https://arxiv.org/abs/2509.06283).
    """

    def __init__(self, tools: Dict[str, Callable], max_tokens: int = 4096,
                 compress_threshold: float = 0.8):
        """
        Args:
            tools: Mapping of tool name -> callable taking one string argument.
            max_tokens: Prompt budget. NOTE(review): the check in ``forward``
                compares *characters* against this, not tokens — confirm intent.
            compress_threshold: Fraction of ``max_tokens`` that triggers
                compression of the evolving prompt.
        """
        # ReAct's core signature for single-turn action generation.
        super().__init__("context -> action: str")
        self.tools = tools
        self.max_tokens = max_tokens
        self.compress_threshold = compress_threshold
        # Simple compressor: reuse the LM for an ad-hoc summary (no separate sig).
        self.evolving_prompt = None  # Collapsed state carried across steps.

    def forward(self, query: str) -> Dict[str, str]:
        """Run the ReAct loop with inline history compression.

        Returns:
            ``{"answer": ...}`` — either the FINAL answer or, after
            ``max_steps`` iterations, the whole evolving prompt.
        """
        self.evolving_prompt = query
        steps = 0
        max_steps = 20
        while steps < max_steps:
            # Proactive compression check: summarize history before it overflows.
            # NOTE(review): len() counts characters, not tokens — verify budget math.
            if len(self.evolving_prompt) > self.max_tokens * self.compress_threshold:
                comp_prompt = dspy.ChainOfThought("history -> summary: str")(
                    history=f"Summarize tool history concisely: {self.evolving_prompt}"
                )
                self.evolving_prompt = f"{query}; [Compressed: {comp_prompt.summary}]"
            # BUG FIX: dspy.InputField is a *signature field declaration*, not a
            # runtime value holder — calling it with context=... and reading
            # .context fails at runtime. Build the context string directly and
            # feed the collapsed prompt to ReAct for a fresh single-turn action.
            context = (self.evolving_prompt
                       + "\n[Reason, then act: TOOL name(args) or FINAL answer.]")
            prediction = super().forward(context=context)
            action = prediction.action.strip()
            if "FINAL" in action:
                return {"answer": action.split("FINAL:")[-1].strip()}
            elif "TOOL" in action:
                # Parse/execute (ReAct-style: assumes "TOOL search(quantum)").
                tool_str = action.replace("TOOL ", "")
                if "(" in tool_str:
                    tool_name, arg_str = tool_str.split("(", 1)
                    arg_str = arg_str.rstrip(")")
                else:
                    tool_name, arg_str = tool_str, ""
                tool_name = tool_name.strip()  # Tolerate stray whitespace from the LM.
                tool_out = self.tools.get(tool_name, lambda x: f"Mock {tool_name}({x})")(arg_str)
                # Inline collapse — append result to the single prompt, no history bloat.
                self.evolving_prompt += f"; TOOL {tool_name}({arg_str}): {tool_out}"
                steps += 1
            else:
                self.evolving_prompt += "; [Invalid action—retry]"
                steps += 1
        return {"answer": f"Max steps: {self.evolving_prompt}"}
# Mock tools (same as before)
def mock_search(q: str) -> str:
    """Return canned search results; *q* is accepted for interface parity but unused."""
    # FIX: original used an f-string with no placeholders — plain literal suffices.
    return "Quantum history: 1980s Feynman; 1994 Shor algo."


tools = {"search": mock_search}
# Usage
# NOTE(review): dspy.OpenAI was removed in recent DSPy releases; the current API
# is dspy.configure(lm=dspy.LM("openai/gpt-4o-mini")) — confirm installed version.
dspy.settings.configure(lm=dspy.OpenAI(model="gpt-4o-mini"))
agent = CompressedReAct(tools=tools)
# Calling the module invokes forward(); result is {"answer": ...}.
result = agent(query="Research quantum computing history.")
print(result["answer"])
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment