
@Tsangares
Created May 3, 2025 01:02
Palestine Emoji Moderator
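# Dependencies (inferred from the imports below; the usual PyPI install names
# are assumed): python-dotenv, discord.py, ollama, pydantic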
from dotenv import load_dotenv
import os
import discord
import asyncio
from ollama import AsyncClient
from pydantic import BaseModel
# === Load environment variables ===
load_dotenv()
LLAMA_HOST = os.getenv("LLAMA")
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
LOG_CHANNEL_ID = int(os.getenv("LOG_CHANNEL_ID") or 0)
MODEL = "deepseek-r1:7b"
if not LLAMA_HOST:
    raise RuntimeError("Please set LLAMA in your .env file")
if not DISCORD_TOKEN:
    raise RuntimeError("Please set DISCORD_TOKEN in your .env file")
if not LOG_CHANNEL_ID:
    raise RuntimeError("Please set LOG_CHANNEL_ID in your .env file")
# === LLM Client ===
client = AsyncClient(host=LLAMA_HOST)
# === System Prompt ===
SYSTEM_PROMPT = """
You are an impartial debate monitor. Read only the *latest* user message and, for each indicator below,
set its flag to true **only** if there is *strong evidence* for it.
Indicators and definitions:
- logical_fallacy → a clear misuse of reasoning (e.g. straw-man, ad hominem).
- antisemitic → explicit hostility or hate speech targeting Jews.
- islamophobic → explicit hostility or hate speech targeting Muslims.
- pro_israel → explicit support or praise for Israel or its government.
- anti_israel → explicit negative or hostile remarks about Israel or its government.
- pro_palestine → explicit support or praise for Palestinians or their cause.
- anti_palestine → explicit negative or hostile remarks about Palestinians or their cause.
- overgeneralization → sweeping statements using “all”, “none”, “always”, etc.
- toxicity → insulting, threatening, or otherwise uncivil language.
For **each** flag==true supply a 1–2 sentence `reason`; if false, `reason` must be an empty string.
Always return **only** this JSON schema—no extra keys, no commentary:
```json
{
  "logical_fallacy": {"flag": bool, "emoji": "🔀", "reason": string},
  "antisemitic": {"flag": bool, "emoji": "✡️", "reason": string},
  "islamophobic": {"flag": bool, "emoji": "☪️", "reason": string},
  "pro_israel": {"flag": bool, "emoji": "🇮🇱", "reason": string},
  "anti_israel": {"flag": bool, "emoji": "🚫", "reason": string},
  "pro_palestine": {"flag": bool, "emoji": "🇵🇸", "reason": string},
  "anti_palestine": {"flag": bool, "emoji": "⛔️", "reason": string},
  "overgeneralization": {"flag": bool, "emoji": "💬", "reason": string},
  "toxicity": {"flag": bool, "emoji": "☣️", "reason": string}
}
```
Return nothing except the JSON.
""".strip()
# === Pydantic Schemas ===
class MetricFlag(BaseModel):
    flag: bool
    emoji: str
    reason: str

class DebateMonitorResponse(BaseModel):
    logical_fallacy: MetricFlag
    antisemitic: MetricFlag
    islamophobic: MetricFlag
    pro_israel: MetricFlag
    anti_israel: MetricFlag
    pro_palestine: MetricFlag
    anti_palestine: MetricFlag
    overgeneralization: MetricFlag
    toxicity: MetricFlag
# === Async Decision Function ===
async def get_decision(text: str) -> DebateMonitorResponse:
    resp = await client.chat(
        model=MODEL,
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": text},
        ],
        format=DebateMonitorResponse.model_json_schema(),
        keep_alive=-1,
    )
    return DebateMonitorResponse.model_validate_json(resp.message.content)
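# Standalone usage sketch (hypothetical input; assumes the Ollama host is
# reachable and the model has been pulled):
#
#   result = asyncio.run(get_decision("All politicians always lie."))
#   if result.overgeneralization.flag:  # likely true given the prompt's definition
#       print(result.overgeneralization.reason)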
# === Discord Bot Setup ===
intents = discord.Intents.default()
intents.message_content = True
bot = discord.Client(intents=intents)
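# Note: message_content is a privileged intent; it must also be enabled for
# the bot in the Discord Developer Portal, or message.content arrives empty.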
IGNORE_CHANNEL_IDS = [
    int(x) for x in os.getenv("IGNORE_CHANNEL_IDS", "").split(",") if x.strip()
]
print(f"[Config] Ignoring channels: {IGNORE_CHANNEL_IDS}")
@bot.event
async def on_ready():
    print(f"[Bot] Logged in as {bot.user} (ID: {bot.user.id})")
@bot.event
async def on_message(message):
    # Ignore messages from bots (including this one)
    if message.author.bot:
        return
    # Ignore configured channels entirely
    if message.channel.id in IGNORE_CHANNEL_IDS:
        return
    # Log the incoming message
    print(f"[Discord] {message.author} in #{message.channel}: {message.content!r}")
    try:
        # Analyze the message with the LLM
        analysis = await get_decision(message.content)
        metrics = analysis.model_dump()
        # Build the log text
        lines = [
            f"**Message** from {message.author} in <#{message.channel.id}>:\n```{message.content}```",
            "**Flags:**",
        ]
        for key, m in metrics.items():
            if m["flag"]:
                lines.append(f"- **{key}** {m['emoji']}: {m['reason']}")
        if len(lines) == 2:
            lines.append("_No flags raised._")
        log_text = "\n".join(lines)
        # Send to the designated log channel
        log_channel = bot.get_channel(LOG_CHANNEL_ID)
        if log_channel:
            await log_channel.send(log_text)
        else:
            print(f"[Warning] Log channel {LOG_CHANNEL_ID} not found")
    except Exception as e:
        print(f"[Error] processing message: {e}")
if __name__ == "__main__":
    bot.run(DISCORD_TOKEN)