-
-
Save executeautomation/1407525dc0d542fe775f2f61e68f5a05 to your computer and use it in GitHub Desktop.
{
  "meta": {
    "lastTouchedVersion": "2026.1.30",
    "lastTouchedAt": "2026-02-01T16:48:36.938Z"
  },
  "wizard": {
    "lastRunAt": "2026-02-01T16:48:36.935Z",
    "lastRunVersion": "2026.1.30",
    "lastRunCommand": "onboard",
    "lastRunMode": "local"
  },
  "models": {
    "providers": {
      "ollama": {
        "baseUrl": "http://localhost:11434/v1",
        "apiKey": "ollama-local",
        "api": "openai-completions",
        "models": [
          {
            "id": "gpt-oss:20b",
            "name": "gpt-oss:20b",
            "reasoning": true,
            "input": [
              "text",
              "image"
            ],
            "cost": {
              "input": 0,
              "output": 0,
              "cacheRead": 0,
              "cacheWrite": 0
            },
            "contextWindow": 260000,
            "maxTokens": 260000
          }
        ]
      }
    }
  },
  "agents": {
    "defaults": {
      "model": {
        "primary": "ollama/gpt-oss:20b"
      },
      "workspace": "/home/asus/.openclaw/workspace",
      "compaction": {
        "mode": "safeguard"
      },
      "maxConcurrent": 4,
      "subagents": {
        "maxConcurrent": 8
      },
      "models": {
        "ollama/gpt-oss:20b": {}
      }
    }
  },
  "messages": {
    "ackReactionScope": "group-mentions"
  },
  "commands": {
    "native": "auto",
    "nativeSkills": "auto"
  },
  "hooks": {
    "internal": {
      "enabled": true,
      "entries": {
        "session-memory": {
          "enabled": true
        }
      }
    }
  },
  "channels": {
    "telegram": {
      "enabled": true,
      "dmPolicy": "pairing",
      "botToken": "<REDACTED-ROTATE-THIS-TOKEN>",
      "groupPolicy": "allowlist",
      "streamMode": "partial"
    }
  },
  "gateway": {
    "port": 18789,
    "mode": "local",
    "bind": "loopback",
    "auth": {
      "mode": "token",
      "token": "<REDACTED>"
    },
    "tailscale": {
      "mode": "off",
      "resetOnExit": false
    }
  },
  "skills": {
    "install": {
      "nodeManager": "npm"
    }
  },
  "plugins": {
    "entries": {
      "telegram": {
        "enabled": true
      },
      "whatsapp": {
        "enabled": false
      }
    }
  },
  "browser": {
    "executablePath": "/usr/bin/brave-browser"
  }
}
Don't put your Telegram tokens in open source.
Bro, you've got to make a new Telegram token ASAP.
@ghillie575 @ygypt
You don't need to worry about the token being leaked.
Hey, I can’t configure my bot.
I have two PCs on the same network. LM Studio is running on one PC, and the bot is running on another.
I have the endpoint:
http://192.168.100.***:1234/api/v1/models
The bot doesn’t see it, even though I can successfully get a response from the endpoint (from the bot's PC).
The Ollama connection is working, but OpenClaw is not really working anymore...
I'm getting random messages and tool-call errors... :(
depending
The Ollama connection is working, but OpenClaw is not really working anymore... I'm getting random messages and tool-call errors... :(
I had the same issue with llama3.2:latest; to fix it, I had to change "reasoning": true to "reasoning": false.
I am using an ASUS Ascent GX10 machine as my local machine to run Ollama.