This Continue configuration targets Qwen3 Coder 30B served locally through LM Studio.
`~/.continue/config.yaml`
```yaml
name: Local Assistant
version: 1.0.0
schema: v1
models:
  - name: Qwen3 Coder 30B (Chat+Agent)
    provider: openai # or lmstudio; either is okay, apiBase controls the server
    apiBase: http://localhost:1234/v1/
    model: qwen/qwen3-coder-30b
    roles: [chat, edit, apply]   # chat role is used for Agent/Plan
    capabilities: [tool_use]     # critical for Agent/Plan
    defaultCompletionOptions:
      temperature: 0.1
      maxTokens: 1500
  - name: Qwen3 Coder 30B (Autocomplete)
    provider: openai
    apiBase: http://localhost:1234/v1/
    model: qwen/qwen3-coder-30b
    roles: [autocomplete]
    autocompleteOptions:
      debounceDelay: 300
      maxPromptTokens: 1024
context:
  - provider: code
  - provider: docs
  - provider: diff
  - provider: terminal
  - provider: problems
  - provider: folder
  - provider: codebase
```
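
Before pointing Continue at this config, it can help to confirm that LM Studio is actually serving the model under the exact identifier used above. The snippet below is a minimal sketch, assuming LM Studio's OpenAI-compatible server is listening on `localhost:1234` and that Python with the `requests` package is available; `/models` and `/chat/completions` are the standard OpenAI-compatible routes that LM Studio exposes.

```python
# Sketch: verify the LM Studio endpoint and model id before wiring up Continue.
# Assumes the OpenAI-compatible server is running on localhost:1234 and that
# the "requests" package is installed; names mirror the config above.
import requests

API_BASE = "http://localhost:1234/v1"
MODEL_ID = "qwen/qwen3-coder-30b"

# 1) List the models the server currently exposes and check ours is among them.
models = requests.get(f"{API_BASE}/models", timeout=10).json()
served = [m["id"] for m in models.get("data", [])]
print("Served models:", served)
assert MODEL_ID in served, f"{MODEL_ID} is not loaded in LM Studio"

# 2) Send a one-shot chat completion, roughly what Continue's chat role does.
resp = requests.post(
    f"{API_BASE}/chat/completions",
    json={
        "model": MODEL_ID,
        "messages": [{"role": "user", "content": "Reply with the word 'ready'."}],
        "temperature": 0.1,
        "max_tokens": 20,
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```

If the assertion fails, load the model in LM Studio's server tab (or adjust `model:` in the config) so the identifier matches exactly.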