start_ollama_rtx2060s.ps1
# PowerShell script for Ollama with CUDA support for RTX 2060 SUPER
# Add status window
Add-Type -AssemblyName System.Windows.Forms
Add-Type -AssemblyName System.Drawing
$statusForm = New-Object System.Windows.Forms.Form
$statusForm.Text = "Ollama Status - RTX 2060 SUPER"
$statusForm.Size = New-Object System.Drawing.Size(500,300)
$statusForm.StartPosition = "CenterScreen"
$statusBox = New-Object System.Windows.Forms.TextBox
$statusBox.Multiline = $true
$statusBox.ScrollBars = "Vertical"
$statusBox.Dock = "Fill"
$statusBox.ReadOnly = $true
$statusBox.Font = New-Object System.Drawing.Font("Consolas", 10)
$statusForm.Controls.Add($statusBox)
# Show the form
$statusForm.Show()
# Create a function to update status
function Update-Status {
    param([string]$text, [string]$color = "White")
    $timestamp = Get-Date -Format 'HH:mm:ss'
    $statusBox.AppendText("$timestamp - $text`r`n")
    $statusBox.ScrollToCaret()
    # Also output to console with color
    Write-Host $text -ForegroundColor $color
}
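# Example call (illustrative, mirrors the calls below); the color must be a valid ConsoleColor name:
#   Update-Status "Model download complete" "Green"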
Update-Status "===================================" "Cyan"
Update-Status " Starting Local AI Environment" "Cyan"
Update-Status "===================================" "Cyan"
# Get GPU information
$gpuInfo = nvidia-smi --query-gpu=name,driver_version,memory.total,memory.free --format=csv,noheader
Update-Status "GPU Information: $gpuInfo" "Green"
# Set CUDA path
$env:CUDA_PATH = "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8"
$env:CUDA_HOME = $env:CUDA_PATH
$env:Path = "$env:CUDA_PATH\bin;$env:CUDA_PATH\libnvvp;$env:Path"
# RTX 2060 SUPER optimized settings
$env:PYTORCH_CUDA_ALLOC_CONF = "max_split_size_mb:1024,garbage_collection_threshold:0.8"
$env:CUDA_VISIBLE_DEVICES = "0"
$env:CUDA_LAUNCH_BLOCKING = "1"
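# Optional sanity checks (illustrative, not part of the original flow):
#   & "$env:CUDA_PATH\bin\nvcc.exe" --version               # toolkit reachable at CUDA_PATH
#   Get-ChildItem Env: | Where-Object Name -like "*CUDA*"   # variables visible in this session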
Update-Status "Configuring for RTX 2060 SUPER (8GB VRAM)..." "Yellow"
Update-Status "Checking CUDA availability..." "Yellow"
$cudaScript = @"
import torch
print('CUDA Available:', torch.cuda.is_available())
if torch.cuda.is_available():
print('PyTorch CUDA Version:', torch.version.cuda)
print('CUDA Device Count:', torch.cuda.device_count())
print('CUDA Device Name:', torch.cuda.get_device_name(0))
print('CUDA Capability:', torch.cuda.get_device_capability(0))
# Test CUDA with a simple tensor operation
x = torch.rand(5, 3).cuda()
print('CUDA Tensor Test:', x.device)
print('CUDA is working correctly!')
"@
$cudaScript | Out-File -FilePath "$env:TEMP\check_cuda.py" -Encoding utf8
$cudaCheck = python "$env:TEMP\check_cuda.py"
foreach ($line in $cudaCheck) {
    Update-Status $line
}
# Properly check if CUDA is available
$cudaAvailable = $false
foreach ($line in $cudaCheck) {
    if ($line -match "CUDA Available: True") {
        $cudaAvailable = $true
        break
    }
}
if (-not $cudaAvailable) {
    Update-Status "WARNING: CUDA is not available. PyTorch will use CPU only. This will significantly slow down model performance. Please check your NVIDIA drivers and CUDA installation." "Red"
    Start-Sleep -Seconds 5
} else {
    Update-Status "CUDA is properly configured and working!" "Green"
}
# Check if interpreter is in PATH
if (-not (Get-Command "interpreter" -ErrorAction SilentlyContinue)) {
    Update-Status "Open Interpreter not found. Installing..." "Yellow"
    pip install open-interpreter
    if ($LASTEXITCODE -ne 0) {
        Update-Status "Failed to install Open Interpreter. Please install it manually with 'pip install open-interpreter'" "Red"
        Read-Host "Press Enter to exit"
        exit
    }
}
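# Optional (illustrative): confirm the package after installation
#   pip show open-interpreter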
# Function to check and pull Ollama models if needed
function Check-OllamaModel {
    param([string]$modelName)
    Update-Status "Checking if model $modelName is available..." "Yellow"
    $modelExists = $false
    try {
        $models = Invoke-RestMethod -Uri "http://localhost:11434/api/tags" -Method Get
        foreach ($model in $models.models) {
            if ($model.name -eq $modelName) {
                $modelExists = $true
                Update-Status "Model $modelName is already available." "Green"
                break
            }
        }
    } catch {
        Update-Status "Error checking models: $_" "Red"
    }
    if (-not $modelExists) {
        Update-Status "Model $modelName not found. Pulling..." "Yellow"
        try {
            $pullUrl = "http://localhost:11434/api/pull"
            $body = @{
                name = $modelName
            } | ConvertTo-Json
            $progressPreference = 'SilentlyContinue'
            $pullResponse = Invoke-RestMethod -Uri $pullUrl -Method Post -Body $body -ContentType "application/json"
            $progressPreference = 'Continue'
            Update-Status "Model $modelName pulled successfully." "Green"
        } catch {
            Update-Status "Error pulling model: $_" "Red"
        }
    }
}
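# Example (illustrative): the same helper can pre-pull any other tag, e.g.
#   Check-OllamaModel "llama3:8b"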
# Check if Ollama is already running
$ollamaRunning = Get-Process -Name "ollama" -ErrorAction SilentlyContinue
if ($ollamaRunning) {
    Update-Status "Ollama is already running." "Yellow"
} else {
    Update-Status "Starting Ollama Server..." "Yellow"
    # Set Ollama environment variables optimized for RTX 2060 SUPER
    $env:OLLAMA_HOST = "0.0.0.0:11434"
    $env:OLLAMA_CUDA = "1"
    $env:OLLAMA_KEEP_ALIVE = "5m"
    $env:OLLAMA_NUM_GPU_LAYERS = "32"
    # Start Ollama in a new process
    Start-Process -FilePath "C:\Users\maste\AppData\Local\Programs\Ollama\ollama.exe" -ArgumentList "serve" -NoNewWindow
    Update-Status "Waiting for Ollama server to initialize..." "Yellow"
    $maxAttempts = 30
    $attempt = 0
    $ready = $false
    while (-not $ready -and $attempt -lt $maxAttempts) {
        $attempt++
        Update-Status "Checking if Ollama is ready... Attempt $attempt of $maxAttempts"
        try {
            $response = Invoke-WebRequest -Uri "http://localhost:11434/api/version" -Method GET -ErrorAction SilentlyContinue
            if ($response.StatusCode -eq 200) {
                $ready = $true
                Update-Status "Ollama server is ready!" "Green"
                Update-Status "Ollama version: $($response.Content)" "Cyan"
            }
        } catch {
            Start-Sleep -Seconds 1
        }
    }
    if (-not $ready) {
        Update-Status "Timed out waiting for Ollama server. Please check if Ollama is installed correctly." "Red"
        Read-Host "Press Enter to exit"
        exit
    }
}
# Start auto-recovery job for Ollama
$watcherJob = Start-Job -ScriptBlock {
    while ($true) {
        if (-not (Get-Process -Name "ollama" -ErrorAction SilentlyContinue)) {
            Write-Host "Ollama process not found. Restarting..." -ForegroundColor Red
            Start-Process -FilePath "C:\Users\maste\AppData\Local\Programs\Ollama\ollama.exe" -ArgumentList "serve" -NoNewWindow
            Start-Sleep -Seconds 10
        }
        Start-Sleep -Seconds 30
    }
}
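# Note: Write-Host output from a background job is not echoed to this console automatically.
# To inspect the watcher from an interactive session (illustrative):
#   Receive-Job -Job $watcherJob -Keep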
# Check for the model you want to use
Check-OllamaModel "phi3:mini"
# Display GPU memory status before starting interpreter
Update-Status "Current GPU Memory Status:" "Yellow"
$gpuStatus = nvidia-smi --query-gpu=memory.used,memory.free,memory.total --format=csv
foreach ($line in $gpuStatus) {
    Update-Status $line
}
# Start performance monitoring
$monitorJob = Start-Job -ScriptBlock {
    while ($true) {
        $gpuInfo = nvidia-smi --query-gpu=utilization.gpu,memory.used,memory.free --format=csv,noheader
        Write-Host "GPU: $gpuInfo" -ForegroundColor Cyan
        Start-Sleep -Seconds 10
    }
}
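# Optional (illustrative): confirm both background jobs stayed alive during the session.
#   Get-Job | Format-Table Id, Name, State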
Update-Status ""
Update-Status "===================================" "Cyan"
Update-Status " Starting Open Interpreter" "Cyan"
Update-Status "===================================" "Cyan"
Update-Status "Using model: ollama/phi3:mini with CUDA acceleration on RTX 2060 SUPER" "Green"
Update-Status ""
# Set Open Interpreter environment variables
$env:OI_DEVICE = "cuda"
$env:INTERPRETER_MODEL = "ollama/phi3:mini"
$env:INTERPRETER_API_BASE = "http://localhost:11434"
$env:INTERPRETER_PROVIDER = "ollama"
# Set a dummy OpenAI API key to prevent errors
$env:OPENAI_API_KEY = "dummy_key_to_prevent_errors"
# Create a .interpreterrc file in the home directory
$interpreterRcContent = @"
{
"model": "ollama/phi3:mini",
"api_base": "http://localhost:11434",
"provider": "ollama",
"auto_run": false,
"context_window": 16000
}
"@
$interpreterRcPath = "$env:USERPROFILE\.interpreterrc"
$interpreterRcContent | Out-File -FilePath $interpreterRcPath -Encoding utf8
Update-Status "Created .interpreterrc configuration file" "Cyan"
# Create a wrapper script to handle exit properly
$wrapperScript = @"
import sys
import os
import subprocess
import signal
# Set dummy OpenAI API key to prevent errors on exit
os.environ['OPENAI_API_KEY'] = 'dummy_key_to_prevent_errors'
# Start interpreter process
interpreter_process = subprocess.Popen([
'interpreter',
'--model', 'ollama/phi3:mini',
'--api-base', 'http://localhost:11434',
'--provider', 'ollama'
])
try:
# Wait for the process to complete
interpreter_process.wait()
except KeyboardInterrupt:
# Handle Ctrl+C gracefully
print("\nExiting gracefully...")
interpreter_process.terminate()
try:
interpreter_process.wait(timeout=5)
except subprocess.TimeoutExpired:
interpreter_process.kill()
sys.exit(0)
"@
$wrapperScript | Out-File -FilePath "$env:TEMP\interpreter_wrapper.py" -Encoding utf8
# Start Open Interpreter with the wrapper script
Update-Status "Starting Open Interpreter with Ollama/phi3:mini model..." "Green"
python "$env:TEMP\interpreter_wrapper.py"
# Clean up jobs
Stop-Job -Job $watcherJob -ErrorAction SilentlyContinue
Stop-Job -Job $monitorJob -ErrorAction SilentlyContinue
Remove-Job -Job $watcherJob -ErrorAction SilentlyContinue
Remove-Job -Job $monitorJob -ErrorAction SilentlyContinue
Update-Status ""
Update-Status "===================================" "Cyan"
Update-Status " Session Ended" "Cyan"
Update-Status "===================================" "Cyan"
Read-Host "Press Enter to exit"
WganMe commented Apr 11, 2025

Startup shortcut (run as administrator), placed in:
C:\Users\[user]\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup
Target:
C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe -ExecutionPolicy Bypass -File "C:\Users\[user]\.ollama\start_ollama_rtx2060s.ps1"
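
A sketch of creating that shortcut from PowerShell (illustrative; the .lnk file name is arbitrary and [user] is a placeholder to replace):

$shell = New-Object -ComObject WScript.Shell
$lnk = $shell.CreateShortcut("$env:APPDATA\Microsoft\Windows\Start Menu\Programs\Startup\Ollama RTX2060S.lnk")
$lnk.TargetPath = "C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe"
$lnk.Arguments = '-ExecutionPolicy Bypass -File "C:\Users\[user]\.ollama\start_ollama_rtx2060s.ps1"'
$lnk.Save()

Marking the shortcut "Run as administrator" still has to be done manually in its Properties > Advanced dialog.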
