ai_stuff
#!/usr/bin/env bash
set -euo pipefail
# This script uses an AI model to generate file content based on a prompt and context from other files.
# It requires Ollama to be installed and accessible in the system's PATH.
#
# Usage:
# ai-create.sh <output_file> "<prompt>" <context_file1> [context_file2 ...]
#
# Environment Variables:
# AI_CREATE_MODEL: Sets the Ollama model to use (default: "qwen3-coder:480b").
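#
# Example invocation (illustrative paths and model name only; substitute your own):
#   ai-create.sh build/summary.md "Summarize these notes as a short README" notes/a.txt notes/b.txt
#   AI_CREATE_MODEL=llama3 ai-create.sh out.txt "Extract all TODO items" src/main.sh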
ai_create_file() {
  # Set the AI model, allowing for an override via an environment variable.
  local model="${AI_CREATE_MODEL:-qwen3-coder:480b}"

  # --- 1. Validate Input Arguments ---
  if [ $# -lt 3 ]; then
    echo "Usage: $(basename "$0") <output_file> \"<prompt>\" <context_file1> [context_file2 ...]" >&2
    echo "Error: Not enough arguments provided." >&2
    return 1
  fi

  local output_file="$1"
  local user_prompt="$2"
  shift 2 # The remaining arguments are context files.
  local context_files=("$@")

  # --- 2. Validate Context Files ---
  local all_files_valid=true
  local file_list_for_prompt=""
  for file in "${context_files[@]}"; do
    if [ ! -f "$file" ] || [ ! -r "$file" ]; then
      echo "Error: Context path '$file' is not a readable file." >&2
      all_files_valid=false
    fi
    file_list_for_prompt+="'$file' "
  done
  file_list_for_prompt=${file_list_for_prompt% } # Trim trailing space

  if [ "$all_files_valid" = false ]; then
    echo "Error: One or more context files are invalid. Aborting." >&2
    return 1
  fi
  # --- 3. Validate and Prepare Output Path ---
  local output_dir
  output_dir=$(dirname "$output_file")
  if ! mkdir -p "$output_dir" || [ ! -w "$output_dir" ]; then
    echo "Error: Output directory '$output_dir' could not be created or is not writable." >&2
    return 1
  fi

  # --- 4. Build the AI Prompt ---
  echo "Asking AI ($model) to create '$output_file'..." >&2
  echo " Prompt: \"$user_prompt\"" >&2
  echo " Using context from files: ${file_list_for_prompt}" >&2

  local context_content
  context_content=$(cat "${context_files[@]}")

  local final_prompt
  final_prompt=$(cat <<EOF
You are an AI assistant programmed to generate file content based on user instructions and provided context.
Your task is to generate the content for the file named: '$output_file'
Follow these instructions precisely:
$user_prompt
Use the following concatenated content from the provided context file(s) (${file_list_for_prompt}) as reference material:
\`\`\`
$context_content
\`\`\`
CRITICAL OUTPUT REQUIREMENTS:
1. Generate *only* the raw content required for the target file '$output_file'.
2. Your response must *not* contain any introductory phrases, explanations, apologies, or summaries.
3. Your response must *not* include markdown formatting like \`\`\` unless it's explicitly part of the requested file content itself.
4. Your response must *not* contain any metadata or diagnostic tags like <think>...</think>.
Begin the raw file content now:
EOF
  )
  # --- 5. Execute Ollama and Handle Output ---
  if ! ollama run "$model" "$final_prompt" > "$output_file"; then
    echo "Error: Ollama command failed. Output file '$output_file' may be incomplete or empty." >&2
    # Clean up the potentially empty/failed file.
    rm -f "$output_file"
    return 1
  fi

  # --- 6. Verify Result ---
  if [ -s "$output_file" ]; then
    echo "File '$output_file' created successfully." >&2
  else
    echo "Warning: Ollama command succeeded, but the output file '$output_file' is empty. The AI might not have generated usable content." >&2
    # No need to return an error, but the warning is important.
  fi
}

# Execute the main function with all script arguments.
ai_create_file "$@"

#!/usr/bin/env bash
set -euo pipefail
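# This script asks an AI model to explain the purpose and content of a file.
# It requires Ollama to be installed and accessible in the system's PATH.
#
# Usage:
# ai-explain <filename>
#
# Environment Variables:
# AI_EXPLAIN_MODEL: Sets the Ollama model to use (default: "gpt-oss:20b").
#
# Example invocation (illustrative filename only; substitute your own):
#   ai-explain ./deploy.sh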
explain_file() {
  local filename="${1:-}"
  local model="${AI_EXPLAIN_MODEL:-gpt-oss:20b}"

  # Validate input
  if [[ -z "$filename" ]]; then
    echo "Usage: ai-explain <filename>" >&2
    return 1
  fi

  # Resolve absolute path
  local abs_filename
  abs_filename=$(realpath "$filename" 2>/dev/null) || {
    echo "Error: Cannot resolve path '$filename'" >&2
    return 1
  }

  # Check file accessibility
  if [[ ! -f "$abs_filename" ]] || [[ ! -r "$abs_filename" ]]; then
    echo "Error: File '$filename' not found or is not readable." >&2
    return 1
  fi

  echo "Asking AI ($model) to explain '$filename'..." >&2

  # Build prompt with file metadata
  local prompt
  local file_size
  file_size=$(stat -f%z "$abs_filename" 2>/dev/null || stat -c%s "$abs_filename" 2>/dev/null || echo "unknown")
  prompt=$(cat <<EOF
Explain the purpose and content of the following file named '$(basename "$abs_filename")'.
File path: $abs_filename
File size: $file_size bytes
File type: $(file -b "$abs_filename" 2>/dev/null || echo "unknown")
Please analyze the content and provide:
1. What type of file this is (configuration, code, data, etc.)
2. What the file likely does or contains
3. Key components or structure
4. Any notable patterns or important details
File Content:
\`\`\`
$(cat "$abs_filename")
\`\`\`
EOF
  )

  # Execute Ollama with error handling
  if ! ollama run "$model" "$prompt"; then
    echo "Error: Ollama command failed." >&2
    return 1
  fi
}

explain_file "$@"