Skip to content

Instantly share code, notes, and snippets.

@leek
Created April 3, 2026 14:49
Show Gist options
  • Select an option

  • Save leek/9503170cb32712d62da18298984cbc3a to your computer and use it in GitHub Desktop.

Select an option

Save leek/9503170cb32712d62da18298984cbc3a to your computer and use it in GitHub Desktop.
Multi-model AI commit review — GitHub Actions workflow that reviews every commit with Claude, GPT, and Gemini
---
name: AI Commit Review

on:
  push:
    branches: [main]

permissions:
  contents: write
  issues: write
  pull-requests: write

# One run per pushed head SHA; a newer push cancels an in-flight review.
concurrency:
  group: ai-review-${{ github.sha }}
  cancel-in-progress: true

jobs:
  # Enumerate all commits in this push and output a matrix
  enumerate:
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.commits.outputs.matrix }}
      count: ${{ steps.commits.outputs.count }}
    steps:
      - name: Checkout
        uses: actions/checkout@v6
        with:
          # Full history so the BEFORE..AFTER range resolves
          fetch-depth: 0

      - name: List commits in push
        id: commits
        env:
          BEFORE: ${{ github.event.before }}
          AFTER: ${{ github.event.after }}
        run: |
          # Handle initial push (before is all zeros)
          if [[ "$BEFORE" == "0000000000000000000000000000000000000000" ]]; then
            SHAS=$(git log --format='%H' -1 "$AFTER")
          else
            SHAS=$(git log --format='%H' "${BEFORE}..${AFTER}")
          fi
          # Build matrix as JSON array of SHAs only (no untrusted data in JSON construction)
          MATRIX_JSON=$(echo "$SHAS" | jq -R -s -c 'split("\n") | map(select(. != "")) | map({sha: .})')
          COUNT=$(echo "$MATRIX_JSON" | jq 'length')
          echo "Found ${COUNT} commit(s) in push"
          echo "count=${COUNT}" >> "$GITHUB_OUTPUT"
          echo "matrix=${MATRIX_JSON}" >> "$GITHUB_OUTPUT"

  # Per-commit: check, diff, review with all providers, create digest
  review:
    needs: enumerate
    if: needs.enumerate.outputs.count != '0'
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      max-parallel: 5
      matrix:
        commit: ${{ fromJson(needs.enumerate.outputs.matrix) }}
    steps:
      - name: Checkout
        uses: actions/checkout@v6
        with:
          fetch-depth: 0

      - name: Check if review should run
        id: check
        env:
          COMMIT_SHA: ${{ matrix.commit.sha }}
        run: |
          # Extract commit metadata safely via git (no untrusted interpolation into the script)
          MESSAGE=$(git log -1 --format='%s' "$COMMIT_SHA")
          AUTHOR=$(git log -1 --format='%an' "$COMMIT_SHA")
          # Skip dependabot commits
          if [[ "$MESSAGE" == "build(deps)"* ]] || [[ "$MESSAGE" == *"dependabot[bot]"* ]] || [[ "$AUTHOR" == *"dependabot"* ]]; then
            echo "Skipping: dependabot commit"
            echo "should_review=false" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          # Skip [skip-review] or [skip-ci] commits
          if [[ "$MESSAGE" == *"[skip-review]"* ]] || [[ "$MESSAGE" == *"[skip-ci]"* ]]; then
            echo "Skipping: skip tag in commit message"
            echo "should_review=false" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          # Skip merge commits
          if [[ "$MESSAGE" == Merge* ]]; then
            echo "Skipping: merge commit"
            echo "should_review=false" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          # Skip auto-style fix commits
          if [[ "$MESSAGE" == *"fix code style"* ]] || [[ "$MESSAGE" == *"Fix Code Style"* ]]; then
            echo "Skipping: auto style fix commit"
            echo "should_review=false" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          # Skip ai-review commits (never review our own suggestion commits)
          if [[ "$MESSAGE" == "ai-review:"* ]]; then
            echo "Skipping: ai-review commit"
            echo "should_review=false" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          echo "should_review=true" >> "$GITHUB_OUTPUT"

      - name: Extract and filter diff
        if: steps.check.outputs.should_review == 'true'
        id: diff
        env:
          COMMIT_SHA: ${{ matrix.commit.sha }}
        run: |
          # A root commit has no parent; diff against the empty tree instead
          # so the very first commit of a repository can still be reviewed.
          if git rev-parse --verify --quiet "${COMMIT_SHA}~1" > /dev/null; then
            BASE="${COMMIT_SHA}~1"
          else
            BASE=$(git hash-object -t tree /dev/null)
          fi
          git diff "$BASE" "${COMMIT_SHA}" \
            -- . \
            ':!package-lock.json' \
            ':!composer.lock' \
            ':!yarn.lock' \
            ':!pnpm-lock.yaml' \
            ':!vendor/' \
            ':!vendor-src/' \
            ':!node_modules/' \
            ':!storage/' \
            ':!tests/' \
            ':!*.csv' \
            ':!*.sql' \
            > /tmp/filtered-diff.txt
          LINE_COUNT=$(grep -c '^+' /tmp/filtered-diff.txt || true)
          echo "Filtered diff: ${LINE_COUNT} added/changed lines"
          echo "line_count=${LINE_COUNT}" >> "$GITHUB_OUTPUT"
          if [ "$LINE_COUNT" -gt 5000 ]; then
            echo "Diff too large (${LINE_COUNT} lines > 5000). Skipping."
            echo "skip_large=true" >> "$GITHUB_OUTPUT"
          else
            echo "skip_large=false" >> "$GITHUB_OUTPUT"
          fi

      - name: Run Claude review
        if: steps.check.outputs.should_review == 'true' && steps.diff.outputs.skip_large != 'true'
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
        run: |
          node .github/scripts/ai-review.mjs --provider claude < /tmp/filtered-diff.txt > /tmp/claude-findings.json 2>/tmp/claude-review.log || true

      - name: Run GPT review
        if: steps.check.outputs.should_review == 'true' && steps.diff.outputs.skip_large != 'true'
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          node .github/scripts/ai-review.mjs --provider openai < /tmp/filtered-diff.txt > /tmp/openai-findings.json 2>/tmp/openai-review.log || true

      - name: Run Gemini review
        if: steps.check.outputs.should_review == 'true' && steps.diff.outputs.skip_large != 'true'
        env:
          GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
        run: |
          node .github/scripts/ai-review.mjs --provider gemini < /tmp/filtered-diff.txt > /tmp/gemini-findings.json 2>/tmp/gemini-review.log || true

      - name: Create digest
        if: steps.check.outputs.should_review == 'true' && steps.diff.outputs.skip_large != 'true'
        env:
          GH_TOKEN: ${{ github.token }}
          GITHUB_SHA: ${{ matrix.commit.sha }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_ACTOR: ${{ github.actor }}
          COMMIT_SHA: ${{ matrix.commit.sha }}
        run: |
          # Extract commit message safely for the digest script
          export COMMIT_MESSAGE
          COMMIT_MESSAGE=$(git log -1 --format='%s' "$COMMIT_SHA")
          # Set up findings directories expected by digest script
          mkdir -p claude-findings openai-findings gemini-findings
          [ -f /tmp/claude-findings.json ] && cp /tmp/claude-findings.json claude-findings/findings.json || true
          [ -f /tmp/openai-findings.json ] && cp /tmp/openai-findings.json openai-findings/findings.json || true
          [ -f /tmp/gemini-findings.json ] && cp /tmp/gemini-findings.json gemini-findings/findings.json || true
          bash .github/scripts/ai-review-digest.sh
#!/usr/bin/env bash
set -euo pipefail
# AI Review Digest Script
# Merges findings from multiple AI providers into a single GitHub Issue.
# Optionally creates a draft fix PR for high-confidence findings.
#
# Expected environment variables:
# GITHUB_SHA, GITHUB_REPOSITORY, GITHUB_ACTOR, COMMIT_MESSAGE
# Expected files in working directory:
# claude-findings/findings.json, openai-findings/findings.json, gemini-findings/findings.json

# Commit/identity context used in titles, bodies, and branch names.
COMMIT_SHA="${GITHUB_SHA}"
COMMIT_SHORT="${COMMIT_SHA:0:7}"  # short SHA for issue titles and fix branches
REPO="${GITHUB_REPOSITORY}"
AUTHOR="${GITHUB_ACTOR}"
MESSAGE="${COMMIT_MESSAGE:-"(no message)"}"

# Clean up temp files on exit (either var may be unset if we exit early,
# hence the :- defaults so `set -u` doesn't trip inside the trap)
trap 'rm -f "${ISSUE_BODY_FILE:-}" "${PR_BODY_FILE:-}"' EXIT

# Collect all findings files that exist AND hold syntactically valid JSON
# (`jq empty` parses without printing; a bad/missing file is just skipped).
FINDING_FILES=()
for provider in claude openai gemini; do
  file="${provider}-findings/findings.json"
  if [ -f "$file" ] && jq empty "$file" 2>/dev/null; then
    FINDING_FILES+=("$file")
  else
    echo "Warning: No valid findings from ${provider}"
  fi
done

# Nothing to digest when every provider failed or produced no output.
if [ ${#FINDING_FILES[@]} -eq 0 ]; then
  echo "No findings files found. Skipping digest."
  exit 0
fi
# Merge all findings into a single JSON array with provider attribution,
# ordered critical -> warning -> info.
MERGED=$(jq -s '
  [.[] | .provider as $p | .findings[]? | . + {source: $p}]
  | sort_by(
      if .severity == "critical" then 0
      elif .severity == "warning" then 1
      else 2 end
    )
' "${FINDING_FILES[@]}")
TOTAL=$(echo "$MERGED" | jq 'length')
if [ "$TOTAL" -eq 0 ]; then
  echo "No findings across all providers. Clean commit!"
  exit 0
fi
echo "Found ${TOTAL} total findings across ${#FINDING_FILES[@]} providers"

# Collect summaries from each provider
SUMMARIES=$(jq -s '[.[] | {provider: .provider, summary: .summary}]' "${FINDING_FILES[@]}")

# Deduplicate findings by file+line proximity (within 3 lines = same finding)
# and track which providers flagged each one.
# NOTE: the schema allows "line": null, and jq errors out on null arithmetic,
# which would kill this script under `set -e`. Default missing lines to 0
# before subtracting, and bound-check the signed difference instead of using
# fabs (fabs only exists in jq >= 1.7).
DEDUPED=$(echo "$MERGED" | jq '
  reduce .[] as $f ([];
    . as $acc |
    ($acc | map(select(
      .file == $f.file and
      ((((.line // 0) - ($f.line // 0)) as $d | $d >= -3 and $d <= 3)) and
      .severity == $f.severity
    )) | first) as $existing |
    if $existing then
      map(
        if . == $existing then
          .sources = (.sources + [$f.source] | unique)
        else . end
      )
    else
      . + [$f + {sources: [$f.source]}]
    end
  )
  | sort_by(
      if .severity == "critical" then 0
      elif .severity == "warning" then 1
      else 2 end
    )
')
DEDUPED_COUNT=$(echo "$DEDUPED" | jq 'length')
echo "After deduplication: ${DEDUPED_COUNT} unique findings"
# Build the issue body
# Render a "### <label>" markdown section listing every deduped finding of the
# given severity. Reads the global $DEDUPED JSON array; prints nothing when no
# finding matches, so empty sections vanish from the issue body.
build_findings_section() {
  local severity="$1"  # "critical" | "warning" | "info"
  local label="$2"     # human-readable section heading
  local items
  # One bullet per finding; a collapsible <details> block carries the
  # suggested fix when the model provided one.
  items=$(echo "$DEDUPED" | jq -r --arg sev "$severity" '
[.[] | select(.severity == $sev)] |
if length == 0 then empty
else
.[] |
"- **\(.title)** (`\(.file):\(.line // "?")`) — flagged by \(.sources | join(", "))\n \(.description)" +
if .fix then "\n <details><summary>Suggested fix</summary>\n\n ```\n \(.fix)\n ```\n\n </details>" else "" end
end
')
  # Emit the heading only when at least one finding matched.
  if [ -n "$items" ]; then
    echo ""
    echo "### ${label}"
    echo "$items"
  fi
}
# Build model agreement table
# Render a markdown table showing which providers flagged each deduped
# finding and the (first-reporter's) confidence. Reads global $DEDUPED.
build_agreement_table() {
  echo ""
  echo "### Model Agreement"
  echo "| Finding | Claude | GPT | Gemini | Confidence |"
  echo "|---------|--------|-----|--------|------------|"
  # Truncate titles to 40 chars and escape literal "|" so a title cannot
  # break the table layout; default missing titles/confidence to "?".
  echo "$DEDUPED" | jq -r '
.[] |
"| \((.title // "?") | .[0:40] | gsub("\\|"; "\\|")) | \(if (.sources | index("claude")) then "x" else " " end) | \(if (.sources | index("openai")) then "x" else " " end) | \(if (.sources | index("gemini")) then "x" else " " end) | \(.confidence // "?") |"
'
}
# Build summaries section
# Print one bullet per provider's one-line summary. Reads global $SUMMARIES.
build_summaries_section() {
  printf '\n### Provider Summaries\n'
  jq -r '.[] | "- **\(.provider):** \(.summary)"' <<< "$SUMMARIES"
}
# Count by severity
CRITICAL_COUNT=$(echo "$DEDUPED" | jq '[.[] | select(.severity == "critical")] | length')
WARNING_COUNT=$(echo "$DEDUPED" | jq '[.[] | select(.severity == "warning")] | length')
INFO_COUNT=$(echo "$DEDUPED" | jq '[.[] | select(.severity == "info")] | length')

# Only create issues for critical findings — warnings/info are not worth the noise
if [ "$CRITICAL_COUNT" -eq 0 ]; then
  echo "No critical findings. Skipping issue creation (${WARNING_COUNT} warnings, ${INFO_COUNT} info)."
  exit 0
fi

# Check for existing issue for this commit to prevent duplicates (REST API).
# Matches any open/closed ai-review issue whose title starts with our prefix.
EXISTING=$(gh api "repos/${REPO}/issues?labels=ai-review&state=all&per_page=100" \
  --jq "[.[] | select(.title | startswith(\"[AI Review] ${COMMIT_SHORT}\"))] | .[0].number // empty")
if [ -n "$EXISTING" ]; then
  echo "Issue already exists for ${COMMIT_SHORT}: #${EXISTING}. Skipping."
  exit 0
fi

# Determine overall summary for the title (first provider's summary wins)
FIRST_SUMMARY=$(echo "$SUMMARIES" | jq -r '.[0].summary // "Review complete"')
TITLE_PREFIX="[AI Review] ${COMMIT_SHORT}:"
ISSUE_TITLE="${TITLE_PREFIX} ${FIRST_SUMMARY}"
# Truncate title to 256 chars (GitHub limit)
ISSUE_TITLE="${ISSUE_TITLE:0:256}"

# Assemble the full body; the embedded $( ) substitutions render each section
# from the globals computed above. Interior lines are literal markdown.
ISSUE_BODY="## Commit Review: ${COMMIT_SHORT} by @${AUTHOR}
**Commit:** [\`${COMMIT_SHORT}\`](https://github.com/${REPO}/commit/${COMMIT_SHA})
**Message:** ${MESSAGE}
**Models:** Claude Opus 4.6, GPT-5.4, Gemini 3.1 Pro
**Findings:** ${CRITICAL_COUNT} critical, ${WARNING_COUNT} warnings, ${INFO_COUNT} info
$(build_findings_section "critical" "Critical")
$(build_findings_section "warning" "Warnings")
$(build_findings_section "info" "Info")
$(build_agreement_table)
$(build_summaries_section)"

# Ensure label exists (REST API — ignore 422 if already exists)
gh api "repos/${REPO}/labels" \
  -f name="ai-review" -f color="7057ff" -f description="AI-generated code review" \
  > /dev/null 2>&1 || true

# Create the issue via REST API (avoids GraphQL permission issues with GITHUB_TOKEN).
# The body goes through a JSON temp file so arbitrary content can't break quoting.
ISSUE_BODY_FILE=$(mktemp)
jq -n \
  --arg title "${ISSUE_TITLE}" \
  --arg body "${ISSUE_BODY}" \
  '{title: $title, body: $body, labels: ["ai-review"]}' > "${ISSUE_BODY_FILE}"
ISSUE_URL=$(gh api "repos/${REPO}/issues" \
  --input "${ISSUE_BODY_FILE}" \
  --jq '.html_url')
rm -f "${ISSUE_BODY_FILE}"
if [ -z "${ISSUE_URL}" ]; then
  echo "ERROR: Failed to create issue"
  exit 1
fi
echo "Created issue: ${ISSUE_URL}"
# Create a fix PR if there are high-confidence fixes agreed on by 2+ models
HIGH_CONF_FIXES=$(echo "$DEDUPED" | jq '[.[] | select(.confidence == "high" and .fix != null and (.sources | length) >= 2)]')
HIGH_CONF_COUNT=$(echo "$HIGH_CONF_FIXES" | jq 'length')
if [ "$HIGH_CONF_COUNT" -gt 0 ]; then
  echo "Found ${HIGH_CONF_COUNT} high-confidence fixes agreed on by 2+ models"
  FIX_BRANCH="ai-fix/${COMMIT_SHORT}"

  # CI runners ship with no git identity; `git commit` fails without one.
  git config user.name "github-actions[bot]"
  git config user.email "41898282+github-actions[bot]@users.noreply.github.com"

  # Create and switch to fix branch
  git checkout -b "${FIX_BRANCH}"

  # Enumerate the fixes. Feed the loop via process substitution, NOT a
  # pipeline — a piped `while` runs in a subshell and FIXES_APPLIED would be
  # silently lost when the loop ends.
  FIXES_APPLIED=0
  while read -r fix; do
    FILE=$(echo "$fix" | jq -r '.file')
    LINE=$(echo "$fix" | jq -r '.line')
    FIX_CODE=$(echo "$fix" | jq -r '.fix')
    TITLE=$(echo "$fix" | jq -r '.title')
    if [ -f "$FILE" ] && [ -n "$FIX_CODE" ] && [ "$FIX_CODE" != "null" ]; then
      echo "Applying fix: ${TITLE} in ${FILE}:${LINE}"
      # We can't reliably apply arbitrary code fixes via sed.
      # Instead, we note them in the PR for human review.
      FIXES_APPLIED=$((FIXES_APPLIED + 1))
    fi
  done < <(echo "$HIGH_CONF_FIXES" | jq -c '.[]')
  echo "Documented ${FIXES_APPLIED} fix suggestion(s)"

  # Build PR body with suggested fixes (interior lines are literal markdown)
  PR_BODY="## AI-Suggested Fixes for ${COMMIT_SHORT}
Related issue: ${ISSUE_URL}
The following high-confidence fixes were identified by 2+ AI models.
**These fixes need human review before applying.**
$(echo "$HIGH_CONF_FIXES" | jq -r '
.[] |
"### \(.title)\n**File:** \(.file):\(.line // "?")\n**Flagged by:** \(.sources | join(", "))\n**Confidence:** \(.confidence)\n\n\(.description)\n\n**Suggested fix:**\n```\n\(.fix)\n```\n"
')"

  # Create an empty commit with the fix suggestions documented
  git commit --allow-empty -m "$(cat <<EOF
ai-review: suggested fixes for ${COMMIT_SHORT}
Fixes suggested by AI review. See PR body for details.
These are suggestions only — human review required.
Related: ${ISSUE_URL}
EOF
)"
  git push origin "${FIX_BRANCH}"

  # Open a draft PR via the REST API, body passed as a JSON temp file
  PR_BODY_FILE=$(mktemp)
  jq -n \
    --arg title "[AI Fix] Suggested fixes for ${COMMIT_SHORT}" \
    --arg body "${PR_BODY}" \
    --arg head "${FIX_BRANCH}" \
    '{title: $title, body: $body, head: $head, base: "main", draft: true}' > "${PR_BODY_FILE}"
  PR_URL=$(gh api "repos/${REPO}/pulls" \
    --input "${PR_BODY_FILE}" \
    --jq '.html_url')
  rm -f "${PR_BODY_FILE}"
  echo "Created draft fix PR: ${PR_URL}"

  # Add PR link as comment on the issue; the issue number is the last path
  # segment of its html_url — validate it is purely numeric before use.
  ISSUE_NUMBER=$(basename "${ISSUE_URL}")
  if [ -z "${ISSUE_NUMBER}" ] || ! [[ "${ISSUE_NUMBER}" =~ ^[0-9]+$ ]]; then
    echo "ERROR: Failed to extract issue number from ISSUE_URL: ${ISSUE_URL}"
    exit 1
  fi
  gh api "repos/${REPO}/issues/${ISSUE_NUMBER}/comments" \
    -f body="Draft fix PR created: ${PR_URL}"

  # Return to original ref
  git checkout -
fi
echo "Done!"
You are a senior code reviewer analyzing a git commit diff. Your job is to find bugs, security issues, performance problems, and code quality concerns.
Rules:
- Only report real, actionable issues. Do not report style preferences or nitpicks.
- Focus on: bugs, logic errors, security vulnerabilities, performance regressions, race conditions, missing error handling.
- Do not report issues in deleted code (lines starting with -).
- NEVER claim a package version, GitHub Action version, or dependency version "does not exist." You cannot verify what versions are currently published. Dependabot and other tools check real registries before creating version bumps — trust them.
- Maximum 10 findings. If there are more, prioritize by severity.
- If the commit looks clean, return an empty findings array.
- Return ONLY valid JSON matching the schema below. No markdown, no explanation, no code fences.
JSON Schema:
{
"summary": "one-line quality assessment of the commit",
"findings": [
{
"severity": "critical|warning|info",
"title": "short title (under 80 chars)",
"description": "what is wrong and why it matters",
"file": "path/to/file.ext",
"line": 42,
"fix": "corrected code snippet, or null if no clear fix",
"confidence": "high|medium|low"
}
]
}
Severity guide:
- critical: Will cause bugs, data loss, security vulnerabilities, or crashes in production
- warning: Could cause issues under certain conditions, or significantly hurts maintainability
- info: Minor improvement opportunity, not urgent
Confidence guide:
- high: You are very sure this is a real issue and the fix is correct
- medium: Likely an issue but context might change the assessment
- low: Possible issue, needs human review
Now analyze this commit diff:
#!/usr/bin/env node
/**
* AI Code Review Script
* Reads a git diff from stdin, sends it to an AI provider, outputs JSON findings.
* Usage: git diff HEAD~1 HEAD | node ai-review.mjs --provider claude|openai|gemini
*/
import { readFileSync } from 'node:fs';
import { argv, stdin, stdout, stderr, exit } from 'node:process';
/**
 * Provider registry. Each entry carries its endpoint URL, model id, the env
 * var holding its API key, a builder that produces the provider-specific
 * request (headers + serialized JSON body), and an extractor that pulls the
 * generated text out of that provider's response shape.
 */
const PROVIDERS = {
  claude: {
    url: 'https://api.anthropic.com/v1/messages',
    model: 'claude-opus-4-6',
    envKey: 'ANTHROPIC_API_KEY',
    // Anthropic Messages API: auth via x-api-key plus a pinned
    // anthropic-version header.
    buildRequest(apiKey, prompt) {
      return {
        headers: {
          'x-api-key': apiKey,
          'anthropic-version': '2023-06-01',
          'content-type': 'application/json',
        },
        body: JSON.stringify({
          model: this.model,
          max_tokens: 4096,
          messages: [{ role: 'user', content: prompt }],
        }),
      };
    },
    // Response shape: { content: [{ text: "..." }, ...] }
    extractText(json) {
      return json.content?.[0]?.text ?? '';
    },
  },
  openai: {
    url: 'https://api.openai.com/v1/chat/completions',
    model: 'gpt-5.4',
    envKey: 'OPENAI_API_KEY',
    // OpenAI Chat Completions: Bearer auth; response_format json_object asks
    // the model to emit strict JSON.
    buildRequest(apiKey, prompt) {
      return {
        headers: {
          Authorization: `Bearer ${apiKey}`,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model: this.model,
          max_completion_tokens: 4096,
          response_format: { type: 'json_object' },
          messages: [
            { role: 'system', content: 'You are a code reviewer. Always respond with valid JSON.' },
            { role: 'user', content: prompt },
          ],
        }),
      };
    },
    // Response shape: { choices: [{ message: { content: "..." } }, ...] }
    extractText(json) {
      return json.choices?.[0]?.message?.content ?? '';
    },
  },
  gemini: {
    url: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-3.1-pro-preview:generateContent',
    model: 'gemini-3.1-pro-preview',
    envKey: 'GEMINI_API_KEY',
    // Gemini generateContent: auth via x-goog-api-key; responseMimeType asks
    // for a JSON reply.
    buildRequest(apiKey, prompt) {
      return {
        headers: {
          'x-goog-api-key': apiKey,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          contents: [{ parts: [{ text: prompt }] }],
          generationConfig: {
            responseMimeType: 'application/json',
          },
        }),
      };
    },
    // Response shape: { candidates: [{ content: { parts: [{ text: "..." }] } }] }
    extractText(json) {
      return json.candidates?.[0]?.content?.parts?.[0]?.text ?? '';
    },
  },
};
/**
 * Resolve the --provider CLI flag to a known provider key.
 * Writes usage to stderr and exits(1) when the flag is missing or unknown.
 * @returns {string} one of the keys of PROVIDERS
 */
function parseArgs() {
  const flagPos = argv.indexOf('--provider');
  const requested = flagPos === -1 ? undefined : argv[flagPos + 1];
  if (!requested) {
    stderr.write('Usage: ai-review.mjs --provider claude|openai|gemini\n');
    exit(1);
  }
  if (!PROVIDERS[requested]) {
    stderr.write(`Unknown provider: ${requested}. Must be one of: ${Object.keys(PROVIDERS).join(', ')}\n`);
    exit(1);
  }
  return requested;
}
/**
 * Read all of stdin synchronously and return it as UTF-8 text.
 * Captures the diff piped into this script.
 */
function readStdin() {
  return readFileSync(stdin.fd, 'utf-8');
}
/**
 * Send the prompt template + diff to the given provider and return the raw
 * model output text.
 *
 * Retries up to maxRetries times on network failures, HTTP 5xx, and HTTP 429
 * (rate limiting — common with AI APIs), backing off longer on each attempt.
 * Exits the process on a missing API key or an unrecoverable API error.
 *
 * @param {string} provider key into PROVIDERS ('claude'|'openai'|'gemini')
 * @param {string} diff unified diff text to review
 * @returns {Promise<string>} raw model response text (expected to be JSON)
 */
async function callApi(provider, diff) {
  const config = PROVIDERS[provider];
  const apiKey = process.env[config.envKey];
  if (!apiKey) {
    stderr.write(`Missing environment variable: ${config.envKey}\n`);
    exit(1);
  }
  // The prompt template lives next to this script.
  const promptTemplate = readFileSync(
    new URL('ai-review-prompt.txt', import.meta.url),
    'utf-8',
  );
  const prompt = promptTemplate + '\n\n' + diff;
  const { headers, body } = config.buildRequest(apiKey, prompt);
  const maxRetries = 2;
  for (let attempt = 0; attempt <= maxRetries; attempt++) {
    // Linear backoff: 5s after the first failure, 10s after the second.
    const delayMs = 5000 * (attempt + 1);
    try {
      const response = await fetch(config.url, {
        method: 'POST',
        headers,
        body,
      });
      if (!response.ok) {
        const errorText = await response.text();
        stderr.write(`[${provider}] API error (${response.status}): ${errorText}\n`);
        // Retry transient server errors AND 429 rate limits; other 4xx are
        // caller errors that will not succeed on retry.
        if (attempt < maxRetries && (response.status >= 500 || response.status === 429)) {
          stderr.write(`[${provider}] Retrying in ${delayMs / 1000}s... (attempt ${attempt + 1}/${maxRetries})\n`);
          await new Promise((r) => setTimeout(r, delayMs));
          continue;
        }
        exit(1);
      }
      const json = await response.json();
      const text = config.extractText(json);
      if (!text) {
        stderr.write(`[${provider}] Empty response from API\n`);
        exit(1);
      }
      return text;
    } catch (err) {
      // Network-level failure: DNS, TLS, reset socket, fetch throw.
      stderr.write(`[${provider}] Request failed: ${err.message}\n`);
      if (attempt < maxRetries) {
        stderr.write(`[${provider}] Retrying in ${delayMs / 1000}s... (attempt ${attempt + 1}/${maxRetries})\n`);
        await new Promise((r) => setTimeout(r, delayMs));
        continue;
      }
      exit(1);
    }
  }
}
/**
 * Parse a model's text response into a { summary, findings } document.
 * Tolerates markdown code fences wrapped around the JSON; any parse or
 * validation failure yields a safe "Parse error" document instead of throwing.
 * @param {string} text raw model output
 * @returns {{summary: string, findings: Array}} parsed findings document
 */
function parseFindings(text) {
  // Strip markdown code fences if present
  const cleaned = text
    .replace(/^```(?:json)?\s*\n?/m, '')
    .replace(/\n?```\s*$/m, '')
    .trim();
  try {
    const parsed = JSON.parse(cleaned);
    // Validate structure
    const wellFormed = Boolean(parsed.summary) && Array.isArray(parsed.findings);
    if (wellFormed) {
      return parsed;
    }
    stderr.write('Response missing required fields (summary, findings)\n');
    return { summary: 'Parse error', findings: [] };
  } catch (err) {
    stderr.write(`Failed to parse JSON response: ${err.message}\n`);
    stderr.write(`Raw response: ${text.substring(0, 500)}\n`);
    return { summary: 'Parse error', findings: [] };
  }
}
/**
 * Entry point: read the diff from stdin, send it to the selected provider,
 * and print the findings JSON (with a `provider` field added) to stdout.
 */
async function main() {
  const provider = parseArgs();
  const diff = readStdin();
  // An empty diff is not an error — emit a valid empty findings document so
  // downstream consumers always receive parseable JSON.
  if (!diff.trim()) {
    stderr.write('No diff provided on stdin\n');
    stdout.write(JSON.stringify({ summary: 'Empty diff', findings: [] }));
    exit(0);
  }
  stderr.write(`[${provider}] Reviewing ${diff.split('\n').length} lines of diff...\n`);
  const text = await callApi(provider, diff);
  const findings = parseFindings(text);
  findings.provider = provider;
  stdout.write(JSON.stringify(findings, null, 2));
}

// Surface async failures as a clean non-zero exit instead of an unhandled
// promise rejection with a confusing stack trace.
main().catch((err) => {
  stderr.write(`Fatal: ${err.message}\n`);
  exit(1);
});
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment