Project Maintenance Scripts

Bash scripts for cleaning and maintaining development projects.

Available Scripts

Script                          Function
maintain-git-repos.sh           Git repository maintenance (fetch, gc, prune, fsck)
clean-go-bins.sh                Remove bin directories from Go projects
clean-node-modules.sh           Remove node_modules and .serverless from Node.js projects
clean-rust-targets.sh           Remove target directories from Rust projects
check-js-vulnerabilities.sh     Check vulnerabilities in JavaScript projects

Basic Usage

# Make executable
chmod +x *.sh

# Run (interactive mode)
./maintain-git-repos.sh
./clean-go-bins.sh
./clean-node-modules.sh
./clean-rust-targets.sh
./check-js-vulnerabilities.sh

# Simulate operations (dry-run)
./clean-go-bins.sh --dry-run
./clean-node-modules.sh --dry-run
./clean-rust-targets.sh --dry-run

# Force cleanup (no confirmation; clean-go-bins.sh only)
./clean-go-bins.sh --force

Common Options

  • --help - Show help
  • --verbose - Verbose output
  • --dry-run - Simulate without executing
  • --force - Execute without confirmation (clean-go-bins.sh only)
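
Options can be combined, and a base directory can be passed as the last argument. For example (the path is illustrative):

./clean-rust-targets.sh --verbose --dry-run ~/projects
./maintain-git-repos.sh --verbose ~/projects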

Requirements

  • Bash 4.0+
  • Git
  • jq (required by check-js-vulnerabilities.sh)
  • Language-specific tooling for the scripts you use: Go, Node.js/npm, Rust/Cargo

Security

  • All scripts only operate on directories that are Git repositories
  • Use --dry-run to test before executing
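
For the cleanup scripts, a cautious workflow is to simulate first and run the real cleanup only after reviewing the output (the path is illustrative):

# Preview what would be removed
./clean-go-bins.sh --dry-run ~/code
# Then remove without per-directory prompts
./clean-go-bins.sh --force ~/code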
#!/bin/bash
# check-js-vulnerabilities.sh: JavaScript Projects Vulnerability Check Script
# Purpose: Check for vulnerabilities in all JavaScript/Node.js projects in the current directory tree
set -e
# Color codes
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly BLUE='\033[0;34m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'
# Configuration
readonly SCRIPT_NAME="$(basename "$0")"
# Global state
VERBOSE=false
DRY_RUN=false
BASE_DIR="."
SEVERITY_LEVEL="low"
OUTPUT_FORMAT="summary"
AUTO_FIX=false
EXCLUDE_PATTERNS=()
OUTPUT_FILE=""
TIMEOUT_SECONDS=300
# Temporary files
TEMP_RESULTS_FILE=""
# Logging functions
log_message() {
local level="$1"
local message="$2"
local color=""
case "$level" in
"info") color="$BLUE" ;;
"success") color="$GREEN" ;;
"error") color="$RED" ;;
"warning") color="$YELLOW" ;;
"verbose")
[ "$VERBOSE" = true ] || return 0
color="$BLUE"
;;
esac
echo -e "${color}${message}${NC}" >&"$([ "$level" = "error" ] && echo 2 || echo 1)"
}
# Cleanup function
cleanup() {
[ -f "$TEMP_RESULTS_FILE" ] && rm -f "$TEMP_RESULTS_FILE"
}
# Error handling
die() {
log_message "error" "❌ $1"
cleanup
exit 1
}
# Help function
show_help() {
cat << EOF
JavaScript Projects Vulnerability Check Script
USAGE:
$SCRIPT_NAME [OPTIONS] [BASE_DIRECTORY]
DESCRIPTION:
Check for vulnerabilities in all JavaScript/Node.js projects in the specified directory tree.
Supports npm, Yarn (v1 and v2+), pnpm, and Bun package managers.
Only processes directories that contain both package.json and .git (Git repositories).
ARGUMENTS:
BASE_DIRECTORY Directory to search for JavaScript projects (default: current directory)
OPTIONS:
-v, --verbose Enable verbose output
-d, --dry-run Show what would be done without executing
-s, --severity LEVEL Minimum severity level to report (low|moderate|high|critical) (default: low)
-f, --format FORMAT Output format (summary|json) (default: summary)
-o, --output FILE Save report to file (in addition to console output)
--fix Attempt to automatically fix vulnerabilities where possible
--exclude PATTERN Exclude projects matching pattern (can be used multiple times)
--timeout SECONDS Timeout for audit commands (default: 300)
-h, --help Show this help message
EXAMPLES:
$SCRIPT_NAME # Check all JavaScript projects in current directory
$SCRIPT_NAME /path/to/projects # Check all JavaScript projects in /path/to/projects
$SCRIPT_NAME --verbose --dry-run # Show what would be checked in current directory
$SCRIPT_NAME --severity high # Only report high and critical vulnerabilities
$SCRIPT_NAME --format json # Output results in JSON format
$SCRIPT_NAME --output report.json # Save results to file
$SCRIPT_NAME --fix # Attempt to fix vulnerabilities automatically
$SCRIPT_NAME --exclude "test-*" # Exclude projects starting with "test-"
$SCRIPT_NAME --timeout 600 # Set 10-minute timeout for audit commands
EOF
}
# Parse command line arguments
parse_args() {
while [[ $# -gt 0 ]]; do
case $1 in
-v|--verbose) VERBOSE=true; shift ;;
-d|--dry-run) DRY_RUN=true; shift ;;
-s|--severity)
SEVERITY_LEVEL="$2"
[[ "$SEVERITY_LEVEL" =~ ^(low|moderate|high|critical)$ ]] ||
die "Invalid severity level: $SEVERITY_LEVEL. Must be one of: low, moderate, high, critical"
shift 2
;;
-f|--format)
OUTPUT_FORMAT="$2"
[[ "$OUTPUT_FORMAT" =~ ^(summary|json)$ ]] ||
die "Invalid output format: $OUTPUT_FORMAT. Must be one of: summary, json"
shift 2
;;
-o|--output) OUTPUT_FILE="$2"; shift 2 ;;
--fix) AUTO_FIX=true; shift ;;
--exclude) EXCLUDE_PATTERNS+=("$2"); shift 2 ;;
--timeout)
TIMEOUT_SECONDS="$2"
[[ "$TIMEOUT_SECONDS" =~ ^[0-9]+$ ]] && [ "$TIMEOUT_SECONDS" -gt 0 ] ||
die "Invalid timeout value: $TIMEOUT_SECONDS. Must be a positive integer."
shift 2
;;
-h|--help) show_help; exit 0 ;;
-*)
die "Unknown option: $1"
;;
*)
BASE_DIR="$1"
shift
;;
esac
done
}
# Check system dependencies
check_dependencies() {
local missing_deps=()
command -v jq >/dev/null 2>&1 || missing_deps+=("jq")
command -v timeout >/dev/null 2>&1 || log_message "warning" "⚠️ timeout command not found, timeout functionality will be limited"
if [ ${#missing_deps[@]} -gt 0 ]; then
die "Missing required dependencies: ${missing_deps[*]}. Please install: brew install jq (macOS) or sudo apt-get install jq (Ubuntu)"
fi
}
# Check if directory is a JavaScript project
is_js_project() {
local dir="$1"
[ -f "$dir/package.json" ] && [ -d "$dir/.git" ]
}
# Check if project should be excluded
is_excluded() {
local project_name="$1"
local project_path="$2"
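# Illustrative examples of how --exclude patterns are applied:
#   --exclude "test-*"  matches project directory names such as "test-api" (glob match on the name)
#   --exclude "legacy"  matches any project whose path contains "legacy" (substring match on the path)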
for pattern in "${EXCLUDE_PATTERNS[@]}"; do
[[ "$project_name" == $pattern ]] || [[ "$project_path" == *"$pattern"* ]] && return 0
done
return 1
}
# Detect package manager
detect_package_manager() {
local project_dir="$1"
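# Detection is lockfile-based, checked in priority order:
#   pnpm-lock.yaml -> pnpm, bun.lockb -> bun,
#   yarn.lock plus .yarnrc.yml or .yarn/ -> Yarn v2+ (Berry), yarn.lock alone -> Yarn v1,
#   package-lock.json -> npm, and npm as the fallback when no lockfile is present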
[ -f "$project_dir/pnpm-lock.yaml" ] && echo "pnpm" && return 0
[ -f "$project_dir/bun.lockb" ] && echo "bun" && return 0
[ -f "$project_dir/yarn.lock" ] && ([ -f "$project_dir/.yarnrc.yml" ] || [ -d "$project_dir/.yarn" ]) && echo "yarn2" && return 0
[ -f "$project_dir/yarn.lock" ] && echo "yarn1" && return 0
[ -f "$project_dir/package-lock.json" ] && echo "npm" && return 0
echo "npm" # default
}
# Check if package manager is available
is_package_manager_available() {
local manager="$1"
case "$manager" in
"npm"|"yarn1"|"yarn2") command -v npm >/dev/null 2>&1 || command -v yarn >/dev/null 2>&1 ;;
"pnpm") command -v pnpm >/dev/null 2>&1 ;;
"bun") command -v bun >/dev/null 2>&1 ;;
*) return 1 ;;
esac
}
# Get audit command
get_audit_command() {
local manager="$1"
local severity="$2"
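# Example of a generated command: manager="npm", severity="high"
# yields "npm audit --json --audit-level=high"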
case "$manager" in
"npm")
local cmd="npm audit --json"
[ "$severity" != "low" ] && cmd="$cmd --audit-level=$severity"
[ "$AUTO_FIX" = true ] && cmd="$cmd --fix"
echo "$cmd"
;;
"yarn1")
local cmd="yarn audit --json"
[ "$severity" != "low" ] && cmd="$cmd --level $severity"
echo "$cmd"
;;
"yarn2")
local cmd="yarn npm audit --json"
[ "$severity" != "low" ] && cmd="$cmd --severity=$severity"
echo "$cmd"
;;
"pnpm")
local cmd="pnpm audit --json"
[ "$severity" != "low" ] && cmd="$cmd --audit-level=$severity"
echo "$cmd"
;;
"bun")
echo "bun audit --json"
;;
esac
}
# Execute command with timeout
execute_with_timeout() {
local cmd="$1"
local timeout_sec="$2"
local output_file="$3"
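# GNU timeout exits with status 124 when the command times out; run_audit checks for that code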
if command -v timeout >/dev/null 2>&1; then
timeout "$timeout_sec" bash -c "$cmd" > "$output_file" 2>&1
else
bash -c "$cmd" > "$output_file" 2>&1
fi
}
# Parse audit output
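# Echoes five space-separated counts: "critical high moderate low total", e.g. "1 2 0 4 7"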
parse_audit_output() {
local output="$1"
local manager="$2"
local critical=0 high=0 moderate=0 low=0 total=0
# Try JSON parsing first
if echo "$output" | jq -e '.metadata.vulnerabilities' >/dev/null 2>&1; then
# npm/yarn2/pnpm JSON format
critical=$(echo "$output" | jq -r '.metadata.vulnerabilities.critical // 0')
high=$(echo "$output" | jq -r '.metadata.vulnerabilities.high // 0')
moderate=$(echo "$output" | jq -r '.metadata.vulnerabilities.moderate // 0')
low=$(echo "$output" | jq -r '.metadata.vulnerabilities.low // 0')
elif echo "$output" | jq -e '.data.vulnerabilities' >/dev/null 2>&1; then
# Yarn v1 JSON format
critical=$(echo "$output" | jq -r '.data.vulnerabilities.critical // 0')
high=$(echo "$output" | jq -r '.data.vulnerabilities.high // 0')
moderate=$(echo "$output" | jq -r '.data.vulnerabilities.moderate // 0')
low=$(echo "$output" | jq -r '.data.vulnerabilities.low // 0')
else
# Fallback: parse text output
critical=$(echo "$output" | grep -o '[0-9]* critical' | grep -o '[0-9]*' || echo "0")
high=$(echo "$output" | grep -o '[0-9]* high' | grep -o '[0-9]*' || echo "0")
moderate=$(echo "$output" | grep -o '[0-9]* moderate' | grep -o '[0-9]*' || echo "0")
low=$(echo "$output" | grep -o '[0-9]* low' | grep -o '[0-9]*' || echo "0")
fi
total=$((critical + high + moderate + low))
echo "$critical $high $moderate $low $total"
}
# Validate project
validate_project() {
local project_dir="$1"
local project_name="$2"
if ! is_js_project "$project_dir"; then
log_message "verbose" "πŸ” Skipping $project_name: not a JavaScript project or not a Git repository"
return 1
fi
if is_excluded "$project_name" "$project_dir"; then
log_message "verbose" "πŸ” Skipping $project_name: matches exclusion pattern"
return 1
fi
# Check if dependencies are available (node_modules or .yarn/cache for Yarn v2+)
local has_dependencies=false
if [ -d "$project_dir/node_modules" ]; then
has_dependencies=true
elif [ -d "$project_dir/.yarn/cache" ] && [ -f "$project_dir/.yarnrc.yml" ]; then
has_dependencies=true
fi
if [ "$has_dependencies" = false ]; then
log_message "verbose" "πŸ” No dependencies found in $project_name, skipping audit"
return 1
fi
return 0
}
# Run audit for project
run_audit() {
local project_dir="$1"
local project_name="$2"
local manager="$3"
local audit_cmd=$(get_audit_command "$manager" "$SEVERITY_LEVEL")
[ -z "$audit_cmd" ] && die "No audit command available for $manager"
# Run audit
local temp_output_file=$(mktemp)
local cmd="cd '$project_dir' && $audit_cmd"
log_message "verbose" "πŸ” Running audit command: $audit_cmd (timeout: ${TIMEOUT_SECONDS}s)"
local exit_code=0
if ! execute_with_timeout "$cmd" "$TIMEOUT_SECONDS" "$temp_output_file"; then
exit_code=$?
fi
local audit_output=$(cat "$temp_output_file")
# Exit code 1 is normal when vulnerabilities are found
if [ $exit_code -eq 124 ]; then
log_message "error" "❌ Audit command timed out after ${TIMEOUT_SECONDS}s for $project_name"
return 1
elif [ $exit_code -ne 0 ] && [ $exit_code -ne 1 ]; then
log_message "error" "❌ Failed to audit $project_name (exit code: $exit_code)"
return 1
fi
# Parse and store results
local results=$(parse_audit_output "$audit_output" "$manager")
local critical=$(echo "$results" | cut -d' ' -f1)
local high=$(echo "$results" | cut -d' ' -f2)
local moderate=$(echo "$results" | cut -d' ' -f3)
local low=$(echo "$results" | cut -d' ' -f4)
local total=$(echo "$results" | cut -d' ' -f5)
if [ "$total" -gt 0 ]; then
log_message "warning" "⚠️ Found $total vulnerabilities in $project_name: $critical critical, $high high, $moderate moderate, $low low"
echo "$project_name|$critical|$high|$moderate|$low|$total" >> "$TEMP_RESULTS_FILE"
else
log_message "success" "✅ No vulnerabilities found in $project_name"
# Record a zero-count entry so the summary counts every audited project, not only vulnerable ones
echo "$project_name|0|0|0|0|0" >> "$TEMP_RESULTS_FILE"
fi
# JSON output is handled in main function
return 0
}
# Check single project
check_project() {
local project_dir="$1"
local project_name=$(basename "$project_dir")
log_message "verbose" "πŸ” Processing project: $project_name in $project_dir"
# Validate project
validate_project "$project_dir" "$project_name" || return 0
# Detect package manager
local manager=$(detect_package_manager "$project_dir")
log_message "verbose" "πŸ” Detected package manager: $manager for $project_name"
# Check if package manager is available
if ! is_package_manager_available "$manager"; then
log_message "warning" "⚠️ Package manager '$manager' not available, skipping $project_name"
return 0
fi
log_message "info" "ℹ️ Checking vulnerabilities in $project_name (using $manager)"
if [ "$DRY_RUN" = true ]; then
log_message "success" "βœ… Would check vulnerabilities in: $project_name"
return 0
fi
# Run audit
run_audit "$project_dir" "$project_name" "$manager"
}
# Generate summary report
generate_summary() {
local results_file="$1"
local silent_mode="${2:-false}"
[ ! -f "$results_file" ] && [ "$silent_mode" = false ] && log_message "info" "ℹ️ No vulnerability results to report" && return 0
local total_projects=0 projects_with_vulns=0 total_critical=0 total_high=0 total_moderate=0 total_low=0 total_vulns=0
# Count totals
while IFS='|' read -r project critical high moderate low total; do
# Plain arithmetic assignment: "((var++))" returns status 1 when var is 0, which would trip "set -e"
total_projects=$((total_projects + 1))
if [ "$total" -gt 0 ]; then
projects_with_vulns=$((projects_with_vulns + 1))
total_critical=$((total_critical + critical))
total_high=$((total_high + high))
total_moderate=$((total_moderate + moderate))
total_low=$((total_low + low))
total_vulns=$((total_vulns + total))
fi
done < "$results_file"
# Generate report
local report_lines=()
report_lines+=("=== VULNERABILITY REPORT ===")
report_lines+=("Projects scanned: $total_projects")
report_lines+=("Projects with vulnerabilities: $projects_with_vulns")
if [ "$total_vulns" -gt 0 ]; then
report_lines+=("Total vulnerabilities found: $total_vulns")
[ "$total_critical" -gt 0 ] && report_lines+=(" - Critical: $total_critical")
[ "$total_high" -gt 0 ] && report_lines+=(" - High: $total_high")
[ "$total_moderate" -gt 0 ] && report_lines+=(" - Moderate: $total_moderate")
[ "$total_low" -gt 0 ] && report_lines+=(" - Low: $total_low")
report_lines+=("")
report_lines+=("Projects with vulnerabilities:")
while IFS='|' read -r project critical high moderate low total; do
if [ "$total" -gt 0 ]; then
local severity_info=""
[ "$critical" -gt 0 ] && severity_info="${severity_info}${critical}C "
[ "$high" -gt 0 ] && severity_info="${severity_info}${high}H "
[ "$moderate" -gt 0 ] && severity_info="${severity_info}${moderate}M "
[ "$low" -gt 0 ] && severity_info="${severity_info}${low}L"
report_lines+=(" - $project: $total total ($severity_info)")
fi
done < "$results_file"
else
report_lines+=("No vulnerabilities found in any project!")
fi
# Output report
for line in "${report_lines[@]}"; do
if [ "$silent_mode" = false ]; then
case "$line" in
*"Critical:"*) log_message "error" "$line" ;;
*"High:"*) log_message "error" "$line" ;;
*"Moderate:"*) log_message "warning" "$line" ;;
*"Low:"*) log_message "info" "$line" ;;
*"No vulnerabilities found"*) log_message "success" "$line" ;;
*"Total vulnerabilities found"*) log_message "warning" "$line" ;;
*"Projects with vulnerabilities:"*) log_message "info" "$line" ;;
*"Projects scanned:"*|*"Projects with vulnerabilities:"*) log_message "info" "$line" ;;
*"=== VULNERABILITY REPORT ==="*) log_message "info" "$line" ;;
*) echo "$line" ;;
esac
else
echo "$line"
fi
done
}
# Generate JSON report
generate_json_report() {
local results_file="$1"
if [ ! -f "$results_file" ]; then
echo '{"projects": [], "summary": {"total_projects": 0, "projects_with_vulnerabilities": 0, "total_vulnerabilities": 0, "critical": 0, "high": 0, "moderate": 0, "low": 0}}'
return 0
fi
local total_projects=0
local projects_with_vulns=0
local total_critical=0
local total_high=0
local total_moderate=0
local total_low=0
local total_vulns=0
local projects_json=""
# Count totals and build projects array
while IFS='|' read -r project critical high moderate low total; do
total_projects=$((total_projects + 1))
if [ "$total" -gt 0 ]; then
projects_with_vulns=$((projects_with_vulns + 1))
total_critical=$((total_critical + critical))
total_high=$((total_high + high))
total_moderate=$((total_moderate + moderate))
total_low=$((total_low + low))
total_vulns=$((total_vulns + total))
if [ -n "$projects_json" ]; then
projects_json="$projects_json,"
fi
projects_json="$projects_json{\"name\":\"$project\",\"critical\":$critical,\"high\":$high,\"moderate\":$moderate,\"low\":$low,\"total\":$total}"
fi
done < "$results_file"
echo "{\"projects\":[$projects_json],\"summary\":{\"total_projects\":$total_projects,\"projects_with_vulnerabilities\":$projects_with_vulns,\"total_vulnerabilities\":$total_vulns,\"critical\":$total_critical,\"high\":$total_high,\"moderate\":$total_moderate,\"low\":$total_low}}"
}
# Main execution function
main() {
log_message "info" "ℹ️ Starting JavaScript vulnerability check process..."
# Parse arguments and validate
parse_args "$@"
check_dependencies
# Validate base directory
[ ! -d "$BASE_DIR" ] && die "Directory '$BASE_DIR' does not exist or is not accessible"
BASE_DIR=$(cd "$BASE_DIR" && pwd)
log_message "verbose" "πŸ” Searching for JavaScript projects in: $BASE_DIR"
# Create temporary results file
TEMP_RESULTS_FILE=$(mktemp)
# Find projects
local package_files
package_files=$(find "$BASE_DIR" -name 'package.json' -type f \
-not -path '*/node_modules/*' \
-not -path '*/.git/*' \
-not -path '*/target/*' \
-not -path '*/vendor/*' | sort)
[ -z "$package_files" ] && log_message "info" "ℹ️ No package.json files found" && exit 0
local project_count=$(echo "$package_files" | wc -l)
log_message "info" "ℹ️ Found $project_count JavaScript project(s)"
[ "$DRY_RUN" = true ] && log_message "info" "ℹ️ DRY RUN MODE - No actual vulnerability checks will be performed"
[ "$AUTO_FIX" = true ] && log_message "info" "ℹ️ AUTO FIX MODE - Will attempt to fix vulnerabilities where possible"
log_message "info" "ℹ️ Severity level: $SEVERITY_LEVEL"
log_message "info" "ℹ️ Output format: $OUTPUT_FORMAT"
# Process projects
local processed=0 failed=0
while IFS= read -r package_file; do
local project_dir=$(dirname "$package_file")
if check_project "$project_dir"; then
processed=$((processed + 1))
else
failed=$((failed + 1))
fi
done <<< "$package_files"
# Generate reports
if [ "$OUTPUT_FORMAT" = "json" ]; then
local json_output=$(generate_json_report "$TEMP_RESULTS_FILE")
if [ -n "$OUTPUT_FILE" ]; then
echo "$json_output" > "$OUTPUT_FILE"
log_message "success" "βœ… JSON report saved to: $OUTPUT_FILE"
else
echo "$json_output"
fi
else
if [ -n "$OUTPUT_FILE" ]; then
{
echo "JavaScript Vulnerability Check Report"
echo "Generated on: $(date)"
echo "Base directory: $BASE_DIR"
echo "Severity level: $SEVERITY_LEVEL"
echo "=========================================="
echo
generate_summary "$TEMP_RESULTS_FILE" true
echo
echo "=== SCAN SUMMARY ==="
echo "Projects processed: $processed"
[ $failed -gt 0 ] && echo "Projects failed: $failed"
} > "$OUTPUT_FILE"
log_message "success" "βœ… Report saved to: $OUTPUT_FILE"
fi
generate_summary "$TEMP_RESULTS_FILE"
fi
# Final status
echo
log_message "info" "ℹ️ === SCAN SUMMARY ==="
log_message "success" "βœ… Projects processed: $processed"
[ $failed -gt 0 ] && die "Projects failed: $failed"
if [ "$DRY_RUN" = true ]; then
log_message "warning" "⚠️ This was a dry run. Use without --dry-run to actually check vulnerabilities."
else
log_message "success" "βœ… JavaScript vulnerability check completed successfully!"
fi
}
# Set up cleanup trap
trap cleanup EXIT
# Execute main function
main "$@"
#!/bin/bash
# clean-go-bins.sh: Go Bins Clean Script
# Purpose: Clean bin directories from all Go projects in the current directory tree
set -e
# Simple color codes
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Global variables
VERBOSE=false
DRY_RUN=false
BASE_DIR="."
INTERACTIVE=false
FORCE=false
# Simple logging functions
log_info() {
echo -e "${BLUE}ℹ️ $1${NC}"
}
log_success() {
echo -e "${GREEN}βœ… $1${NC}"
}
log_error() {
echo -e "${RED}❌ $1${NC}" >&2
}
log_warning() {
echo -e "${YELLOW}⚠️ $1${NC}"
}
log_verbose() {
[ "$VERBOSE" = true ] && echo -e "${BLUE}πŸ” $1${NC}"
}
# Help function
show_help() {
cat << EOF
Go Bins Clean Script
USAGE:
$0 [OPTIONS] [BASE_DIRECTORY]
DESCRIPTION:
Clean bin directories from all Go projects in the specified directory tree.
Only processes directories that contain both go.mod and .git (Git repositories).
ARGUMENTS:
BASE_DIRECTORY Directory to search for Go projects (default: current directory)
OPTIONS:
-v, --verbose Enable verbose output
-d, --dry-run Show what would be done without executing
-i, --interactive Ask for confirmation before removing each bin directory
-f, --force Remove all bin directories without asking (non-interactive)
-h, --help Show this help message
EXAMPLES:
$0 # Clean all Go projects in current directory (interactive)
$0 /path/to/projects # Clean all Go projects in /path/to/projects
$0 --verbose --dry-run # Show what would be cleaned in current directory
$0 --dry-run /path/to/projects # Show what would be cleaned in /path/to/projects
$0 --force # Remove all bin directories without asking
$0 --interactive # Ask for confirmation for each bin directory (default)
EOF
}
# Parse command line arguments
parse_args() {
while [[ $# -gt 0 ]]; do
case $1 in
-v|--verbose)
VERBOSE=true
shift
;;
-d|--dry-run)
DRY_RUN=true
shift
;;
-i|--interactive)
INTERACTIVE=true
shift
;;
-f|--force)
FORCE=true
shift
;;
-h|--help)
show_help
exit 0
;;
-*)
log_error "Unknown option: $1"
show_help
exit 1
;;
*)
# This is the base directory argument
BASE_DIR="$1"
shift
;;
esac
done
# Set default interactive mode if neither force nor interactive is specified
if [ "$FORCE" = false ] && [ "$INTERACTIVE" = false ]; then
INTERACTIVE=true
fi
}
# Check if a directory is a Go project
is_go_project() {
local dir="$1"
[ -f "$dir/go.mod" ] && [ -d "$dir/.git" ]
}
# Get directory size in human readable format
get_dir_size() {
local dir="$1"
if [ -d "$dir" ]; then
du -sh "$dir" 2>/dev/null | cut -f1 || echo "unknown"
else
echo "0B"
fi
}
# Ask for user confirmation
ask_confirmation() {
local bin_dir="$1"
local project_name="$2"
local size="$3"
if [ "$DRY_RUN" = true ]; then
return 0
fi
if [ "$FORCE" = true ]; then
return 0
fi
if [ "$INTERACTIVE" = true ]; then
echo
log_warning "Found bin directory in project: $project_name"
log_info "Location: $bin_dir"
log_info "Size: $size"
echo -n "Do you want to remove this bin directory? (y/N): "
read -r response
case "$response" in
[yY]|[yY][eE][sS])
return 0
;;
*)
return 1
;;
esac
fi
return 0
}
# Clean a single project
clean_project() {
local project_dir="$1"
local project_name=$(basename "$project_dir")
log_verbose "Processing project: $project_name in $project_dir"
if ! is_go_project "$project_dir"; then
log_verbose "Skipping $project_name: not a Go project or not a Git repository"
return 0
fi
# Find all bin directories in the project
local bin_dirs
bin_dirs=$(find "$project_dir" -type d -name "bin" 2>/dev/null || true)
if [ -z "$bin_dirs" ]; then
log_verbose "No bin directories found in $project_name"
return 0
fi
local cleaned_dirs=()
local skipped_dirs=()
local failed_dirs=()
while IFS= read -r bin_dir; do
if [ -z "$bin_dir" ]; then
continue
fi
local size=$(get_dir_size "$bin_dir")
local relative_path="${bin_dir#$project_dir/}"
log_verbose "Found bin directory: $relative_path ($size) in $project_name"
if ask_confirmation "$bin_dir" "$project_name" "$size"; then
if [ "$DRY_RUN" = true ]; then
log_success "Would remove: $relative_path ($size) from $project_name"
cleaned_dirs+=("$relative_path")
else
if rm -rf "$bin_dir"; then
log_success "Removed: $relative_path ($size) from $project_name"
cleaned_dirs+=("$relative_path")
else
log_error "Failed to remove: $relative_path from $project_name"
failed_dirs+=("$relative_path")
fi
fi
else
log_info "Skipped: $relative_path from $project_name"
skipped_dirs+=("$relative_path")
fi
done <<< "$bin_dirs"
# Return status based on results
if [ ${#failed_dirs[@]} -gt 0 ]; then
return 1
fi
return 0
}
# Main execution function
main() {
log_info "Starting Go bins clean process..."
# Parse arguments
parse_args "$@"
# Validate base directory
if [ ! -d "$BASE_DIR" ]; then
log_error "Directory '$BASE_DIR' does not exist or is not accessible"
exit 1
fi
# Convert to absolute path
BASE_DIR=$(cd "$BASE_DIR" && pwd)
log_verbose "Searching for Go projects in: $BASE_DIR"
# Find all go.mod files, excluding common build directories
local go_files
go_files=$(find "$BASE_DIR" -name 'go.mod' -type f \
-not -path '*/target/*' \
-not -path '*/.git/*' \
-not -path '*/node_modules/*' \
-not -path '*/vendor/*' | sort)
if [ -z "$go_files" ]; then
log_info "No go.mod files found"
exit 0
fi
local project_count=$(echo "$go_files" | wc -l)
log_info "Found $project_count Go project(s)"
if [ "$DRY_RUN" = true ]; then
log_info "DRY RUN MODE - No actual cleaning will be performed"
fi
if [ "$FORCE" = true ]; then
log_info "FORCE MODE - All bin directories will be removed without asking"
elif [ "$INTERACTIVE" = true ]; then
log_info "INTERACTIVE MODE - You will be asked for confirmation for each bin directory"
fi
# Process projects
local processed=0
local failed=0
while IFS= read -r go_file; do
local project_dir=$(dirname "$go_file")
if clean_project "$project_dir"; then
processed=$((processed + 1))
else
failed=$((failed + 1))
fi
done <<< "$go_files"
# Generate final report
echo
log_info "=== CLEANUP REPORT ==="
log_success "Projects processed: $processed"
if [ $failed -gt 0 ]; then
log_error "Projects failed: $failed"
exit 1
fi
if [ "$DRY_RUN" = true ]; then
log_warning "This was a dry run. Use without --dry-run to actually clean directories."
else
log_success "Go bins cleanup completed successfully!"
fi
}
# Execute main function
main "$@"
#!/bin/bash
# clean-node-modules.sh: Node Modules Clean Script
# Purpose: Clean node_modules and .serverless directories from all Node.js projects in the current directory tree
set -e
# Simple color codes
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Global variables
VERBOSE=false
DRY_RUN=false
BASE_DIR="."
INCLUDE_SERVERLESS=true
# Simple logging functions
log_info() {
echo -e "${BLUE}ℹ️ $1${NC}"
}
log_success() {
echo -e "${GREEN}βœ… $1${NC}"
}
log_error() {
echo -e "${RED}❌ $1${NC}" >&2
}
log_warning() {
echo -e "${YELLOW}⚠️ $1${NC}"
}
log_verbose() {
[ "$VERBOSE" = true ] && echo -e "${BLUE}πŸ” $1${NC}"
}
# Help function
show_help() {
cat << EOF
Node Modules Clean Script
USAGE:
$0 [OPTIONS] [BASE_DIRECTORY]
DESCRIPTION:
Clean node_modules and .serverless directories from all Node.js projects in the specified directory tree.
Only processes directories that contain both package.json and .git (Git repositories).
ARGUMENTS:
BASE_DIRECTORY Directory to search for Node.js projects (default: current directory)
OPTIONS:
-v, --verbose Enable verbose output
-d, --dry-run Show what would be done without executing
--no-serverless Skip .serverless directories (only clean node_modules)
-h, --help Show this help message
EXAMPLES:
$0 # Clean all Node.js projects in current directory
$0 /path/to/projects # Clean all Node.js projects in /path/to/projects
$0 --verbose --dry-run # Show what would be cleaned in current directory
$0 --dry-run /path/to/projects # Show what would be cleaned in /path/to/projects
$0 --no-serverless # Only clean node_modules, skip .serverless
EOF
}
# Parse command line arguments
parse_args() {
while [[ $# -gt 0 ]]; do
case $1 in
-v|--verbose)
VERBOSE=true
shift
;;
-d|--dry-run)
DRY_RUN=true
shift
;;
--no-serverless)
INCLUDE_SERVERLESS=false
shift
;;
-h|--help)
show_help
exit 0
;;
-*)
log_error "Unknown option: $1"
show_help
exit 1
;;
*)
# This is the base directory argument
BASE_DIR="$1"
shift
;;
esac
done
}
# Check if a directory is a Node.js project
is_node_project() {
local dir="$1"
[ -f "$dir/package.json" ] && [ -d "$dir/.git" ]
}
# Get directory size in human readable format
get_dir_size() {
local dir="$1"
if [ -d "$dir" ]; then
du -sh "$dir" 2>/dev/null | cut -f1 || echo "unknown"
else
echo "0B"
fi
}
# Clean a single project
clean_project() {
local project_dir="$1"
local project_name=$(basename "$project_dir")
log_verbose "Processing project: $project_name in $project_dir"
if ! is_node_project "$project_dir"; then
log_verbose "Skipping $project_name: not a Node.js project or not a Git repository"
return 0
fi
local total_size=""
local dirs_to_clean=()
local sizes=()
# Check for node_modules
if [ -d "$project_dir/node_modules" ]; then
local size=$(get_dir_size "$project_dir/node_modules")
dirs_to_clean+=("node_modules")
sizes+=("$size")
total_size="$size"
fi
# Check for .serverless if enabled
if [ "$INCLUDE_SERVERLESS" = true ] && [ -d "$project_dir/.serverless" ]; then
local size=$(get_dir_size "$project_dir/.serverless")
dirs_to_clean+=(".serverless")
if [ -n "$total_size" ]; then
total_size="${total_size}+${size}"
else
total_size="$size"
fi
sizes+=("$size")
fi
if [ ${#dirs_to_clean[@]} -eq 0 ]; then
log_verbose "No directories to clean in $project_name"
return 0
fi
log_info "Cleaning project $project_name (total size: $total_size)"
local cleaned_dirs=()
local failed_dirs=()
for i in "${!dirs_to_clean[@]}"; do
local dir_name="${dirs_to_clean[$i]}"
local dir_path="$project_dir/$dir_name"
local dir_size="${sizes[$i]}"
if [ "$DRY_RUN" = true ]; then
log_success "Would remove: $dir_name ($dir_size) from $project_name"
cleaned_dirs+=("$dir_name")
else
if rm -rf "$dir_path"; then
log_success "Removed: $dir_name ($dir_size) from $project_name"
cleaned_dirs+=("$dir_name")
else
log_error "Failed to remove: $dir_name from $project_name"
failed_dirs+=("$dir_name")
fi
fi
done
# Return status based on results
if [ ${#failed_dirs[@]} -gt 0 ]; then
return 1
fi
return 0
}
# Main execution function
main() {
log_info "Starting node modules clean process..."
# Parse arguments
parse_args "$@"
# Validate base directory
if [ ! -d "$BASE_DIR" ]; then
log_error "Directory '$BASE_DIR' does not exist or is not accessible"
exit 1
fi
# Convert to absolute path
BASE_DIR=$(cd "$BASE_DIR" && pwd)
log_verbose "Searching for Node.js projects in: $BASE_DIR"
# Find all package.json files, excluding common build directories
local package_files
package_files=$(find "$BASE_DIR" -name 'package.json' -type f \
-not -path '*/node_modules/*' \
-not -path '*/.git/*' \
-not -path '*/target/*' \
-not -path '*/vendor/*' | sort)
if [ -z "$package_files" ]; then
log_info "No package.json files found"
exit 0
fi
local project_count=$(echo "$package_files" | wc -l)
log_info "Found $project_count Node.js project(s)"
if [ "$DRY_RUN" = true ]; then
log_info "DRY RUN MODE - No actual cleaning will be performed"
fi
if [ "$INCLUDE_SERVERLESS" = false ]; then
log_info "SERVERLESS DISABLED - Only cleaning node_modules directories"
fi
# Process projects
local cleaned=0
local skipped=0
local failed=0
while IFS= read -r package_file; do
local project_dir=$(dirname "$package_file")
if clean_project "$project_dir"; then
cleaned=$((cleaned + 1))
else
failed=$((failed + 1))
fi
done <<< "$package_files"
# Generate final report
echo
log_info "=== CLEANUP REPORT ==="
log_success "Projects processed: $cleaned"
if [ $failed -gt 0 ]; then
log_error "Projects failed: $failed"
exit 1
fi
if [ "$DRY_RUN" = true ]; then
log_warning "This was a dry run. Use without --dry-run to actually clean directories."
else
log_success "Node modules cleanup completed successfully!"
fi
}
# Execute main function
main "$@"
#!/bin/bash
# clean-rust-targets.sh: Rust Targets Clean Script
# Purpose: Clean Cargo build artifacts (target directories) from all Rust projects in the current directory tree
set -e
# Simple color codes
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Global variables
VERBOSE=false
DRY_RUN=false
BASE_DIR="."
# Simple logging functions
log_info() {
echo -e "${BLUE}ℹ️ $1${NC}"
}
log_success() {
echo -e "${GREEN}βœ… $1${NC}"
}
log_error() {
echo -e "${RED}❌ $1${NC}" >&2
}
log_verbose() {
[ "$VERBOSE" = true ] && echo -e "${BLUE}πŸ” $1${NC}"
}
# Help function
show_help() {
cat << EOF
Rust Targets Clean Script
USAGE:
$0 [OPTIONS] [BASE_DIRECTORY]
DESCRIPTION:
Clean Cargo build artifacts from all Rust projects in the specified directory tree.
Only processes directories that contain both Cargo.toml and .git (Git repositories).
ARGUMENTS:
BASE_DIRECTORY Directory to search for Rust projects (default: current directory)
OPTIONS:
-v, --verbose Enable verbose output
-d, --dry-run Show what would be done without executing
-h, --help Show this help message
EXAMPLES:
$0 # Clean all Rust projects in current directory
$0 /path/to/projects # Clean all Rust projects in /path/to/projects
$0 --verbose --dry-run # Show what would be cleaned in current directory
$0 --dry-run /path/to/projects # Show what would be cleaned in /path/to/projects
EOF
}
# Parse command line arguments
parse_args() {
while [[ $# -gt 0 ]]; do
case $1 in
-v|--verbose)
VERBOSE=true
shift
;;
-d|--dry-run)
DRY_RUN=true
shift
;;
-h|--help)
show_help
exit 0
;;
-*)
log_error "Unknown option: $1"
show_help
exit 1
;;
*)
# This is the base directory argument
BASE_DIR="$1"
shift
;;
esac
done
}
# Check if a directory is a Cargo workspace root
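# A workspace root declares a [workspace] table in its Cargo.toml, for example:
#   [workspace]
#   members = ["crate-a", "crate-b"]
# (member names above are illustrative)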
is_workspace_root() {
local dir="$1"
if [ -f "$dir/Cargo.toml" ]; then
# Check if Cargo.toml contains [workspace] section (exact match)
if grep -q "^\[workspace\]" "$dir/Cargo.toml" 2>/dev/null; then
return 0
fi
fi
return 1
}
# Check if a directory is a workspace member (not root)
is_workspace_member() {
local dir="$1"
if [ -f "$dir/Cargo.toml" ]; then
# Check if this Cargo.toml references workspace (is a member)
if grep -q "\.workspace\s*=" "$dir/Cargo.toml" 2>/dev/null; then
return 0
fi
fi
return 1
}
# Find the workspace root for a given directory (limited search)
find_workspace_root() {
local current_dir="$1"
local original_dir="$current_dir"
local search_depth=0
local max_depth=10 # Limit search depth for performance
while [ "$current_dir" != "/" ] && [ -n "$current_dir" ] && [ $search_depth -lt $max_depth ]; do
if is_workspace_root "$current_dir"; then
echo "$current_dir"
return 0
fi
current_dir=$(dirname "$current_dir")
search_depth=$((search_depth + 1))
done
# If no workspace found, return the original directory
echo "$original_dir"
}
# Clean a single project
clean_project() {
local project_dir="$1"
local project_name=$(basename "$project_dir")
log_verbose "Processing project: $project_name in $project_dir"
# Find the actual workspace root
local workspace_root=$(find_workspace_root "$project_dir")
local workspace_name=$(basename "$workspace_root")
# Check if the workspace root is a Git repository
if [ ! -d "$workspace_root/.git" ]; then
log_verbose "Skipping $project_name: workspace root '$workspace_name' is not a Git repository"
return 0
fi
# Only process if this is the workspace root (avoid processing subdirectories multiple times)
if [ "$project_dir" != "$workspace_root" ]; then
log_verbose "Skipping $project_name: will be processed as part of workspace '$workspace_name'"
return 0
fi
# Show target size if it exists, but process regardless
local target_size=""
if [ -d "$workspace_root/target" ]; then
target_size=" (target size: $(du -sh "$workspace_root/target" 2>/dev/null | cut -f1 || echo "unknown"))"
fi
if is_workspace_root "$workspace_root"; then
log_info "Cleaning workspace $workspace_name$target_size"
else
log_info "Cleaning project $workspace_name$target_size"
fi
if [ "$DRY_RUN" = true ]; then
if is_workspace_root "$workspace_root"; then
log_success "Would clean workspace: $workspace_name"
else
log_success "Would clean project: $workspace_name"
fi
else
if cd "$workspace_root" && cargo clean; then
if is_workspace_root "$workspace_root"; then
log_success "Cleaned workspace: $workspace_name"
else
log_success "Cleaned project: $workspace_name"
fi
else
log_error "Failed to clean: $workspace_name"
return 1
fi
fi
}
# Main execution function
main() {
log_info "Starting Rust targets clean process..."
# Parse arguments
parse_args "$@"
# Validate base directory
if [ ! -d "$BASE_DIR" ]; then
log_error "Directory '$BASE_DIR' does not exist or is not accessible"
exit 1
fi
# Convert to absolute path
BASE_DIR=$(cd "$BASE_DIR" && pwd)
log_verbose "Searching for Rust projects in: $BASE_DIR"
# Find all Cargo.toml files, excluding common build directories
local cargo_files
cargo_files=$(find "$BASE_DIR" -name 'Cargo.toml' -type f \
-not -path '*/target/*' \
-not -path '*/.git/*' \
-not -path '*/node_modules/*' \
-not -path '*/vendor/*' | sort)
if [ -z "$cargo_files" ]; then
log_info "No Cargo.toml files found"
exit 0
fi
local project_count=$(echo "$cargo_files" | wc -l)
log_info "Found $project_count Rust project(s)"
if [ "$DRY_RUN" = true ]; then
log_info "DRY RUN MODE - No actual cleaning will be performed"
fi
# Process projects
local cleaned=0
local skipped=0
local failed=0
while IFS= read -r cargo_file; do
local project_dir=$(dirname "$cargo_file")
local workspace_root=$(find_workspace_root "$project_dir")
# Skip if this is a workspace member (not root)
if is_workspace_member "$project_dir"; then
log_verbose "Skipping $project_dir: is a workspace member"
continue
fi
# Only process if this is the workspace root (avoid double counting)
if [ "$project_dir" = "$workspace_root" ]; then
# Check if the workspace root is a Git repository
if [ -d "$workspace_root/.git" ]; then
# Process regardless of target directory existence (cargo clean works without target/)
if clean_project "$project_dir"; then
cleaned=$((cleaned + 1))
else
failed=$((failed + 1))
fi
else
log_verbose "Skipping $workspace_root: not a Git repository"
skipped=$((skipped + 1))
fi
fi
done <<< "$cargo_files"
# Generate final report
echo
log_info "=== CLEANUP REPORT ==="
log_success "Projects cleaned: $cleaned"
log_info "Projects skipped: $skipped"
if [ $failed -gt 0 ]; then
log_error "Projects failed: $failed"
exit 1
fi
}
# Execute main function
main "$@"
#!/bin/bash
# maintain-git-repos.sh: Git Maintenance Script
# Purpose: Perform safe Git maintenance operations on all Git repositories in the current directory tree
# set -e # Disabled to prevent premature exits
# Simple color codes
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
MAGENTA='\033[0;35m'
NC='\033[0m' # No Color
# Global variables
VERBOSE=false
DRY_RUN=false
BASE_DIR="."
CUSTOM_COMMANDS=""
# Statistics tracking
TOTAL_REPOS=0
CLEAN_REPOS=0
DIRTY_REPOS=0
AHEAD_REPOS=0
BEHIND_REPOS=0
DIVERGED_REPOS=0
STASHED_REPOS=0
LARGE_REPOS=0
OLD_REPOS=0
# Failure tracking
FAILED_REPOS=()
# Simple logging functions
log_info() {
echo -e "${BLUE}ℹ️ $1${NC}"
}
log_success() {
echo -e "${GREEN}βœ… $1${NC}"
}
log_error() {
echo -e "${RED}❌ $1${NC}" >&2
}
log_warning() {
echo -e "${YELLOW}⚠️ $1${NC}"
}
log_verbose() {
[ "$VERBOSE" = true ] && echo -e "${BLUE}πŸ” $1${NC}"
}
log_insight() {
echo -e "${CYAN}πŸ’‘ $1${NC}"
}
log_stat() {
echo -e "${MAGENTA}πŸ“Š $1${NC}"
}
# Standardized logging functions for consistent formatting
log_repo_processing() {
local repo_name="$1"
local relative_path="$2"
local repo_size="$3"
local repo_age="$4"
local status="$5"
log_info "Processing repository: $repo_name (path: $relative_path, size: $repo_size, age: ${repo_age} days, status: $status)"
}
log_command_result() {
local cmd="$1"
local repo_name="$2"
local success="$3"
local custom_msg="$4"
if [ "$success" = true ]; then
if [ -n "$custom_msg" ]; then
log_success "$custom_msg: $repo_name"
else
log_success "Successfully executed $cmd: $repo_name"
fi
else
if [ -n "$custom_msg" ]; then
log_warning "$custom_msg: $repo_name"
else
log_warning "Failed to execute $cmd: $repo_name"
fi
fi
}
log_repo_skipped() {
local repo_name="$1"
local reason="$2"
log_verbose "Skipping $repo_name: $reason"
}
# Help function
show_help() {
cat << EOF
Git Maintenance Script
USAGE:
$0 [OPTIONS] [BASE_DIRECTORY]
DESCRIPTION:
Perform safe Git maintenance operations on all Git repositories in the specified directory tree.
Only performs read-only operations and safe optimizations that cannot break repositories.
ARGUMENTS:
BASE_DIRECTORY Directory to search for Git repositories (default: current directory)
OPTIONS:
-v, --verbose Enable verbose output
-d, --dry-run Show what would be done without executing
--commands COMMANDS Comma-separated list of Git commands to execute
-h, --help Show this help message
EXAMPLES:
$0 # Perform safe Git maintenance on all repositories
$0 /path/to/projects # Perform safe Git maintenance in /path/to/projects
$0 --verbose # Enable verbose output during maintenance
$0 --dry-run # Show what would be done without executing
$0 --commands "pull,status" # Execute custom Git commands (pull, status)
$0 --commands "fetch" # Execute only fetch command
$0 --dry-run --commands "pull,merge" # Show what custom commands would do
SAFE OPERATIONS (Default):
- git fetch --all --prune Update remote references and remove stale branches
- git gc --auto Garbage collection and optimization
- git prune Remove unreachable objects
- git fsck --no-dangling Check repository integrity
EXTRA COMMANDS (with --commands):
- pull Pull latest changes (safe with clean working directory)
- status Show repository status
- log Show commit history
- branch List branches
- remote Show remote information
- config Show configuration
- diff Show differences
- merge Merge branches (BLOCKED - too dangerous)
- rebase Rebase commits (BLOCKED - too dangerous)
- reset Reset repository state (BLOCKED - too dangerous)
- clean Clean untracked files (BLOCKED - too dangerous)
NOTE: Commands that modify repository state (merge, rebase, reset, clean) are
blocked for safety reasons. Use these commands manually if needed.
EOF
}
# Define safe operations (default commands)
get_safe_operations() {
echo "fetch,gc,prune,fsck"
}
# Check if a command requires a clean working directory
requires_clean_working_dir() {
local cmd="$1"
[ "$cmd" = "pull" ]
}
# Check if a command is potentially dangerous
is_dangerous_command() {
local cmd="$1"
case "$cmd" in
merge|rebase|reset|clean)
return 0
;;
*)
return 1
;;
esac
}
# Validate if a command is supported
is_valid_command() {
local cmd="$1"
case "$cmd" in
fetch|gc|prune|fsck|pull|status|log|branch|remote|config|diff|merge|rebase|reset|clean)
return 0
;;
*)
return 1
;;
esac
}
# Parse command line arguments
parse_args() {
while [[ $# -gt 0 ]]; do
case $1 in
-v|--verbose)
VERBOSE=true
shift
;;
-d|--dry-run)
DRY_RUN=true
shift
;;
--commands)
if [ -z "$2" ]; then
log_error "Error: --commands requires a value"
show_help
exit 1
fi
CUSTOM_COMMANDS="$2"
# Validate commands
IFS=',' read -ra COMMANDS <<< "$CUSTOM_COMMANDS"
for cmd in "${COMMANDS[@]}"; do
cmd=$(echo "$cmd" | xargs) # trim whitespace
if [ -z "$cmd" ]; then
log_error "Error: Empty command found in --commands"
exit 1
fi
done
shift 2
;;
-h|--help)
show_help
exit 0
;;
-*)
log_error "Unknown option: $1"
show_help
exit 1
;;
*)
BASE_DIR="$1"
shift
;;
esac
done
}
# Check if a directory is a Git repository
is_git_repo() {
local dir="$1"
[ -d "$dir/.git" ]
}
# Get repository size in human readable format
get_repo_size() {
local repo_dir="$1"
if [ -d "$repo_dir" ]; then
du -sh "$repo_dir" 2>/dev/null | cut -f1 || echo "unknown"
else
echo "0B"
fi
}
# Convert size string to MB for comparison
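# Illustrative conversions: "512K" -> 0 MB, "1.5G" -> 1024 MB (the decimal part is truncated before converting)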
size_to_mb() {
local size_str="$1"
local size_mb=0
# Extract number and unit
if [[ "$size_str" =~ ^([0-9]+\.?[0-9]*)([KMGTPE]?B?)$ ]]; then
local number="${BASH_REMATCH[1]}"
local unit="${BASH_REMATCH[2]}"
# Convert number to integer (truncate decimal part)
local int_number=$(echo "$number" | cut -d. -f1)
# Convert to MB based on unit
case "$unit" in
"B"|"")
size_mb=$((int_number / 1024 / 1024))
;;
"K"|"KB")
size_mb=$((int_number / 1024))
;;
"M"|"MB")
size_mb=$int_number
;;
"G"|"GB")
size_mb=$((int_number * 1024))
;;
"T"|"TB")
size_mb=$((int_number * 1024 * 1024))
;;
*)
size_mb=0
;;
esac
fi
echo "$size_mb"
}
# Get repository age (days since last commit)
get_repo_age() {
local repo_dir="$1"
if [ -d "$repo_dir/.git" ]; then
local last_commit=$(cd "$repo_dir" && git log -1 --format="%ct" 2>/dev/null || echo "0")
if [ "$last_commit" != "0" ]; then
local current_time=$(date +%s)
local days_ago=$(( (current_time - last_commit) / 86400 ))
echo "$days_ago"
else
echo "unknown"
fi
else
echo "unknown"
fi
}
# Execute a Git command and handle result
execute_git_command() {
local cmd="$1"
local repo_name="$2"
local success_msg="$3"
local fail_msg="$4"
local is_display_command="$5"
local success=false
local output=""
if [ "$is_display_command" = true ]; then
echo
log_info "=== $success_msg for $repo_name ==="
if output=$(eval "git $cmd" 2>/dev/null); then
echo "$output"
success=true
fi
echo
else
if eval "git $cmd" 2>/dev/null; then
success=true
fi
fi
if [ "$success" = true ]; then
log_command_result "" "$repo_name" true "$success_msg"
return 0
else
log_command_result "" "$repo_name" false "$fail_msg"
return 1
fi
}
# Handle command execution result
handle_command_result() {
local cmd="$1"
local success="$2"
local operations_performed_ref="$3"
local operations_failed_ref="$4"
if [ "$success" = true ]; then
eval "$operations_performed_ref+=(\"$cmd\")"
else
eval "$operations_failed_ref+=(\"$cmd\")"
fi
}
# Check if working directory is clean (reuse from analyze_repo_status)
is_working_directory_clean() {
local status_output
# Run git separately from "local" so its exit status is not masked by the declaration
if ! status_output=$(git status --porcelain 2>/dev/null); then
return 1 # Error, assume not clean
fi
# Clean only when there is no porcelain output
[ -z "$status_output" ]
}
# Analyze repository status
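# Echoes a comma-separated status string, e.g. "dirty,behind,stashed" or "clean,up_to_date"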
analyze_repo_status() {
local repo_dir="$1"
local status_info=""
if ! is_git_repo "$repo_dir"; then
echo "not_git"
return
fi
if ! cd "$repo_dir"; then
log_error "Failed to change to repository directory: $repo_dir"
return 1
fi
# Check working directory status
local working_status=0
local status_output=$(git status --porcelain 2>/dev/null)
if [ $? -eq 0 ]; then
# Check if status_output is empty (clean repo) or has content (dirty repo)
if [ -n "$status_output" ]; then
working_status=$(echo "$status_output" | wc -l)
else
working_status=0
fi
fi
if [ "$working_status" -gt 0 ]; then
status_info="${status_info}dirty,"
else
status_info="${status_info}clean,"
fi
# Check if repository is ahead/behind
local branch="detached"
local branch_output=$(git branch --show-current 2>/dev/null)
if [ $? -eq 0 ] && [ -n "$branch_output" ]; then
branch="$branch_output"
fi
if [ "$branch" != "detached" ]; then
local upstream=""
local upstream_output=$(git rev-parse --abbrev-ref --symbolic-full-name @{u} 2>/dev/null)
if [ $? -eq 0 ] && [ -n "$upstream_output" ]; then
upstream="$upstream_output"
fi
if [ -n "$upstream" ]; then
local ahead="0"
local behind="0"
local ahead_output=$(git rev-list --count @{u}..HEAD 2>/dev/null)
if [ $? -eq 0 ] && [ -n "$ahead_output" ]; then
ahead="$ahead_output"
fi
local behind_output=$(git rev-list --count HEAD..@{u} 2>/dev/null)
if [ $? -eq 0 ] && [ -n "$behind_output" ]; then
behind="$behind_output"
fi
if [ "$ahead" -gt 0 ] && [ "$behind" -gt 0 ]; then
status_info="${status_info}diverged,"
elif [ "$ahead" -gt 0 ]; then
status_info="${status_info}ahead,"
elif [ "$behind" -gt 0 ]; then
status_info="${status_info}behind,"
else
status_info="${status_info}up_to_date,"
fi
else
status_info="${status_info}no_upstream,"
fi
else
status_info="${status_info}detached,"
fi
# Check for stashes
local stash_count=0
local stash_output=$(git stash list 2>/dev/null)
# Only count when there is output; "echo | wc -l" reports 1 even for an empty stash list
if [ -n "$stash_output" ]; then
stash_count=$(echo "$stash_output" | wc -l)
fi
if [ "$stash_count" -gt 0 ]; then
status_info="${status_info}stashed,"
fi
# Remove trailing comma
status_info="${status_info%,}"
echo "$status_info"
}
# Perform maintenance on a single repository
maintain_repo() {
local repo_dir="$1"
local repo_name=$(basename "$repo_dir")
local repo_size=$(get_repo_size "$repo_dir")
local repo_age=$(get_repo_age "$repo_dir")
if ! is_git_repo "$repo_dir"; then
log_repo_skipped "$repo_name" "not a Git repository"
return 0
fi
# Analyze repository status
local status=$(analyze_repo_status "$repo_dir")
# Update statistics
((TOTAL_REPOS++))
if [[ "$status" == *"clean"* ]]; then
((CLEAN_REPOS++))
elif [[ "$status" == *"dirty"* ]]; then
((DIRTY_REPOS++))
fi
if [[ "$status" == *"ahead"* ]]; then
((AHEAD_REPOS++))
elif [[ "$status" == *"behind"* ]]; then
((BEHIND_REPOS++))
elif [[ "$status" == *"diverged"* ]]; then
((DIVERGED_REPOS++))
fi
if [[ "$status" == *"stashed"* ]]; then
((STASHED_REPOS++))
fi
# Check if repository is large (>100MB)
local size_mb=$(size_to_mb "$repo_size")
if [ "$size_mb" -gt 100 ]; then
((LARGE_REPOS++))
fi
# Check if repository is old (>30 days since last commit)
if [[ "$repo_age" =~ ^[0-9]+$ ]] && [ "$repo_age" -gt 30 ]; then
((OLD_REPOS++))
fi
# Calculate relative path from BASE_DIR
local relative_path="${repo_dir#$BASE_DIR/}"
if [ "$relative_path" = "$repo_dir" ]; then
relative_path="$repo_name"
fi
log_repo_processing "$repo_name" "$relative_path" "$repo_size" "$repo_age" "$status"
if ! cd "$repo_dir"; then
log_error "Failed to change to repository directory: $repo_dir"
return 1
fi
local operations_performed=()
local operations_failed=()
# Determine which commands to execute
local commands_to_execute=""
if [ -n "$CUSTOM_COMMANDS" ]; then
commands_to_execute="$CUSTOM_COMMANDS"
log_verbose "Using custom commands: $commands_to_execute"
else
commands_to_execute=$(get_safe_operations)
log_verbose "Using default safe operations: $commands_to_execute"
fi
# Execute commands
IFS=',' read -ra COMMANDS <<< "$commands_to_execute"
for cmd in "${COMMANDS[@]}"; do
cmd=$(echo "$cmd" | xargs) # trim whitespace
# Validate command
if ! is_valid_command "$cmd"; then
log_warning "Skipping unknown command: $cmd on $repo_name"
operations_failed+=("$cmd")
continue
fi
# Check if command requires clean working directory
if requires_clean_working_dir "$cmd"; then
if ! is_working_directory_clean; then
log_warning "Skipping $cmd on $repo_name: working directory is not clean"
operations_failed+=("$cmd")
continue
fi
fi
# Check if command is dangerous
if is_dangerous_command "$cmd"; then
log_warning "Executing potentially dangerous command: $cmd on $repo_name"
fi
# Execute the command
if [ "$DRY_RUN" = true ]; then
log_success "Would perform: git $cmd on $repo_name"
operations_performed+=("$cmd")
else
local success=false
local git_cmd=""
local success_msg=""
local fail_msg=""
local is_display=false
case "$cmd" in
fetch)
git_cmd="fetch --all --prune"
success_msg="Updated remote references"
fail_msg="Failed to fetch"
;;
gc)
git_cmd="gc --auto"
success_msg="Optimized repository"
fail_msg="Failed to optimize"
;;
prune)
git_cmd="prune"
success_msg="Cleaned unreachable objects"
fail_msg="Failed to prune"
;;
fsck)
git_cmd="fsck --no-dangling"
success_msg="Repository integrity verified"
fail_msg="Repository integrity issues found"
;;
pull)
git_cmd="pull"
success_msg="Pulled latest changes"
fail_msg="Failed to pull"
;;
status)
git_cmd="status"
success_msg="Status"
fail_msg="Failed to check status"
is_display=true
;;
log)
git_cmd="log --oneline -10"
success_msg="Recent commits"
fail_msg="Failed to check log"
is_display=true
;;
branch)
git_cmd="branch -a"
success_msg="Branches"
fail_msg="Failed to list branches"
is_display=true
;;
remote)
git_cmd="remote -v"
success_msg="Remotes"
fail_msg="Failed to list remotes"
is_display=true
;;
config)
git_cmd="config --list"
success_msg="Config"
fail_msg="Failed to list config"
is_display=true
;;
diff)
git_cmd="diff --stat"
success_msg="Diff"
fail_msg="Failed to check diff"
is_display=true
;;
merge|rebase|reset|clean)
# Capitalize first letter for better readability
local cmd_capitalized="${cmd:0:1}"
cmd_capitalized=$(echo "$cmd_capitalized" | tr '[:lower:]' '[:upper:]')
cmd_capitalized="${cmd_capitalized}${cmd:1}"
log_error "$cmd_capitalized command is too dangerous and not supported for safety reasons: $repo_name"
log_insight "Use 'git $cmd' manually if you really need to $cmd"
operations_failed+=("$cmd")
continue
;;
*)
# Generic command execution
git_cmd="$cmd"
success_msg="Executed git $cmd"
fail_msg="Failed to execute git $cmd"
;;
esac
if [ -n "$git_cmd" ]; then
if execute_git_command "$git_cmd" "$repo_name" "$success_msg" "$fail_msg" "$is_display"; then
success=true
fi
handle_command_result "$cmd" "$success" "operations_performed" "operations_failed"
fi
fi
done
# Log operations performed
if [ ${#operations_performed[@]} -gt 0 ]; then
log_verbose "Operations performed on $repo_name: ${operations_performed[*]}"
fi
# Return status based on results
if [ ${#operations_failed[@]} -gt 0 ]; then
# Add to failed repositories list
FAILED_REPOS+=("$repo_name (failed operations: ${operations_failed[*]})")
return 1
fi
return 0
}
# Generate detailed report
generate_report() {
echo
log_info "=== GIT REPOSITORY MAINTENANCE REPORT ==="
echo
# Basic statistics
log_stat "Total repositories processed: $TOTAL_REPOS"
log_stat "Clean repositories: $CLEAN_REPOS"
log_stat "Dirty repositories: $DIRTY_REPOS"
log_stat "Repositories ahead of upstream: $AHEAD_REPOS"
log_stat "Repositories behind upstream: $BEHIND_REPOS"
log_stat "Diverged repositories: $DIVERGED_REPOS"
log_stat "Repositories with stashes: $STASHED_REPOS"
log_stat "Large repositories (>100MB): $LARGE_REPOS"
log_stat "Old repositories (>30 days): $OLD_REPOS"
echo
# Insights and recommendations
log_insight "=== INSIGHTS & RECOMMENDATIONS ==="
if [ "$DIRTY_REPOS" -gt 0 ]; then
log_warning "Found $DIRTY_REPOS repositories with uncommitted changes"
log_insight "Consider reviewing and committing changes in these repositories"
fi
if [ "$AHEAD_REPOS" -gt 0 ]; then
log_info "Found $AHEAD_REPOS repositories ahead of upstream"
log_insight "Consider pushing changes to share your work"
fi
if [ "$BEHIND_REPOS" -gt 0 ]; then
log_info "Found $BEHIND_REPOS repositories behind upstream"
log_insight "Consider pulling latest changes to stay up to date"
fi
if [ "$DIVERGED_REPOS" -gt 0 ]; then
log_warning "Found $DIVERGED_REPOS diverged repositories"
log_insight "These repositories need manual attention to resolve conflicts"
fi
if [ "$STASHED_REPOS" -gt 0 ]; then
log_info "Found $STASHED_REPOS repositories with stashed changes"
log_insight "Consider reviewing and applying stashed changes"
fi
if [ "$LARGE_REPOS" -gt 0 ]; then
log_warning "Found $LARGE_REPOS large repositories (>100MB)"
log_insight "Consider using Git LFS for large files or cleaning up history"
fi
if [ "$OLD_REPOS" -gt 0 ]; then
log_info "Found $OLD_REPOS repositories with no recent activity (>30 days)"
log_insight "Consider archiving or cleaning up inactive repositories"
fi
# Health score calculation
local health_score=100
if [ "$TOTAL_REPOS" -gt 0 ]; then
# Calculate percentages with safe division
local dirty_percent=$(( (DIRTY_REPOS * 100) / TOTAL_REPOS ))
local diverged_percent=$(( (DIVERGED_REPOS * 100) / TOTAL_REPOS ))
local behind_percent=$(( (BEHIND_REPOS * 100) / TOTAL_REPOS ))
local stashed_percent=$(( (STASHED_REPOS * 100) / TOTAL_REPOS ))
local large_percent=$(( (LARGE_REPOS * 100) / TOTAL_REPOS ))
local old_percent=$(( (OLD_REPOS * 100) / TOTAL_REPOS ))
# Calculate health score with weighted penalties
# Dirty repos: -2 points per percent (high impact)
# Diverged repos: -3 points per percent (very high impact)
# Behind repos: -1 point per percent (medium impact)
# Stashed repos: -0.5 points per percent (low impact)
# Large repos: -0.5 points per percent (low impact)
# Old repos: -0.3 points per percent (very low impact)
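# Worked example: out of 10 repositories, 2 dirty (20%) and 1 behind (10%)
# give penalty = 20*2 + 10*1 = 50, so the health score is 100 - 50 = 50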
local penalty=$((
(dirty_percent * 2) +
(diverged_percent * 3) +
(behind_percent * 1) +
(stashed_percent / 2) +
(large_percent / 2) +
(old_percent * 3 / 10)
))
health_score=$(( 100 - penalty ))
# Ensure score is between 0 and 100
if [ "$health_score" -lt 0 ]; then
health_score=0
elif [ "$health_score" -gt 100 ]; then
health_score=100
fi
else
# No repositories found - set health score to 0
health_score=0
fi
echo
log_stat "Repository Health Score: $health_score/100"
if [ "$health_score" -ge 90 ]; then
log_success "Excellent repository health! πŸŽ‰"
elif [ "$health_score" -ge 70 ]; then
log_info "Good repository health πŸ‘"
elif [ "$health_score" -ge 50 ]; then
log_warning "Moderate repository health ⚠️"
else
log_error "Poor repository health - needs attention! 🚨"
fi
}
# Main execution function
main() {
log_info "Starting Git maintenance process..."
# Parse arguments
parse_args "$@"
# Validate base directory
if [ ! -d "$BASE_DIR" ]; then
log_error "Directory '$BASE_DIR' does not exist or is not accessible"
exit 1
fi
if [ "$DRY_RUN" = true ]; then
log_info "DRY RUN MODE - No actual maintenance will be performed"
fi
# Convert to absolute path
if ! BASE_DIR=$(cd "$BASE_DIR" && pwd); then
log_error "Failed to access directory: $BASE_DIR"
exit 1
fi
log_verbose "Searching for Git repositories in: $BASE_DIR"
# Find all .git directories, excluding common build directories
local git_dirs
git_dirs=$(find "$BASE_DIR" -name '.git' -type d \
-not -path '*/target/*' \
-not -path '*/node_modules/*' \
-not -path '*/vendor/*' \
-not -path '*/.git/*' | sort)
if [ -z "$git_dirs" ]; then
log_info "No Git repositories found"
exit 0
fi
local repo_count=$(echo "$git_dirs" | wc -l)
log_info "Found $repo_count Git repository(ies)"
# Process repositories
local processed=0
local failed=0
while IFS= read -r git_dir; do
local repo_dir=$(dirname "$git_dir")
if maintain_repo "$repo_dir"; then
((processed++))
else
((failed++))
fi
done <<< "$git_dirs"
# Generate final report
generate_report
echo
log_info "=== MAINTENANCE SUMMARY ==="
log_success "Repositories processed: $processed"
if [ $failed -gt 0 ]; then
log_error "Repositories failed: $failed"
echo
log_warning "Failed repositories:"
for failed_repo in "${FAILED_REPOS[@]}"; do
echo " ❌ $failed_repo"
done
echo
log_error "Some repositories had issues during maintenance"
log_info "Check the verbose output above for specific error details"
exit 1
fi
if [ "$DRY_RUN" = true ]; then
log_warning "This was a dry run. Use without --dry-run to actually perform maintenance."
else
log_success "Git maintenance completed successfully!"
fi
}
# Execute main function
main "$@"