# AWS Kali Linux AMI Builder

Automated script to convert a Parallels Desktop Kali Linux VM into an AWS AMI (Amazon Machine Image) for cloud deployment.

## 🎯 Purpose

Converts your local Parallels Kali Linux VM into a deployable AWS AMI, enabling you to run your Kali environment on EC2.
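A minimal usage sketch, assuming the script below is saved as `build-kali-ami.sh` (the gist does not name the file) and that the AWS CLI, the Parallels command-line tools (`prlctl`, `prl_disk_tool`), and Homebrew are installed, with AWS credentials that can manage S3, IAM, and EC2:

```bash
# Adjust the CONFIGURATION block first (VM name, S3 bucket, disk path, region),
# then run the script on the Mac that hosts the Parallels VM.
chmod +x build-kali-ami.sh
./build-kali-ami.sh
```

The full script follows.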
#!/bin/bash
set -e

# --- CONFIGURATION ---
PARALLELS_VM_NAME="Kali Linux 2024.2 ARM64"  # Name of your Parallels VM
RAW_IMAGE="kali-exported.raw"
S3_BUCKET="domhallan-kali-linux-bucket"
AWS_REGION="us-east-1"
# shellcheck disable=SC2034
PACKER_TEMPLATE="kali-aws.json"
# shellcheck disable=SC2034
USER_DATA="user-data.sh"
IMPORT_ROLE_NAME="vmimport"
PARALLELS_DISK_PATH="/Users/domhallan/Parallels/Kali Linux 2024.2 ARM64.pvm/kali-linux-2024.2-0.hdd"
TEMP_VMDK="kali-temp.vmdk"

# --- 0. CREATE S3 BUCKET IF NEEDED ---
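# NOTE: the region check below exists because S3 CreateBucket must be called
# WITHOUT a LocationConstraint in us-east-1, while every other region requires one.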
if ! aws s3api head-bucket --bucket "$S3_BUCKET" 2>/dev/null; then
  echo "[*] Creating S3 bucket $S3_BUCKET..."
  if [ "$AWS_REGION" = "us-east-1" ]; then
    aws s3api create-bucket --bucket "$S3_BUCKET" --region "$AWS_REGION"
  else
    aws s3api create-bucket --bucket "$S3_BUCKET" --region "$AWS_REGION" --create-bucket-configuration LocationConstraint="$AWS_REGION"
  fi
else
  echo "[*] S3 bucket $S3_BUCKET already exists."
fi

# --- 0b. SET BUCKET POLICY FOR VM IMPORT/EXPORT ---
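# This policy grants the VM Import/Export service principal (vmie.amazonaws.com)
# read access to the uploaded image (and write access, which is only used for exports).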
echo "[*] Setting S3 bucket policy for VM Import/Export..." | |
aws s3api put-bucket-policy --bucket "$S3_BUCKET" --policy '{ | |
"Version": "2012-10-17", | |
"Statement": [ | |
{ | |
"Sid": "AllowVMImportExport", | |
"Effect": "Allow", | |
"Principal": { "Service": "vmie.amazonaws.com" }, | |
"Action": [ | |
"s3:GetBucketLocation", | |
"s3:GetObject", | |
"s3:ListBucket", | |
"s3:PutObject" | |
], | |
"Resource": [ | |
"arn:aws:s3:::'"$S3_BUCKET"'", | |
"arn:aws:s3:::'"$S3_BUCKET"'/*" | |
] | |
} | |
] | |
}' | |
# --- 0c. CREATE VM IMPORT/EXPORT ROLE IF NEEDED --- | |
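# "vmimport" is the role name the import-image API assumes by default when no
# --role-name is supplied, so keep IMPORT_ROLE_NAME set to "vmimport" unless
# you also pass --role-name to the import-image call further down.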
if ! aws iam get-role --role-name "$IMPORT_ROLE_NAME" 2>/dev/null; then
  echo "[*] Creating IAM role $IMPORT_ROLE_NAME for VM Import/Export..."
  aws iam create-role --role-name "$IMPORT_ROLE_NAME" --assume-role-policy-document '{
    "Version": "2012-10-17",
    "Statement": [
      {
        "Effect": "Allow",
        "Principal": { "Service": "vmie.amazonaws.com" },
        "Action": "sts:AssumeRole"
      }
    ]
  }'
  aws iam put-role-policy --role-name "$IMPORT_ROLE_NAME" --policy-name "$IMPORT_ROLE_NAME" --policy-document '{
    "Version": "2012-10-17",
    "Statement": [
      {
        "Effect": "Allow",
        "Action": [
          "s3:GetBucketLocation",
          "s3:GetObject",
          "s3:ListBucket"
        ],
        "Resource": [
          "arn:aws:s3:::'"$S3_BUCKET"'",
          "arn:aws:s3:::'"$S3_BUCKET"'/*"
        ]
      },
      {
        "Effect": "Allow",
        "Action": [
          "ec2:ModifySnapshotAttribute",
          "ec2:CopySnapshot",
          "ec2:RegisterImage",
          "ec2:Describe*"
        ],
        "Resource": "*"
      }
    ]
  }'
else
  echo "[*] IAM role $IMPORT_ROLE_NAME already exists."
fi

# --- 1. PREPARE THE VM ---
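# Stopping the VM quiesces the virtual disk before it is converted; the
# "|| echo" keeps the script running if the VM is already powered off.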
echo "[*] Stopping the VM for clean export..." | |
prlctl stop "$PARALLELS_VM_NAME" || echo "[!] VM was not running or failed to stop" | |
# --- 2. CONVERT PARALLELS DISK TO PLAIN FORMAT --- | |
echo "[*] Converting Parallels HDD to plain format..." | |
# First, convert to plain format (this removes snapshots and makes it a single file) | |
prl_disk_tool convert --hdd "$PARALLELS_DISK_PATH" --plain | |
# --- 3. EXPORT TO VMDK FORMAT (More reliable for AWS import) --- | |
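# A Parallels .hdd is actually a macOS bundle (a directory); the real disk data
# lives in a .hds file inside it, which is why the script locates that file with
# find before handing it to qemu-img. VM Import/Export accepts VMDK, VHD/VHDX,
# OVA, and raw images, so VMDK is a safe target format.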
echo "[*] Exporting to VMDK format..." | |
# Use qemu-img to convert to VMDK (you may need to install qemu) | |
if command -v qemu-img &> /dev/null; then | |
# Find the actual disk file inside the .hdd bundle | |
DISK_FILE=$(find "$PARALLELS_DISK_PATH" -name "*.hds" | head -1) | |
if [ -z "$DISK_FILE" ]; then | |
echo "[!] Could not find .hds file in $PARALLELS_DISK_PATH" | |
echo "[*] Available files:" | |
ls -la "$PARALLELS_DISK_PATH" | |
exit 1 | |
fi | |
echo "[*] Found disk file: $DISK_FILE" | |
echo "[*] Converting to VMDK format using qemu-img..." | |
qemu-img convert -f raw "$DISK_FILE" -O vmdk "$TEMP_VMDK" | |
# Use VMDK for upload instead of RAW | |
UPLOAD_FILE="$TEMP_VMDK" | |
UPLOAD_NAME="kali-exported.vmdk" | |
else | |
echo "[!] qemu-img not found. Installing via Homebrew..." | |
if command -v brew &> /dev/null; then | |
brew install qemu | |
# Find the actual disk file inside the .hdd bundle | |
DISK_FILE=$(find "$PARALLELS_DISK_PATH" -name "*.hds" | head -1) | |
if [ -z "$DISK_FILE" ]; then | |
echo "[!] Could not find .hds file in $PARALLELS_DISK_PATH" | |
echo "[*] Available files:" | |
ls -la "$PARALLELS_DISK_PATH" | |
exit 1 | |
fi | |
echo "[*] Found disk file: $DISK_FILE" | |
echo "[*] Converting to VMDK format using qemu-img..." | |
qemu-img convert -f raw "$DISK_FILE" -O vmdk "$TEMP_VMDK" | |
# Use VMDK for upload instead of RAW | |
UPLOAD_FILE="$TEMP_VMDK" | |
UPLOAD_NAME="kali-exported.vmdk" | |
else | |
echo "[!] Neither qemu-img nor Homebrew found. Please install qemu-img manually." | |
echo "[*] Alternative: Extract the .hds file manually from the .hdd bundle:" | |
echo " find '$PARALLELS_DISK_PATH' -name '*.hds'" | |
echo " cp <found_hds_file> '$RAW_IMAGE'" | |
exit 1 | |
fi | |
fi | |
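
# Optional sanity check (uncomment to inspect the converted image before upload):
# qemu-img info "$TEMP_VMDK"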
# --- 4. UPLOAD IMAGE TO S3 ---
echo "[*] Uploading image to S3..."
aws s3 cp "$UPLOAD_FILE" "s3://$S3_BUCKET/$UPLOAD_NAME" --region "$AWS_REGION"

# --- 5. CREATE DISK IMPORT TASK (Alternative to Packer) ---
echo "[*] Creating disk import task..."
IMPORT_TASK_ID=$(aws ec2 import-image \
  --description "Kali Linux 2024.2 ARM64 Import" \
  --disk-containers "Format=VMDK,UserBucket={S3Bucket=$S3_BUCKET,S3Key=$UPLOAD_NAME}" \
  --region "$AWS_REGION" \
  --query 'ImportTaskId' \
  --output text)
echo "[*] Import task created with ID: $IMPORT_TASK_ID"
echo "[*] Monitor the import progress with:"
echo "    aws ec2 describe-import-image-tasks --import-task-ids $IMPORT_TASK_ID --region $AWS_REGION"

# --- 6. WAIT FOR IMPORT TO COMPLETE (Optional) ---
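# The task's Status stays "active" while AWS converts the image and moves to
# "completed" on success or "deleting"/"deleted" on failure; the StatusMessage
# field (not queried here) carries the detailed reason when an import is rejected.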
echo "[*] Waiting for import to complete..." | |
while true; do | |
STATUS=$(aws ec2 describe-import-image-tasks \ | |
--import-task-ids "$IMPORT_TASK_ID" \ | |
--region "$AWS_REGION" \ | |
--query 'ImportImageTasks[0].Status' \ | |
--output text) | |
echo "[*] Import status: $STATUS" | |
if [ "$STATUS" = "completed" ]; then | |
AMI_ID=$(aws ec2 describe-import-image-tasks \ | |
--import-task-ids "$IMPORT_TASK_ID" \ | |
--region "$AWS_REGION" \ | |
--query 'ImportImageTasks[0].ImageId' \ | |
--output text) | |
echo "[*] Import completed! AMI ID: $AMI_ID" | |
break | |
elif [ "$STATUS" = "deleted" ] || [ "$STATUS" = "deleting" ]; then | |
echo "[!] Import failed or was cancelled." | |
aws ec2 describe-import-image-tasks \ | |
--import-task-ids "$IMPORT_TASK_ID" \ | |
--region "$AWS_REGION" | |
exit 1 | |
fi | |
sleep 30 | |
done | |
# --- 7. CLEANUP --- | |
echo "[*] Cleaning up temporary files..." | |
rm -f "$TEMP_VMDK" | |
echo "[*] Done! Your Kali Linux AMI ($AMI_ID) is ready in region $AWS_REGION." | |
echo "[*] You can now launch instances using this AMI." |