#!/usr/bin/env bash
# docker_ecr_portaria.sh
set -euo pipefail
######################################
# Usage (examples):
# curl -fsSL https://gist.githubusercontent.com/denoww/4ed3acb2d942da7fc9c70adb5406c44d/raw \
# | sudo -E env HOME="$HOME" bash -s -- portaria "3001,9000,9001,9005" "npm run run_docker_image" production
#
# Shut down:
# sudo sh -c 'docker update --restart=no portaria && docker stop portaria'
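#
# Turn back on (a suggested inverse of the command above; assumes the container
# still exists and is named "portaria"):
# sudo sh -c 'docker update --restart=unless-stopped portaria && docker start portaria'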
######################################
### ====== DEFAULT CONFIG ======
REGION="us-east-1"
ACCOUNT_ID="516862767124"
REPO="${1}"                    # 1st arg: ECR repository
PORTS_RAW="${2}"               # 2nd arg: "3010,9000,..." or "3010:3010,9000:9000"
START_CMD="${3}"               # 3rd arg: command to run inside the container
ENVIRONMENT="${4:-production}" # 4th arg: development|production
IMAGE_TAG="latest"
CONTAINER_NAME="$REPO"
WORKDIR_ON_IMAGE="/src"        # expected path inside the container
# Tuning (can be overridden via env)
NOFILE_LIMIT="${NOFILE_LIMIT:-1048576}"
UV_THREADPOOL_SIZE="${UV_THREADPOOL_SIZE:-16}"
NODE_OPTIONS="${NODE_OPTIONS:-"--max-old-space-size=1024"}"
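# Example override of the tuning values above (illustrative values only; assumes the
# script was saved locally as docker_ecr_portaria.sh):
#   sudo -E env HOME="$HOME" NOFILE_LIMIT=262144 UV_THREADPOOL_SIZE=32 \
#     NODE_OPTIONS="--max-old-space-size=2048" \
#     bash docker_ecr_portaria.sh portaria "3001,9000" "npm run run_docker_image" production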
# Remote paths (production)
PASTA_S3="s3://sc-environments/${REPO}" # everything here is copied into ${WORKDIR_ON_IMAGE}
### ===========================
# Preserve the real HOME when running under sudo
if [[ $EUID -eq 0 && -n "${SUDO_USER:-}" ]]; then
  export HOME="$(getent passwd "$SUDO_USER" | cut -d: -f6)"
fi
REGION="${AWS_DEFAULT_REGION:-$REGION}"
AWS_OPTS=()
[[ -n "${AWS_PROFILE:-}" ]] && AWS_OPTS+=(--profile "$AWS_PROFILE")
AWS_OPTS+=(--region "$REGION")
IMAGE_URI="${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${REPO}:${IMAGE_TAG}"
ensure_sysctl_host() {
  # Apply tunings on the host (benefits host networking and usually bridge mode as well)
  local F=/etc/sysctl.d/99-socket.conf
  sudo tee "$F" >/dev/null <<'EOF'
net.core.somaxconn = 4096
net.ipv4.tcp_max_syn_backlog = 8192
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_keepalive_time = 60
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_keepalive_probes = 4
net.core.rmem_max = 134217728
net.core.wmem_max = 134217728
net.ipv4.tcp_rmem = 4096 87380 134217728
net.ipv4.tcp_wmem = 4096 65536 134217728
EOF
  sudo sysctl --system
}
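# The applied host values can be spot-checked with plain sysctl reads, e.g.:
#   sysctl net.core.somaxconn net.ipv4.tcp_max_syn_backlog net.core.rmem_max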
echo "[1/10] Instalando Docker + AWS CLI (se necessário)..."
if ! command -v docker >/dev/null 2>&1; then
curl -fsSL https://get.docker.com | sh
id -nG 2>/dev/null | grep -qw docker || usermod -aG docker "${SUDO_USER:-$USER}" || true
fi
if ! command -v aws >/dev/null 2>&1; then
apt-get update -y && apt-get install -y unzip
curl -fsSL "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o awscliv2.zip
unzip -q awscliv2.zip && ./aws/install
fi
systemctl enable --now docker >/dev/null 2>&1 || true
echo "[2/10] Validando credenciais AWS..."
aws "${AWS_OPTS[@]}" sts get-caller-identity >/dev/null 2>&1 || {
echo "ERRO: credenciais AWS inválidas ou ausentes."
echo " - Em EC2: Actions -> Security -> Modify IAM role → selecione 'Admin Access'"
echo " - Em dev: rode 'aws configure'"
exit 1
}
echo "[3/10] Login no ECR..."
aws "${AWS_OPTS[@]}" ecr get-login-password \
| docker login --username AWS --password-stdin "${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com"
echo "[4/10] Pull da imagem ${IMAGE_URI}..."
docker pull "${IMAGE_URI}"
# Temporary directory for the S3 sync
TMPDIR="$(mktemp -d)"
cleanup() { rm -rf "${TMPDIR}" || true; }
trap cleanup EXIT
echo "[5/10] Preparing (${ENVIRONMENT})..."
USE_HOST_NETWORK=0
ENV_FILE=""
case "$ENVIRONMENT" in
development|dev)
USE_HOST_NETWORK=0
echo " - [DEV] Tentando extrair /src/env_DEV da imagem para usar como --env-file..."
TMP_ENV_DEV="$(mktemp)"
if docker run --rm "${IMAGE_URI}" sh -lc 'cat /src/env_DEV 2>/dev/null || true' > "${TMP_ENV_DEV}"; then
if [[ -s "${TMP_ENV_DEV}" ]]; then
ENV_FILE="${TMP_ENV_DEV}"
echo " - [DEV] env_DEV encontrado: ${ENV_FILE}"
else
rm -f "${TMP_ENV_DEV}"
echo " - [DEV] /src/env_DEV não encontrado; seguindo sem --env-file."
fi
fi
;;
production|prod)
USE_HOST_NETWORK=1
echo " - Sincronizando ${PASTA_S3} → ${TMPDIR}/payload ..."
mkdir -p "${TMPDIR}/payload"
aws "${AWS_OPTS[@]}" s3 sync "${PASTA_S3}/" "${TMPDIR}/payload/" --delete --only-show-errors || true
if [[ -f "${TMPDIR}/payload/env_PROD" ]]; then
ENV_FILE="${TMPDIR}/payload/env_PROD"
fi
;;
*)
echo "ERRO: ENVIRONMENT inválido. Use development|production."
exit 1
;;
esac
# Apply sysctl on the host (idempotent)
echo "[6/10] Applying sysctl on the host..."
ensure_sysctl_host
echo "[7/10] Removing the old container (if it exists)..."
docker rm -f "${CONTAINER_NAME}" >/dev/null 2>&1 || true
# --- Ports (only used in dev/bridge mode; ignored in production with host networking) ---
PORT_FLAGS=()
IFS=',' read -r -a PORT_ITEMS <<< "${PORTS_RAW}"
for item in "${PORT_ITEMS[@]}"; do
  item="$(echo "$item" | xargs)"; [[ -z "$item" ]] && continue
  proto="tcp"; base="$item"
  [[ "$item" == *"/"* ]] && { base="${item%%/*}"; proto="${item##*/}"; }
  if [[ "$base" == *":"* ]]; then host="${base%%:*}"; cont="${base##*:}"; else host="$base"; cont="$base"; fi
  if [[ "$proto" == "tcp" ]]; then PORT_FLAGS+=("-p" "${host}:${cont}"); else PORT_FLAGS+=("-p" "${host}:${cont}/${proto}"); fi
done
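# Accepted PORTS_RAW formats, per the parsing above (examples):
#   "3001"           -> -p 3001:3001
#   "8080:3000"      -> -p 8080:3000
#   "9000/udp"       -> -p 9000:9000/udp
#   "3001,9000,9005" -> one -p flag per comma-separated entry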
echo "[INFO] Portas declaradas: ${PORTS_RAW}"
# Flags de sysctl por container (complementam o sysctl do host)
DOCKER_SYSCTL=(
  --sysctl net.core.somaxconn=4096
  --sysctl net.ipv4.tcp_max_syn_backlog=8192
  --sysctl net.ipv4.tcp_keepalive_time=60
  --sysctl net.ipv4.tcp_keepalive_intvl=15
  --sysctl net.ipv4.tcp_keepalive_probes=4
)
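# In bridge mode these per-container values can be verified from inside the running
# container by reading /proc (sketch; assumes the container is named "portaria"):
#   docker exec portaria cat /proc/sys/net/core/somaxconn /proc/sys/net/ipv4/tcp_keepalive_time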
echo "[8/10] Criando container ${CONTAINER_NAME} (modo $( [[ $USE_HOST_NETWORK -eq 1 ]] && echo host || echo bridge ))..."
CREATE_ARGS=(--name "${CONTAINER_NAME}" --restart unless-stopped)
# ulimit para muitas conexões
CREATE_ARGS+=(--ulimit "nofile=${NOFILE_LIMIT}:${NOFILE_LIMIT}")
# sysctl dentro do container
if [[ "$USE_HOST_NETWORK" -eq 0 ]]; then
CREATE_ARGS+=("${DOCKER_SYSCTL[@]}")
fi
# env-file e envs úteis
[[ -n "${ENV_FILE}" ]] && CREATE_ARGS+=(--env-file "${ENV_FILE}")
CREATE_ARGS+=(-e "UV_THREADPOOL_SIZE=${UV_THREADPOOL_SIZE}")
CREATE_ARGS+=(-e "NODE_OPTIONS=${NODE_OPTIONS}")
# network
if [[ "$USE_HOST_NETWORK" -eq 1 ]]; then
  CREATE_ARGS+=(--network host)
else
  CREATE_ARGS+=("${PORT_FLAGS[@]}")
fi
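# The resulting network mode can be confirmed after creation (sketch; the container
# name comes from CONTAINER_NAME, e.g. "portaria"):
#   docker inspect -f '{{.HostConfig.NetworkMode}}' portaria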
# Create the container stopped so the S3 content can be copied into the WORKDIR before it starts (production)
CID="$(
  docker create "${CREATE_ARGS[@]}" "${IMAGE_URI}" sh -lc "$START_CMD"
)"
echo " - Container created: ${CID}"
# In production: copy the S3 payload into the container's WORKDIR
if [[ "$USE_HOST_NETWORK" -eq 1 ]]; then
  echo " - Copying the S3 payload into ${WORKDIR_ON_IMAGE} inside the container..."
  # Note: the container was only created (it is not running yet), so `docker exec` cannot
  # be used here; `docker cp` works on a stopped container as long as ${WORKDIR_ON_IMAGE}
  # already exists in the image.
  if [ -d "${TMPDIR}/payload" ]; then
    docker cp "${TMPDIR}/payload/." "${CID}:${WORKDIR_ON_IMAGE}/" || true
  fi
fi
echo "[9/10] Iniciando container..."
docker start "${CID}" >/dev/null
echo "[10/10] Pronto!"
[[ -n "${ENV_FILE}" ]] && echo "ENV usado: ${ENV_FILE}"
[[ "$USE_HOST_NETWORK" -eq 1 ]] && echo "Host network ligado; payload copiado para ${WORKDIR_ON_IMAGE}."
echo "ulimit nofile (solicitado): ${NOFILE_LIMIT}"
echo "Threadpool: ${UV_THREADPOOL_SIZE} | NODE_OPTIONS: ${NODE_OPTIONS}"
echo "Logs: sudo docker logs --tail 100 -f ${CONTAINER_NAME}"