# syntax=docker/dockerfile:1.4
# Docker image for InvokeAI development.
#
# Design choices:
# - Do not use cache mounts for uv and pnpm. Developers may want to update packages, and updates
#   are faster when the caches stay on the same filesystem. It also avoids dealing with mount permissions.
# - Do not use multiple build stages, as we want all the build tools to remain available in the final image.
FROM library/ubuntu:24.04
ENV CONTAINER_UID=${CONTAINER_UID:-1000}
ENV CONTAINER_GID=${CONTAINER_GID:-1000}
ENV INVOKEAI_ROOT=/invokeai
ENV INVOKEAI_HOST=0.0.0.0
ENV INVOKEAI_PORT=9090
ENV PNPM_HOME="/pnpm"
ENV VIRTUAL_ENV=/opt/venv
ENV PATH="${VIRTUAL_ENV}/bin:${PNPM_HOME}:$PATH"
ENV INVOKEAI_SRC=/opt/invokeai
ENV PYTHON_VERSION=3.11
ENV UV_PYTHON=${PYTHON_VERSION}
ENV UV_COMPILE_BYTECODE=1
ENV UV_PROJECT_ENVIRONMENT="${VIRTUAL_ENV}"
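# UV_PROJECT_ENVIRONMENT points `uv sync` at the shared virtualenv above instead of creating a per-project .venv.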
# PyTorch wheel index for CUDA packages; overridden below for CPU-only and ROCm builds.
ENV UV_INDEX="https://download.pytorch.org/whl/cu124"
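# Example of selecting a non-CUDA backend at build time (an illustrative invocation; the image tag is arbitrary):
#   docker build --build-arg GPU_DRIVER=rocm -t invokeai-dev .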
ARG GPU_DRIVER=cuda
# TARGETPLATFORM is predefined by BuildKit, but must be declared here to be visible in RUN steps below.
ARG TARGETPLATFORM
# unused but available
ARG BUILDPLATFORM
ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked <<EOF
set -e
apt-get --quiet update
apt-get --quiet install --assume-yes --no-install-recommends --autoremove \
    ca-certificates \
    build-essential \
    git \
    curl \
    bzip2 \
    libopencv-dev \
    libstdc++-10-dev
EOF
# Link amdgpu.ids for ROCm builds
# contributed by https://github.com/Rubonnek
RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
    ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
# Prepare directories for the `ubuntu` user; we switch to that user later to work around
# dependency issues with uv-installed python.
RUN <<EOF
mkdir -p ${VIRTUAL_ENV}
mkdir -p ${INVOKEAI_SRC}
chmod -R a+w /opt
mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}
mkdir -p ${PNPM_HOME} && chown ubuntu: ${PNPM_HOME}
mkdir ~ubuntu/.cache && chown ubuntu: ~ubuntu/.cache
# for patchmatch build; spell out both paths because /bin/sh (dash) does not do brace expansion
ln -sf --relative /usr/lib/$(uname -p)-linux-gnu/pkgconfig/opencv4.pc /usr/lib/$(uname -p)-linux-gnu/pkgconfig/opencv.pc
EOF
# Install `pnpm` for JS package management
RUN <<EOF
set -e
curl -fsSL https://get.pnpm.io/install.sh | env PNPM_VERSION=8.15.9 ENV="$HOME/.shrc" SHELL="$(which sh)" sh -
pnpm env use --global 22
chown -R ubuntu: ${PNPM_HOME}
EOF
# Install `uv` for Python package management
COPY --from=ghcr.io/astral-sh/uv:0.6.3 /uv /uvx /bin/
USER ubuntu
# Install python
RUN uv python install ${PYTHON_VERSION}
WORKDIR ${INVOKEAI_SRC}
# Install the project's dependencies as a separate layer so they aren't rebuilt on every commit.
# Bind-mount instead of copying to defer adding the sources to the image until the next layer.
#
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
# x86_64/CUDA is the default
RUN --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
    --mount=type=bind,source=invokeai/version,target=invokeai/version <<EOF
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then
    UV_INDEX="https://download.pytorch.org/whl/cpu";
elif [ "$GPU_DRIVER" = "rocm" ]; then
    UV_INDEX="https://download.pytorch.org/whl/rocm6.1";
fi
uv sync --extra=docs --extra=dev --extra=test --no-install-project
EOF
# Now that the bulk of the dependencies have been installed, copy in the project files that change more frequently.
COPY --chown=ubuntu:ubuntu invokeai invokeai
COPY --chown=ubuntu:ubuntu pyproject.toml .
RUN --mount=type=bind,source=pyproject.toml,target=pyproject.toml <<EOF
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then
    UV_INDEX="https://download.pytorch.org/whl/cpu";
elif [ "$GPU_DRIVER" = "rocm" ]; then
    UV_INDEX="https://download.pytorch.org/whl/rocm6.1";
fi
uv sync --extra=docs --extra=dev --extra=test
# patchmatch builds on import
python -c "from patchmatch import patch_match"
EOF
WORKDIR ${INVOKEAI_SRC}/invokeai/frontend/web/
RUN pnpm install --frozen-lockfile
RUN pnpm exec vite build
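# The image defines no ENTRYPOINT or CMD, so supply a command at `docker run` time.
# Illustrative usage (a sketch; the image tag, host path, and GPU flags below are examples only;
# `--gpus all` requires the NVIDIA Container Toolkit and only applies to CUDA builds):
#   docker build -t invokeai-dev .
#   docker run --rm -it --gpus all -p 9090:9090 -v $HOME/invokeai:/invokeai invokeai-dev bash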