feat(install): Adds support for podman(compose) (#3673)

Daniel Bunte 2025-07-22 15:58:07 +02:00, committed by GitHub
parent d696c202df
commit abe34d09ed
21 changed files with 225 additions and 80 deletions

View File

@@ -40,16 +40,30 @@ jobs:
if: github.repository_owner == 'getsentry'
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-24.04, ubuntu-24.04-arm]
name: ${{ matrix.os == 'ubuntu-24.04-arm' && 'integration test (arm64)' || 'integration test' }}
container_engine: ['docker'] # TODO: add 'podman' into the list
name: ${{ matrix.os == 'ubuntu-24.04-arm' && (matrix.container_engine == 'docker' && 'integration test (arm64)' || 'integration test (arm64 podman)') || (matrix.container_engine == 'docker' && 'integration test' || 'integration test (podman)') }}
env:
REPORT_SELF_HOSTED_ISSUES: 0
SELF_HOSTED_TESTING_DSN: ${{ vars.SELF_HOSTED_TESTING_DSN }}
CONTAINER_ENGINE_PODMAN: ${{ matrix.container_engine == 'podman' && '1' || '0' }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install Podman
if: matrix.container_engine == 'podman'
run: |
sudo apt-get update
sudo apt-get install -y --no-install-recommends podman
# TODO: Replace the install from main below with a released podman-compose package
# once a release includes this commit: https://github.com/containers/podman-compose/commit/8206cc3ea277eee6c2e87d4cd66eba8eae3d44eb
pip3 install --user https://github.com/containers/podman-compose/archive/main.tar.gz
echo "PODMAN_COMPOSE_PROVIDER=podman-compose" >> $GITHUB_ENV
echo "PODMAN_COMPOSE_WARNING_LOGS=false" >> $GITHUB_ENV
- name: Use action from local checkout
uses: './'
with:
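For local testing outside CI, the same podman toolchain the workflow installs can be set up with a short sketch like the one below; the package, pinned tarball, and environment variables are taken from the step above, while the Ubuntu 24.04 assumption comes from the runner matrix.

```bash
# Sketch: mirror the CI podman setup on a local Ubuntu 24.04 machine.
sudo apt-get update
sudo apt-get install -y --no-install-recommends podman
# podman-compose from main, as pinned in the workflow step above
pip3 install --user https://github.com/containers/podman-compose/archive/main.tar.gz
export PODMAN_COMPOSE_PROVIDER=podman-compose
export PODMAN_COMPOSE_WARNING_LOGS=false
```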

View File

@@ -1,5 +1,7 @@
x-restart-policy: &restart_policy
restart: unless-stopped
x-pull-policy: &pull_policy
pull_policy: never
x-depends_on-healthy: &depends_on-healthy
condition: service_healthy
x-depends_on-default: &depends_on-default
@@ -15,7 +17,7 @@ x-healthcheck-defaults: &healthcheck_defaults
retries: $HEALTHCHECK_RETRIES
start_period: 10s
x-sentry-defaults: &sentry_defaults
<<: *restart_policy
<<: [*restart_policy, *pull_policy]
image: sentry-self-hosted-local
# Set the platform to build for linux/arm64 when needed on Apple silicon Macs.
platform: ${DOCKER_PLATFORM:-}
@@ -174,7 +176,7 @@ services:
timeout: 10s
retries: 30
clickhouse:
<<: *restart_policy
<<: [*restart_policy, *pull_policy]
image: clickhouse-self-hosted-local
build:
context: ./clickhouse
@@ -329,7 +331,7 @@ services:
target: /etc/symbolicator
command: run -c /etc/symbolicator/config.yml
symbolicator-cleanup:
<<: *restart_policy
<<: [*restart_policy, *pull_policy]
image: symbolicator-cleanup-self-hosted-local
build:
context: ./cron
@@ -550,7 +552,7 @@ services:
profiles:
- feature-complete
vroom-cleanup:
<<: *restart_policy
<<: [*restart_policy, *pull_policy]
image: vroom-cleanup-self-hosted-local
build:
context: ./cron
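One way to sanity-check the merged anchors is to render the resolved configuration and confirm both keys land on a service that uses *sentry_defaults; the service name below is just an example, and the expected output is approximate.

```bash
# Render the resolved compose config for one service and check the merged anchors took effect.
docker compose config web | grep -E 'restart:|pull_policy:'
# Expected (roughly): "restart: unless-stopped" and "pull_policy: never" under the web service.
```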

View File

@@ -0,0 +1,12 @@
echo "${_group}Detecting container engine ..."
if [[ "${CONTAINER_ENGINE_PODMAN:-0}" -eq 1 ]] && command -v podman &>/dev/null; then
export CONTAINER_ENGINE="podman"
elif command -v docker &>/dev/null; then
export CONTAINER_ENGINE="docker"
else
echo "FAIL: Neither podman nor docker is installed on the system."
exit 1
fi
echo "Detected container engine: $CONTAINER_ENGINE"
echo "${_endgroup}"

View File

@@ -2,6 +2,9 @@
MIN_DOCKER_VERSION='19.03.6'
MIN_COMPOSE_VERSION='2.32.2'
MIN_PODMAN_VERSION='4.9.3'
MIN_PODMAN_COMPOSE_VERSION='1.3.0'
# 16 GB minimum host RAM, but there'll be some overhead outside of what
# can be allotted to docker
if [[ "$COMPOSE_PROFILES" == "errors-only" ]]; then

View File

@@ -3,10 +3,10 @@ echo "${_group}Building and tagging Docker images ..."
echo ""
# Build any service that provides the image sentry-self-hosted-local first,
# as it is used as the base image for sentry-cleanup-self-hosted-local.
$dcb --force-rm web
$dcb web
# Build each other service individually to localize potential failures better.
for service in $($dc config --services); do
$dcb --force-rm "$service"
$dcb "$service"
done
echo ""
echo "Docker images built."

View File

@@ -2,31 +2,41 @@ echo "${_group}Checking minimum requirements ..."
source install/_min-requirements.sh
DOCKER_VERSION=$(docker version --format '{{.Server.Version}}' || echo '')
DOCKER_VERSION=$($CONTAINER_ENGINE version --format '{{.Server.Version}}' || echo '')
if [[ -z "$DOCKER_VERSION" ]]; then
echo "FAIL: Unable to get docker version, is the docker daemon running?"
echo "FAIL: Unable to get $CONTAINER_ENGINE version, is the $CONTAINER_ENGINE daemon running?"
exit 1
fi
if ! vergte ${DOCKER_VERSION//v/} $MIN_DOCKER_VERSION; then
echo "FAIL: Expected minimum docker version to be $MIN_DOCKER_VERSION but found $DOCKER_VERSION"
exit 1
if [[ "$CONTAINER_ENGINE" == "docker" ]]; then
if ! vergte ${DOCKER_VERSION//v/} $MIN_DOCKER_VERSION; then
echo "FAIL: Expected minimum docker version to be $MIN_DOCKER_VERSION but found $DOCKER_VERSION"
exit 1
fi
if ! vergte ${COMPOSE_VERSION//v/} $MIN_COMPOSE_VERSION; then
echo "FAIL: Expected minimum $dc_base version to be $MIN_COMPOSE_VERSION but found $COMPOSE_VERSION"
exit 1
fi
elif [[ "$CONTAINER_ENGINE" == "podman" ]]; then
if ! vergte ${DOCKER_VERSION//v/} $MIN_PODMAN_VERSION; then
echo "FAIL: Expected minimum podman version to be $MIN_PODMAN_VERSION but found $DOCKER_VERSION"
exit 1
fi
if ! vergte ${COMPOSE_VERSION//v/} $MIN_PODMAN_COMPOSE_VERSION; then
echo "FAIL: Expected minimum $dc_base version to be $MIN_PODMAN_COMPOSE_VERSION but found $COMPOSE_VERSION"
exit 1
fi
fi
echo "Found Docker version $DOCKER_VERSION"
echo "Found $CONTAINER_ENGINE version $DOCKER_VERSION"
echo "Found $CONTAINER_ENGINE Compose version $COMPOSE_VERSION"
if ! vergte ${COMPOSE_VERSION//v/} $MIN_COMPOSE_VERSION; then
echo "FAIL: Expected minimum $dc_base version to be $MIN_COMPOSE_VERSION but found $COMPOSE_VERSION"
exit 1
fi
echo "Found Docker Compose version $COMPOSE_VERSION"
CPU_AVAILABLE_IN_DOCKER=$(docker run --rm busybox nproc --all)
CPU_AVAILABLE_IN_DOCKER=$($CONTAINER_ENGINE run --rm busybox nproc --all)
if [[ "$CPU_AVAILABLE_IN_DOCKER" -lt "$MIN_CPU_HARD" ]]; then
echo "FAIL: Required minimum CPU cores available to Docker is $MIN_CPU_HARD, found $CPU_AVAILABLE_IN_DOCKER"
exit 1
fi
RAM_AVAILABLE_IN_DOCKER=$(docker run --rm busybox free -m 2>/dev/null | awk '/Mem/ {print $2}')
RAM_AVAILABLE_IN_DOCKER=$($CONTAINER_ENGINE run --rm busybox free -m 2>/dev/null | awk '/Mem/ {print $2}')
if [[ "$RAM_AVAILABLE_IN_DOCKER" -lt "$MIN_RAM_HARD" ]]; then
echo "FAIL: Required minimum RAM available to Docker is $MIN_RAM_HARD MB, found $RAM_AVAILABLE_IN_DOCKER MB"
exit 1
@@ -35,9 +45,9 @@ fi
#SSE4.2 required by Clickhouse (https://clickhouse.yandex/docs/en/operations/requirements/)
# On KVM, cpuinfo could falsely not report SSE 4.2 support, so skip the check. https://github.com/ClickHouse/ClickHouse/issues/20#issuecomment-226849297
# This may also happen on other virtualization software such as on VMWare ESXi hosts.
IS_KVM=$(docker run --rm busybox grep -c 'Common KVM processor' /proc/cpuinfo || :)
IS_KVM=$($CONTAINER_ENGINE run --rm busybox grep -c 'Common KVM processor' /proc/cpuinfo || :)
if [[ ! "$SKIP_SSE42_REQUIREMENTS" -eq 1 && "$IS_KVM" -eq 0 && "$DOCKER_ARCH" = "x86_64" ]]; then
SUPPORTS_SSE42=$(docker run --rm busybox grep -c sse4_2 /proc/cpuinfo || :)
SUPPORTS_SSE42=$($CONTAINER_ENGINE run --rm busybox grep -c sse4_2 /proc/cpuinfo || :)
if [[ "$SUPPORTS_SSE42" -eq 0 ]]; then
echo "FAIL: The CPU your machine is running on does not support the SSE 4.2 instruction set, which is required for one of the services Sentry uses (Clickhouse). See https://github.com/getsentry/self-hosted/issues/340 for more info."
exit 1
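The version checks above rely on a vergte helper defined elsewhere in the installer; it is not shown in this diff, but a minimal sketch of such a "version greater than or equal" comparison (using GNU sort -V, which may differ from the real helper) could look like this:

```bash
# Hypothetical sketch of vergte: succeeds when $1 >= $2 (the real helper may differ).
vergte() {
  [ "$(printf '%s\n%s\n' "$2" "$1" | sort -V | tail -n1)" = "$1" ]
}

vergte "4.9.3" "$MIN_PODMAN_VERSION" && echo "podman version OK"  # MIN_PODMAN_VERSION=4.9.3 above
```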

View File

@@ -1,10 +1,21 @@
echo "${_group}Creating volumes for persistent storage ..."
echo "Created $(docker volume create --name=sentry-clickhouse)."
echo "Created $(docker volume create --name=sentry-data)."
echo "Created $(docker volume create --name=sentry-kafka)."
echo "Created $(docker volume create --name=sentry-postgres)."
echo "Created $(docker volume create --name=sentry-redis)."
echo "Created $(docker volume create --name=sentry-symbolicator)."
create_volume() {
create_command="$CONTAINER_ENGINE volume create"
if [ "$CONTAINER_ENGINE" = "podman" ]; then
create_command="$create_command --ignore $1"
else
create_command="$create_command --name=$1"
fi
$create_command
}
echo "Created $(create_volume sentry-clickhouse)."
echo "Created $(create_volume sentry-data)."
echo "Created $(create_volume sentry-kafka)."
echo "Created $(create_volume sentry-postgres)."
echo "Created $(create_volume sentry-redis)."
echo "Created $(create_volume sentry-symbolicator)."
echo "${_endgroup}"

View File

@@ -6,17 +6,27 @@ else
_endgroup=""
fi
echo "${_group}Initializing Docker Compose ..."
echo "${_group}Initializing Docker|Podman Compose ..."
export CONTAINER_ENGINE="docker"
if [[ "${CONTAINER_ENGINE_PODMAN:-0}" -eq 1 ]]; then
if command -v podman &>/dev/null; then
export CONTAINER_ENGINE="podman"
else
echo "FAIL: Podman is not installed on the system."
exit 1
fi
fi
# To support users that are symlinking to docker-compose
dc_base="$(docker compose version --short &>/dev/null && echo 'docker compose' || echo '')"
dc_base_standalone="$(docker-compose version --short &>/dev/null && echo 'docker-compose' || echo '')"
dc_base="$(${CONTAINER_ENGINE} compose version --short &>/dev/null && echo "$CONTAINER_ENGINE compose" || echo '')"
dc_base_standalone="$(${CONTAINER_ENGINE}-compose version --short &>/dev/null && echo "$CONTAINER_ENGINE-compose" || echo '')"
COMPOSE_VERSION=$([ -n "$dc_base" ] && $dc_base version --short || echo '')
STANDALONE_COMPOSE_VERSION=$([ -n "$dc_base_standalone" ] && $dc_base_standalone version --short || echo '')
if [[ -z "$COMPOSE_VERSION" && -z "$STANDALONE_COMPOSE_VERSION" ]]; then
echo "FAIL: Docker Compose is required to run self-hosted"
echo "FAIL: Docker|Podman Compose is required to run self-hosted"
exit 1
fi
@@ -25,14 +35,57 @@ if [[ -z "$COMPOSE_VERSION" ]] || [[ -n "$STANDALONE_COMPOSE_VERSION" ]] && ! ve
dc_base="$dc_base_standalone"
fi
if [[ "$(basename $0)" = "install.sh" ]]; then
dc="$dc_base --ansi never --env-file ${_ENV}"
if [[ "$CONTAINER_ENGINE" == "podman" ]]; then
NO_ANSI="--no-ansi"
else
dc="$dc_base --ansi never"
NO_ANSI="--ansi never"
fi
if [[ "$(basename $0)" = "install.sh" ]]; then
dc="$dc_base $NO_ANSI --env-file ${_ENV}"
else
dc="$dc_base $NO_ANSI"
fi
proxy_args="--build-arg http_proxy=${http_proxy:-} --build-arg https_proxy=${https_proxy:-} --build-arg no_proxy=${no_proxy:-}"
dcr="$dc run --pull=never --rm"
if [[ "$CONTAINER_ENGINE" == "podman" ]]; then
proxy_args_dc="--podman-build-args http_proxy=${http_proxy:-},https_proxy=${https_proxy:-},no_proxy=${no_proxy:-}"
# Disable pod creation as these are one-off commands and creating a pod
# prints its pod id to stdout, which interferes with output we rely on
# in various places, such as configuration generation
dcr="$dc --profile=feature-complete --in-pod=false run --rm"
else
proxy_args_dc=$proxy_args
dcr="$dc run --pull=never --rm"
fi
dcb="$dc build $proxy_args"
dbuild="docker build $proxy_args"
dbuild="$CONTAINER_ENGINE build $proxy_args"
echo "$dcr"
# Utility function to handle --wait with docker and podman
function start_service_and_wait_ready() {
local options=()
local services=()
local found_service=0
for arg in "$@"; do
if [[ $found_service -eq 0 && "$arg" == -* ]]; then
options+=("$arg")
else
found_service=1
services+=("$arg")
fi
done
if [ "$CONTAINER_ENGINE" = "podman" ]; then
$dc up --force-recreate -d "${options[@]}" "${services[@]}"
for service in "${services[@]}"; do
while ! $CONTAINER_ENGINE ps --filter "health=healthy" | grep "$service"; do
sleep 2
done
done
else
$dc up --wait "${options[@]}" "${services[@]}"
fi
}
echo "${_endgroup}"

View File

@@ -1,3 +1,5 @@
source install/_detect-container-engine.sh
echo "${_group}Detecting Docker platform"
# Sentry SaaS uses stock Yandex ClickHouse, but they don't provide images that
@@ -12,13 +14,13 @@ echo "${_group}Detecting Docker platform"
# linux/amd64 by default due to virtualization.
# See https://github.com/docker/cli/issues/3286 for the Docker bug.
if ! command -v docker &>/dev/null; then
echo "FAIL: Could not find a \`docker\` binary on this system. Are you sure it's installed?"
exit 1
FORMAT="{{.Architecture}}"
if [[ $CONTAINER_ENGINE == "podman" ]]; then
FORMAT="{{.Host.Arch}}"
fi
export DOCKER_ARCH=$(docker info --format '{{.Architecture}}')
if [[ "$DOCKER_ARCH" = "x86_64" ]]; then
export DOCKER_ARCH=$($CONTAINER_ENGINE info --format "$FORMAT")
if [[ "$DOCKER_ARCH" = "x86_64" || "$DOCKER_ARCH" = "amd64" ]]; then
export DOCKER_PLATFORM="linux/amd64"
elif [[ "$DOCKER_ARCH" = "aarch64" ]]; then
export DOCKER_PLATFORM="linux/arm64"
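The two format strings report the architecture in different vocabularies, which is why the x86 branch now accepts both spellings; a quick manual comparison (output values are examples):

```bash
docker info --format '{{.Architecture}}'  # e.g. x86_64 or aarch64
podman info --format '{{.Host.Arch}}'     # e.g. amd64 or arm64
```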

View File

@@ -3,5 +3,7 @@
# TODO: Remove this after the next hard-stop
echo "${_group}Ensuring correct permissions on profiles directory ..."
$dcr --no-deps --entrypoint /bin/bash --user root vroom -c 'chown -R vroom:vroom /var/vroom/sentry-profiles && chmod -R o+rwx /var/vroom/sentry-profiles'
echo "${_endgroup}"

View File

@@ -6,8 +6,8 @@ fi
$dbuild -t sentry-self-hosted-jq-local --platform="$DOCKER_PLATFORM" jq
jq="docker run --rm -i sentry-self-hosted-jq-local"
sentry_cli="docker run --rm -v /tmp:/work -e SENTRY_DSN=$SENTRY_DSN getsentry/sentry-cli"
jq="$CONTAINER_ENGINE run --rm -i sentry-self-hosted-jq-local"
sentry_cli="$CONTAINER_ENGINE run --rm -v /tmp:/work -e SENTRY_DSN=$SENTRY_DSN getsentry/sentry-cli"
send_envelope() {
# Send envelope
@@ -27,7 +27,7 @@ send_event() {
local breadcrumbs=$5
local fingerprint_value=$(
echo -n "$cmd_exit $error_msg $traceback" |
docker run -i --rm busybox md5sum |
$CONTAINER_ENGINE run -i --rm busybox md5sum |
cut -d' ' -f1
)
local envelope_file="sentry-envelope-${fingerprint_value}"
@@ -151,7 +151,7 @@ fi
# Make sure we can use sentry-cli if we need it.
if [ "$REPORT_SELF_HOSTED_ISSUES" == 1 ]; then
if ! docker pull getsentry/sentry-cli:latest; then
if ! $CONTAINER_ENGINE pull getsentry/sentry-cli:latest; then
echo "Failed to pull sentry-cli, won't report to Sentry after all."
export REPORT_SELF_HOSTED_ISSUES=0
fi

View File

@@ -21,7 +21,7 @@ install_geoip() {
else
echo "IP address geolocation is configured for updates."
echo "Updating IP address geolocation database ... "
if ! docker run --rm -v "./geoip:/sentry" --entrypoint '/usr/bin/geoipupdate' "ghcr.io/maxmind/geoipupdate:v6.1.0" "-d" "/sentry" "-f" "/sentry/GeoIP.conf"; then
if ! $CONTAINER_ENGINE run --rm -v "./geoip:/sentry" --entrypoint '/usr/bin/geoipupdate' "ghcr.io/maxmind/geoipupdate:v6.1.0" "-d" "/sentry" "-f" "/sentry/GeoIP.conf"; then
result='Error'
fi
echo "$result updating IP address geolocation database."

View File

@@ -4,7 +4,7 @@ show_help() {
cat <<EOF
Usage: $0 [options]
Install Sentry with \`docker compose\`.
Install Sentry with \`docker|podman compose\`.
Options:
-h, --help Show this message and exit.
@@ -29,6 +29,8 @@ Options:
--no-report-self-hosted-issues
Do not report error and performance data about your
self-hosted instance upstream to Sentry.
--container-engine-podman
Use podman as the container engine.
EOF
}
@@ -46,6 +48,7 @@ MINIMIZE_DOWNTIME="${MINIMIZE_DOWNTIME:-}"
SKIP_COMMIT_CHECK="${SKIP_COMMIT_CHECK:-}"
REPORT_SELF_HOSTED_ISSUES="${REPORT_SELF_HOSTED_ISSUES:-}"
SKIP_SSE42_REQUIREMENTS="${SKIP_SSE42_REQUIREMENTS:-}"
CONTAINER_ENGINE_PODMAN="${CONTAINER_ENGINE_PODMAN:-}"
while (($#)); do
case "$1" in
@@ -67,6 +70,7 @@ while (($#)); do
--report-self-hosted-issues) REPORT_SELF_HOSTED_ISSUES=1 ;;
--no-report-self-hosted-issues) REPORT_SELF_HOSTED_ISSUES=0 ;;
--skip-sse42-requirements) SKIP_SSE42_REQUIREMENTS=1 ;;
--container-engine-podman) CONTAINER_ENGINE_PODMAN=1 ;;
--) ;;
*)
echo "Unexpected argument: $1. Use --help for usage information."

View File

@@ -2,7 +2,7 @@ echo "${_group}Setting up / migrating database ..."
if [[ -z "${SKIP_SENTRY_MIGRATIONS:-}" ]]; then
# Fixes https://github.com/getsentry/self-hosted/issues/2758, where a migration fails due to indexing issue
$dc up --wait postgres
start_service_and_wait_ready postgres
os=$($dc exec postgres cat /etc/os-release | grep 'ID=debian')
if [[ -z $os ]]; then

View File

@@ -14,7 +14,7 @@ if [[ "${SETUP_JS_SDK_ASSETS:-}" == "1" ]]; then
$dbuild -t sentry-self-hosted-jq-local --platform="$DOCKER_PLATFORM" jq
jq="docker run --rm -i sentry-self-hosted-jq-local"
jq="$CONTAINER_ENGINE run --rm -i sentry-self-hosted-jq-local"
loader_registry=$($dcr --no-deps --rm -T web cat /usr/src/sentry/src/sentry/loader/_registry.json)
# The `loader_registry` should start with "Updating certificates...", we want to delete that and the subsequent ca-certificates related lines.

View File

@@ -5,7 +5,16 @@ if [[ -n "$MINIMIZE_DOWNTIME" ]]; then
$dc rm -fsv $($dc config --services | grep -v -E '^(nginx|relay)$')
else
# Clean up old stuff and ensure nothing is working while we install/update
$dc down -t $STOP_TIMEOUT --rmi local --remove-orphans
if [ "$CONTAINER_ENGINE" = "podman" ]; then
$dc down -t $STOP_TIMEOUT --remove-orphans
dangling_images=$($CONTAINER_ENGINE images --quiet --filter dangling=true)
if [ -n "$dangling_images" ]; then
# Remove dangling images
$CONTAINER_ENGINE rmi -f $dangling_images
fi
else
$dc down -t $STOP_TIMEOUT --rmi local --remove-orphans
fi
fi
echo "${_endgroup}"

View File

@@ -1,14 +1,20 @@
echo "${_group}Fetching and updating Docker images ..."
echo "${_group}Fetching and updating $CONTAINER_ENGINE images ..."
# We tag locally built images with a '-self-hosted-local' suffix. `docker
# compose pull` tries to pull these too and shows a 404 error on the console
# which is confusing and unnecessary. To overcome this, we add the
# stderr>stdout redirection below and pass it through grep, ignoring all lines
# having this '-onpremise-local' suffix.
if [ "$CONTAINER_ENGINE" = "podman" ]; then
# podman compose doesn't have the --ignore-pull-failures option, so we just
# run the command normally
$dc --profile feature-complete pull || true
else
# We tag locally built images with a '-self-hosted-local' suffix. `docker
# compose pull` tries to pull these too and shows a 404 error on the console
# which is confusing and unnecessary. To overcome this, we add the
# stderr>stdout redirection below and pass it through grep, ignoring all lines
# having this '-onpremise-local' suffix.
$dc pull -q --ignore-pull-failures 2>&1 | grep -v -- -self-hosted-local || true
$dc pull --ignore-pull-failures 2>&1 | grep -v -- -self-hosted-local || true
fi
# We may not have the set image on the repo (local images) so allow fails
docker pull ${SENTRY_IMAGE} || true
$CONTAINER_ENGINE pull ${SENTRY_IMAGE} || true
echo "${_endgroup}"

View File

@@ -1,19 +1,28 @@
echo "${_group}Upgrading Clickhouse ..."
# First check to see if user is upgrading by checking for existing clickhouse volume
if $dc ps -a | grep -q clickhouse; then
if [ "$CONTAINER_ENGINE" = "podman" ]; then
ps_command="$dc ps"
build_arg="--podman-build-args"
else
# docker compose needs to be run with the -a flag to show all containers
ps_command="$dc ps -a"
build_arg="--build-arg"
fi
if $ps_command | grep -q clickhouse; then
# Start clickhouse if it is not already running
$dc up --wait clickhouse
start_service_and_wait_ready clickhouse
# In order to get to 23.8, we first need to upgrade from 21.8 -> 22.8 -> 23.3 -> 23.8
version=$($dc exec clickhouse clickhouse-client -q 'SELECT version()')
if [[ "$version" == "21.8.13.1.altinitystable" || "$version" == "21.8.12.29.altinitydev.arm" ]]; then
$dc down clickhouse
$dcb --build-arg BASE_IMAGE=altinity/clickhouse-server:22.8.15.25.altinitystable clickhouse
$dc up --wait clickhouse
$dcb $build_arg BASE_IMAGE=altinity/clickhouse-server:22.8.15.25.altinitystable clickhouse
start_service_and_wait_ready clickhouse
$dc down clickhouse
$dcb --build-arg BASE_IMAGE=altinity/clickhouse-server:23.3.19.33.altinitystable clickhouse
$dc up --wait clickhouse
$dcb $build_arg BASE_IMAGE=altinity/clickhouse-server:23.3.19.33.altinitystable clickhouse
start_service_and_wait_ready clickhouse
else
echo "Detected clickhouse version $version. Skipping upgrades!"
fi
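Because the ps flags and build-argument syntax differ between the two compose implementations, the same upgrade step expands differently under each engine; roughly (using one of the images pinned above):

```bash
# Roughly how the two branches expand (flags taken from the script above):
#   docker: docker compose ps -a
#           docker compose build --build-arg BASE_IMAGE=altinity/clickhouse-server:22.8.15.25.altinitystable clickhouse
#   podman: podman compose ps
#           podman compose build --podman-build-args BASE_IMAGE=altinity/clickhouse-server:22.8.15.25.altinitystable clickhouse
```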

View File

@@ -1,26 +1,26 @@
echo "${_group}Ensuring proper PostgreSQL version ..."
if [[ -n "$(docker volume ls -q --filter name=sentry-postgres)" && "$(docker run --rm -v sentry-postgres:/db busybox cat /db/PG_VERSION 2>/dev/null)" == "9.6" ]]; then
docker volume rm sentry-postgres-new || true
if [[ -n "$($CONTAINER_ENGINE volume ls -q --filter name=sentry-postgres)" && "$($CONTAINER_ENGINE run --rm -v sentry-postgres:/db busybox cat /db/PG_VERSION 2>/dev/null)" == "9.6" ]]; then
$CONTAINER_ENGINE volume rm sentry-postgres-new || true
# If this is Postgres 9.6 data, start upgrading it to 14.0 in a new volume
docker run --rm \
$CONTAINER_ENGINE run --rm \
-v sentry-postgres:/var/lib/postgresql/9.6/data \
-v sentry-postgres-new:/var/lib/postgresql/14/data \
tianon/postgres-upgrade:9.6-to-14
# Get rid of the old volume as we'll rename the new one to that
docker volume rm sentry-postgres
docker volume create --name sentry-postgres
$CONTAINER_ENGINE volume rm sentry-postgres
$CONTAINER_ENGINE volume create --name sentry-postgres
# There's no rename volume in Docker so copy the contents from old to new name
# Also append the `host all all all trust` line as `tianon/postgres-upgrade:9.6-to-14`
# doesn't do that automatically.
docker run --rm -v sentry-postgres-new:/from -v sentry-postgres:/to alpine ash -c \
$CONTAINER_ENGINE run --rm -v sentry-postgres-new:/from -v sentry-postgres:/to alpine ash -c \
"cd /from ; cp -av . /to ; echo 'host all all all trust' >> /to/pg_hba.conf"
# Finally, remove the new old volume as we are all in sentry-postgres now.
docker volume rm sentry-postgres-new
$CONTAINER_ENGINE volume rm sentry-postgres-new
echo "Re-indexing due to glibc change, this may take a while..."
echo "Starting up new PostgreSQL version"
$dc up --wait postgres
start_service_and_wait_ready postgres
# Wait for postgres
RETRIES=5

View File

@@ -2,15 +2,15 @@ if [[ "$MINIMIZE_DOWNTIME" ]]; then
echo "${_group}Waiting for Sentry to start ..."
# Start the whole setup, except nginx and relay.
$dc up --wait --remove-orphans $($dc config --services | grep -v -E '^(nginx|relay)$')
start_service_and_wait_ready --remove-orphans $($dc config --services | grep -v -E '^(nginx|relay)$')
$dc restart relay
$dc exec -T nginx nginx -s reload
docker run --rm --network="${COMPOSE_PROJECT_NAME}_default" alpine ash \
$CONTAINER_ENGINE run --rm --network="${COMPOSE_PROJECT_NAME}_default" alpine ash \
-c 'while [[ "$(wget -T 1 -q -O- http://web:9000/_health/)" != "ok" ]]; do sleep 0.5; done'
# Make sure everything is up. This should only touch relay and nginx
$dc up --wait
start_service_and_wait_ready $($dc config --services)
echo "${_endgroup}"
else
@@ -22,7 +22,15 @@ else
if [[ "${_ENV}" =~ ".env.custom" ]]; then
echo " $dc_base --env-file .env --env-file ${_ENV} up --wait"
else
echo " $dc_base up --wait"
if [[ "$CONTAINER_ENGINE" == "podman" ]]; then
if [[ "$COMPOSE_PROFILES" == "feature-complete" ]]; then
echo " $dc_base --profile=feature-complete up --force-recreate -d"
else
echo " $dc_base up --force-recreate -d"
fi
else
echo " $dc_base up --wait"
fi
fi
echo ""
echo "-----------------------------------------------------------------"

View File

@@ -23,8 +23,8 @@ on the host filesystem. Commands that write files should write them to the '/sen
# Actual invocation that runs the command in the container.
invocation() {
$dc up postgres --wait
$dc up redis --wait
start_service_and_wait_ready postgres
start_service_and_wait_ready redis
$dcr --no-deps -v "$VOLUME_MAPPING" -T -e SENTRY_LOG_LEVEL=CRITICAL web "$@" 2>&1
}