# =========================================================================================
# This program is free software; you can redistribute it and/or modify it under the terms
# of the GNU Lesser General Public License as published by the Free Software Foundation;
# either version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with this
# program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
# =========================================================================================
# prevent branch pipeline when an MR is open (prefer MR pipeline)
- if: '$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS'
when: never
- if: '$CI_COMMIT_MESSAGE =~ "/\[(ci skip|skip ci) on ([^],]*,)*tag(,[^],]*)*\]/" && $CI_COMMIT_TAG'
when: never
- if: '$CI_COMMIT_MESSAGE =~ "/\[(ci skip|skip ci) on ([^],]*,)*branch(,[^],]*)*\]/" && $CI_COMMIT_BRANCH'
when: never
- if: '$CI_COMMIT_MESSAGE =~ "/\[(ci skip|skip ci) on ([^],]*,)*mr(,[^],]*)*\]/" && $CI_MERGE_REQUEST_ID'
when: never
- if: '$CI_COMMIT_MESSAGE =~ "/\[(ci skip|skip ci) on ([^],]*,)*default(,[^],]*)*\]/" && $CI_COMMIT_REF_NAME =~ $CI_DEFAULT_BRANCH'
when: never
- if: '$CI_COMMIT_MESSAGE =~ "/\[(ci skip|skip ci) on ([^],]*,)*prod(,[^],]*)*\]/" && $CI_COMMIT_REF_NAME =~ $PROD_REF'
when: never
- if: '$CI_COMMIT_MESSAGE =~ "/\[(ci skip|skip ci) on ([^],]*,)*integ(,[^],]*)*\]/" && $CI_COMMIT_REF_NAME =~ $INTEG_REF'
when: never
- if: '$CI_COMMIT_MESSAGE =~ "/\[(ci skip|skip ci) on ([^],]*,)*dev(,[^],]*)*\]/" && $CI_COMMIT_REF_NAME !~ $PROD_REF && $CI_COMMIT_REF_NAME !~ $INTEG_REF'
when: never
# in all other cases: run the pipeline
- when: always
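# Example (illustrative): a commit message containing "[ci skip on mr,branch]" skips both
# merge-request and branch pipelines, while tag and default-branch pipelines still run.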
# test job prototype: implement adaptive pipeline rules
.test-policy:
rules:
# on tag: auto & failing
- if: $CI_COMMIT_TAG
# on ADAPTIVE_PIPELINE_DISABLED: auto & failing
- if: '$ADAPTIVE_PIPELINE_DISABLED == "true"'
# on production or integration branch(es): auto & failing
- if: '$CI_COMMIT_REF_NAME =~ $PROD_REF || $CI_COMMIT_REF_NAME =~ $INTEG_REF'
# early stage (dev branch, no MR): manual & non-failing
- if: '$CI_MERGE_REQUEST_ID == null && $CI_OPEN_MERGE_REQUESTS == null'
when: manual
allow_failure: true
# Draft MR: auto & non-failing
- if: '$CI_MERGE_REQUEST_TITLE =~ /^Draft:.*/'
allow_failure: true
# else (Ready MR): auto & failing
- when: on_success
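# Usage sketch (illustrative): test jobs typically prepend their own exclusion rule and then
# inherit this policy, e.g.:
#   rules:
#     - if: '$MY_TEST_DISABLED == "true"'   # hypothetical disable switch
#       when: never
#     - !reference [.test-policy, rules]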
# variabilized tracking image
TBC_TRACKING_IMAGE: "$CI_REGISTRY/to-be-continuous/tools/tracking:master"
DOCKER_LINT_IMAGE: "registry.hub.docker.com/projectatomic/dockerfile-lint:latest"
DOCKER_HADOLINT_IMAGE: "registry.hub.docker.com/hadolint/hadolint:latest-alpine"
DOCKER_IMAGE: "registry.hub.docker.com/library/docker:latest"
DOCKER_DIND_IMAGE: "registry.hub.docker.com/library/docker:dind"
DOCKER_KANIKO_IMAGE: "gcr.io/kaniko-project/executor:debug"
DOCKER_SKOPEO_IMAGE: "quay.io/skopeo/stable:latest"
DOCKER_BUILDAH_IMAGE: "quay.io/buildah/stable:latest"
# for backward compatibility (deprecated & undocumented)
DOCKER_DOCKERFILE_PATH: "."
DOCKER_FILE: "$DOCKER_DOCKERFILE_PATH/Dockerfile"
DOCKER_CONFIG_FILE: ".docker/config.json"
# When testing the image HEALTHCHECK (test stage), how long (in seconds) to wait for the HealthCheck status (https://docs.docker.com/engine/reference/builder/#healthcheck)
DOCKER_HEALTHCHECK_TIMEOUT: "60"
# Default Docker config uses the internal GitLab registry
DOCKER_SNAPSHOT_IMAGE: "$CI_REGISTRY_IMAGE/snapshot:$CI_COMMIT_REF_SLUG"
DOCKER_RELEASE_IMAGE: "$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME"
DOCKER_KANIKO_VERBOSITY: "info"
DOCKER_TRIVY_SECURITY_LEVEL_THRESHOLD: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
DOCKER_TRIVY_IMAGE: "registry.hub.docker.com/aquasec/trivy:latest"
DOCKER_TRIVY_ARGS: "--ignore-unfixed --vuln-type os --exit-on-eol 1"
# SBOM generation image and arguments
DOCKER_SBOM_IMAGE: "registry.hub.docker.com/anchore/syft:debug"
DOCKER_SBOM_OPTS: "--catalogers rpm-db-cataloger,alpmdb-cataloger,apkdb-cataloger,dpkgdb-cataloger,portage-cataloger"
# default: one-click publish
DOCKER_PROD_PUBLISH_STRATEGY: manual
DOCKER_RELEASE_EXTRA_TAGS_PATTERN: "^v?(?P<major>[0-9]+)\\.(?P<minor>[0-9]+)\\.(?P<patch>[0-9]+)(?P<suffix>(?P<prerelease>-[0-9A-Za-z-\\.]+)?(?P<build>\\+[0-9A-Za-z-\\.]+)?)$"
PROD_REF: '/^(master|main)$/'
# default integration ref name (pattern)
INTEG_REF: '/^develop$/'
# don't use CI_PROJECT_TITLE, kaniko doesn't support space in argument right now (https://github.com/GoogleContainerTools/kaniko/issues/1231)
DOCKER_METADATA: >-
--label org.opencontainers.image.url=${CI_PROJECT_URL}
--label org.opencontainers.image.source=${CI_PROJECT_URL}
--label org.opencontainers.image.title=${CI_PROJECT_PATH}
--label org.opencontainers.image.ref.name=${CI_COMMIT_REF_NAME}
--label org.opencontainers.image.revision=${CI_COMMIT_SHA}
--label org.opencontainers.image.created=${CI_JOB_STARTED_AT}
# default to kaniko, possible options : kaniko|buildah|dind
DOCKER_BUILD_TOOL:
value: "kaniko"
options:
- "kaniko"
- "buildah"
- "dind"
description: "The build tool to use for building container image"
# ==================================================
# Stages definition
# ==================================================
stages:
- build
- package-build
- package-test
- publish
# ==================================================
# Base Jobs definition
# ==================================================
.docker-scripts: &docker-scripts |
# BEGSCRIPT
set -e
function log_info() {
echo -e "[\\e[1;94mINFO\\e[0m] $*"
}
function log_warn() {
echo -e "[\\e[1;93mWARN\\e[0m] $*"
}
function log_error() {
echo -e "[\\e[1;91mERROR\\e[0m] $*"
}
function fail() {
log_error "$*"
exit 1
}
function install_custom_ca_certs() {
certs="${CUSTOM_CA_CERTS:-$DEFAULT_CA_CERTS}"
if [[ -z "$certs" ]]
then
return
fi
# import in system for regular linux (Ubuntu, Debian) image
if [[ -f /etc/ssl/certs/ca-certificates.crt ]]
then
echo "$certs" | tr -d '\r' >> /etc/ssl/certs/ca-certificates.crt
log_info "Custom CA certificates imported in \\e[33;1m/etc/ssl/certs/ca-certificates.crt\\e[0m"
# import in system for regular linux (Fedora, RHEL) image (e.g. Skopeo image)
elif [[ -f /etc/ssl/certs/ca-bundle.crt ]]
then
echo "$certs" | tr -d '\r' >> /etc/ssl/certs/ca-bundle.crt
log_info "Custom CA certificates imported in \\e[33;1m/etc/ssl/certs/ca-bundle.crt\\e[0m"
# kaniko image : specific directory for ca certificates, no standard import tool
elif [[ -d /kaniko/ssl/certs ]]
then
echo "$certs" | tr -d '\r' >> /kaniko/ssl/certs/ca-certificates.crt
log_info "Custom CA certificates configured in \\e[33;1m/kaniko/ssl/certs/ca-certificates.crt\\e[0m"
fi
}
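# Example (illustrative): $CUSTOM_CA_CERTS (or $DEFAULT_CA_CERTS) is expected to hold one or more
# PEM-encoded certificates ("-----BEGIN CERTIFICATE-----" blocks); they are appended to the system
# CA bundle of whichever image the job runs in.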
function unscope_variables() {
_scoped_vars=$(env | awk -F '=' "/^scoped__[a-zA-Z0-9_]+=/ {print \$1}" | sort)
if [[ -z "$_scoped_vars" ]]; then return; fi
log_info "Processing scoped variables..."
for _scoped_var in $_scoped_vars
do
_fields=${_scoped_var//__/:}
_condition=$(echo "$_fields" | cut -d: -f3)
case "$_condition" in
if) _not="";;
ifnot) _not=1;;
*)
log_warn "... unrecognized condition \\e[1;91m$_condition\\e[0m in \\e[33;1m${_scoped_var}\\e[0m"
continue
;;
esac
_target_var=$(echo "$_fields" | cut -d: -f2)
_cond_var=$(echo "$_fields" | cut -d: -f4)
_cond_val=$(eval echo "\$${_cond_var}")
_test_op=$(echo "$_fields" | cut -d: -f5)
case "$_test_op" in
defined)
if [[ -z "$_not" ]] && [[ -z "$_cond_val" ]]; then continue;
elif [[ "$_not" ]] && [[ "$_cond_val" ]]; then continue;
fi
;;
equals|startswith|endswith|contains|in|equals_ic|startswith_ic|endswith_ic|contains_ic|in_ic)
# comparison operator
# sluggify actual value
_cond_val=$(echo "$_cond_val" | tr '[:punct:]' '_')
# retrieve comparison value
_cmp_val_prefix="scoped__${_target_var}__${_condition}__${_cond_var}__${_test_op}__"
_cmp_val=${_scoped_var#"${_cmp_val_prefix}"}
# manage 'ignore case'
if [[ "$_test_op" == *_ic ]]
then
# lowercase everything
_cond_val=$(echo "$_cond_val" | tr '[:upper:]' '[:lower:]')
_cmp_val=$(echo "$_cmp_val" | tr '[:upper:]' '[:lower:]')
fi
case "$_test_op" in
equals*)
if [[ -z "$_not" ]] && [[ "$_cond_val" != "$_cmp_val" ]]; then continue;
elif [[ "$_not" ]] && [[ "$_cond_val" == "$_cmp_val" ]]; then continue;
if [[ -z "$_not" ]] && [[ "$_cond_val" != "$_cmp_val"* ]]; then continue;
elif [[ "$_not" ]] && [[ "$_cond_val" == "$_cmp_val"* ]]; then continue;
if [[ -z "$_not" ]] && [[ "$_cond_val" != *"$_cmp_val" ]]; then continue;
elif [[ "$_not" ]] && [[ "$_cond_val" == *"$_cmp_val" ]]; then continue;
if [[ -z "$_not" ]] && [[ "$_cond_val" != *"$_cmp_val"* ]]; then continue;
elif [[ "$_not" ]] && [[ "$_cond_val" == *"$_cmp_val"* ]]; then continue;
if [[ -z "$_not" ]] && [[ "__${_cmp_val}__" != *"__${_cond_val}__"* ]]; then continue;
elif [[ "$_not" ]] && [[ "__${_cmp_val}__" == *"__${_cond_val}__"* ]]; then continue;
fi
;;
esac
;;
*)
log_warn "... unrecognized test operator \\e[1;91m${_test_op}\\e[0m in \\e[33;1m${_scoped_var}\\e[0m"
continue
;;
esac
# matches
_val=$(eval echo "\$${_target_var}")
log_info "... apply \\e[32m${_target_var}\\e[0m from \\e[32m\$${_scoped_var}\\e[0m${_val:+ (\\e[33;1moverwrite\\e[0m)}"
_val=$(eval echo "\$${_scoped_var}")
export "${_target_var}"="${_val}"
done
log_info "... done"
}
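# Example (illustrative): a project variable named
#   scoped__DOCKER_BUILD_ARGS__if__CI_COMMIT_REF_SLUG__equals__main
# exports its value as DOCKER_BUILD_ARGS only when $CI_COMMIT_REF_SLUG equals "main".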
# evaluate and export a secret
# - $1: secret variable name
function eval_secret() {
name=$1
value=$(eval echo "\$${name}")
# create the /tmp directory (it is required by the mktemp command)
mkdir -p /tmp
case "$value" in
@b64@*)
decoded=$(mktemp)
errors=$(mktemp)
if echo "$value" | cut -c6- | base64 -d > "${decoded}" 2> "${errors}"
then
# shellcheck disable=SC2086
export ${name}="$(cat ${decoded})"
log_info "Successfully decoded base64 secret \\e[33;1m${name}\\e[0m"
else
fail "Failed decoding base64 secret \\e[33;1m${name}\\e[0m:\\n$(sed 's/^/... /g' "${errors}")"
fi
;;
@hex@*)
decoded=$(mktemp)
errors=$(mktemp)
if echo "$value" | cut -c6- | sed 's/\([0-9A-F]\{2\}\)/\\\\x\1/gI' | xargs printf > "${decoded}" 2> "${errors}"
then
# shellcheck disable=SC2086
export ${name}="$(cat ${decoded})"
log_info "Successfully decoded hexadecimal secret \\e[33;1m${name}\\e[0m"
else
fail "Failed decoding hexadecimal secret \\e[33;1m${name}\\e[0m:\\n$(sed 's/^/... /g' "${errors}")"
fi
;;
@url@*)
url=$(echo "$value" | cut -c6-)
if command -v curl > /dev/null
then
decoded=$(mktemp)
errors=$(mktemp)
if curl -s -S -f --connect-timeout 5 -o "${decoded}" "$url" 2> "${errors}"
then
# shellcheck disable=SC2086
export ${name}="$(cat ${decoded})"
log_info "Successfully curl'd secret \\e[33;1m${name}\\e[0m"
else
log_warn "Failed getting secret \\e[33;1m${name}\\e[0m:\\n$(sed 's/^/... /g' "${errors}")"
fi
elif command -v wget > /dev/null
then
decoded=$(mktemp)
errors=$(mktemp)
if wget -T 5 -O "${decoded}" "$url" 2> "${errors}"
then
# shellcheck disable=SC2086
export ${name}="$(cat ${decoded})"
log_info "Successfully wget'd secret \\e[33;1m${name}\\e[0m"
else
log_warn "Failed getting secret \\e[33;1m${name}\\e[0m:\\n$(sed 's/^/... /g' "${errors}")"
fi
else
log_warn "Couldn't get secret \\e[33;1m${name}\\e[0m: no http client found"
fi
;;
esac
}
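# Example (illustrative): a project variable MY_TOKEN="@b64@bXktc2VjcmV0" is exported by
# eval_all_secrets as MY_TOKEN="my-secret"; the @hex@ and @url@ prefixes work the same way.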
function eval_all_secrets() {
encoded_vars=$(env | grep -v '^scoped__' | awk -F '=' '/^[a-zA-Z0-9_]*=@(b64|hex|url)@/ {print $1}')
for var in $encoded_vars
do
eval_secret "$var"
done
}
function wait_for_docker_daemon() {
log_info "Wait for Docker daemon..."
# shellcheck disable=SC2034
for i in $(seq 1 30); do
if ! docker info &> /dev/null; then
log_info "... not responding: wait"
sleep 2
else
log_info "... ready: continue"
return
fi
done
fail "... timeout reached: halt"
}
# substitute ${VAR} or %{VAR} placeholders in the input stream with environment variable values
function awkenvsubst() {
awk '{while(match($0,"[$%]{[^}]*}")) {var=substr($0,RSTART+2,RLENGTH-3);val=ENVIRON[var]; gsub(/["\\]/,"\\\\&",val); gsub("\n","\\n",val);gsub("\r","\\r",val); gsub("[$%]{"var"}",val)}}1'
}
function configure_registries_auth() {
docker_snapshot_authent_token=$(echo -n "${DOCKER_REGISTRY_SNAPSHOT_USER:-${DOCKER_REGISTRY_USER:-$CI_REGISTRY_USER}}:${DOCKER_REGISTRY_SNAPSHOT_PASSWORD:-${DOCKER_REGISTRY_PASSWORD:-$CI_REGISTRY_PASSWORD}}" | base64 | tr -d '\n')
docker_snapshot_registry_host=$(echo "$DOCKER_SNAPSHOT_IMAGE" | cut -d/ -f1)
export docker_snapshot_authent_token
export docker_snapshot_registry_host
docker_release_authent_token=$(echo -n "${DOCKER_REGISTRY_RELEASE_USER:-${DOCKER_REGISTRY_USER:-$CI_REGISTRY_USER}}:${DOCKER_REGISTRY_RELEASE_PASSWORD:-${DOCKER_REGISTRY_PASSWORD:-$CI_REGISTRY_PASSWORD}}" | base64 | tr -d '\n')
docker_release_registry_host=$(echo "$DOCKER_RELEASE_IMAGE" | cut -d/ -f1)
export docker_release_authent_token
export docker_release_registry_host
docker_snapshot_config_json=$(echo -n "{\"auths\":{\"$docker_snapshot_registry_host\":{\"auth\":\"$docker_snapshot_authent_token\"},\"HttpHeaders\":{\"User-Agent\":\"$USER_AGENT\"}}}")
docker_release_config_json=$(echo -n "{\"auths\":{\"$docker_release_registry_host\":{\"auth\":\"$docker_release_authent_token\"},\"HttpHeaders\":{\"User-Agent\":\"$USER_AGENT\"}}}")
# Create the configuration file for Docker and Kaniko
BUILDTOOL_HOME=${BUILDTOOL_HOME:-$HOME}
mkdir -p "$BUILDTOOL_HOME/.docker"
if [ -f "${DOCKER_CONFIG_FILE}" ]
then
awkenvsubst < "${DOCKER_CONFIG_FILE}" > "$BUILDTOOL_HOME/.docker/config.json"
else
echo "${docker_snapshot_config_json}" > "$BUILDTOOL_HOME/.docker/config.json"
fi
# Create the configuration file for Skopeo
mkdir -p "$BUILDTOOL_HOME/skopeo/.docker"
echo "${docker_snapshot_config_json}" > "$BUILDTOOL_HOME/skopeo/.docker/src-config.json"
echo "${docker_release_config_json}" > "$BUILDTOOL_HOME/skopeo/.docker/dest-config.json"
log_info "Docker authentication configured for \\e[33;1m${docker_snapshot_registry_host}\\e[0m"
}
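# Example (illustrative): a committed .docker/config.json template may reference variables with
# ${VAR} or %{VAR} placeholders (e.g. a hypothetical %{DOCKER_REGISTRY_AUTH_TOKEN}); awkenvsubst
# expands them at job time, otherwise a default config is generated from the registry credentials.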
# autodetects whether there is a hadolint config file
function autoconfig_hadolint() {
# If present, import hadolint config found inside the git repository
_cfg=$(ls -1 "hadolint.yaml" 2>/dev/null || ls -1 ".hadolint.yaml" 2>/dev/null || echo "")
if [[ -f "$_cfg" ]]
log_info "Using custom Hadolint config (\\e[33;1m${_cfg}\\e[0m)"
export hadolint_config_opts="--config $_cfg"
else
log_info "No Hadolint config found: use default"
fi
}
function create_kaniko_cache_dir() {
# create cache directory if needed
mkdir -p "${CI_PROJECT_DIR}/.cache"
}
function init_workspace() {
install_custom_ca_certs
eval_all_secrets
configure_registries_auth
}
# evaluate the context path
function docker_context_path() {
if [[ "$DOCKER_CONTEXT_PATH" ]]
then
# $DOCKER_CONTEXT_PATH is explicit: use it
echo "$DOCKER_CONTEXT_PATH"
else
# $DOCKER_CONTEXT_PATH unset or empty: assume it's relative to the Dockerfile
dirname "$DOCKER_FILE"
fi
}
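# Example (illustrative): with DOCKER_FILE="docker/Dockerfile" and DOCKER_CONTEXT_PATH unset, the
# build context resolves to "docker"; set DOCKER_CONTEXT_PATH="." to keep the repository root.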
function run_build_kaniko() {
docker_image=$1
kaniko_snapshot_image_cache="${KANIKO_SNAPSHOT_IMAGE_CACHE:-${DOCKER_SNAPSHOT_IMAGE%:*}/cache}"
shift
if [[ -n "$DOCKER_REGISTRY_MIRROR" ]]
then
# shellcheck disable=SC2001,SC2086
kaniko_registry_mirror_option="--registry-mirror $(echo ${DOCKER_REGISTRY_MIRROR} | sed "s|^https*://||")"
fi
log_info "Build & deploy image $docker_image"
log_info "Kaniko command: /kaniko/executor --context $(docker_context_path) --dockerfile $DOCKER_FILE --destination $docker_image --cache --cache-dir=${CI_PROJECT_DIR}/.cache --cache-repo=${kaniko_snapshot_image_cache} --verbosity $DOCKER_KANIKO_VERBOSITY $kaniko_registry_mirror_option $DOCKER_METADATA $DOCKER_BUILD_ARGS $*"
/kaniko/executor --context "$(docker_context_path)" --dockerfile "$DOCKER_FILE" --destination "$docker_image" --cache --cache-dir="${CI_PROJECT_DIR}/.cache" --cache-repo="${kaniko_snapshot_image_cache}" --verbosity $DOCKER_KANIKO_VERBOSITY $kaniko_registry_mirror_option $DOCKER_METADATA $DOCKER_BUILD_ARGS "$@"
}
# Used by container tools like buildah and skopeo.
function configure_containers_registries() {
if [[ -n "$CONTAINER_REGISTRIES_CONFIG_FILE" ]]
then
BUILDTOOL_HOME=${BUILDTOOL_HOME:-$HOME}
mkdir -p "$BUILDTOOL_HOME/.config/containers"
echo "${CONTAINER_REGISTRIES_CONFIG_FILE}" > "$BUILDTOOL_HOME/.config/containers/registries.conf"
log_info "Configured $BUILDTOOL_HOME/.config/containers/registries.conf"
fi
}
function publish_extra_tags() {
if [[ -z "$DOCKER_RELEASE_EXTRA_TAGS" ]]
then
return
fi
# check if tag matches pattern
# shellcheck disable=SC2154
matches=$(python3 -c "import re;print('match' if re.match(r'$DOCKER_RELEASE_EXTRA_TAGS_PATTERN', '$docker_tag') else '')")
if [[ "$matches" ]]
then
# apply extra tags substitution
extra_tags=$(python3 -c "import re;print(re.sub(r'$DOCKER_RELEASE_EXTRA_TAGS_PATTERN', r'$DOCKER_RELEASE_EXTRA_TAGS', '$docker_tag'))")
log_info "Pushing extra tags (evaluated from original tag \\e[33;1m${docker_tag}\\e[0m)..."
for extra_tag in $extra_tags
do
log_info "... pushing extra tag: \\e[33;1m${extra_tag}\\e[0m..."
# shellcheck disable=SC2086,SC2154
skopeo copy --src-authfile "$BUILDTOOL_HOME/skopeo/.docker/dest-config.json" --dest-authfile "$BUILDTOOL_HOME/skopeo/.docker/dest-config.json" ${DOCKER_PUBLISH_ARGS} "docker://$DOCKER_RELEASE_IMAGE" "docker://$docker_repository:$extra_tag"
done
else
log_info "Extra tags configured, but the released tag (\\e[33;1m${docker_tag}\\e[0m) doesn't match \$DOCKER_RELEASE_EXTRA_TAGS_PATTERN..."
fi
}
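# Example (illustrative): with the default pattern and DOCKER_RELEASE_EXTRA_TAGS="\g<major> \g<major>.\g<minor> latest",
# releasing the "1.2.3" tag also pushes the "1", "1.2" and "latest" tags.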
init_workspace
# ENDSCRIPT
.docker-base:
services:
command: ["--service", "docker", "5.5.1"]
before_script:
- *docker-scripts
.docker-kaniko-base:
extends: .docker-base
image:
name: "$DOCKER_KANIKO_IMAGE"
entrypoint: [""]
variables:
cache:
key: "$CI_COMMIT_REF_SLUG-docker"
paths:
- .cache
before_script:
- *docker-scripts
- create_kaniko_cache_dir
.docker-dind-base:
extends: .docker-base
image: $DOCKER_IMAGE
variables:
# disable TLS between the Docker client and the Docker daemon: https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-disabled
DOCKER_HOST: tcp://docker:2375
DOCKER_TLS_CERTDIR: ""
# expose the DEFAULT_CA_CERTS and CUSTOM_CA_CERTS variables to the service (we MUST use different variable names)
_DEFAULT_CA_CERTS: "${DEFAULT_CA_CERTS}"
_CUSTOM_CA_CERTS: "${CUSTOM_CA_CERTS}"
_TRACE: "${TRACE}"
services:
command: ["--service", "docker", "5.5.1"]
- name: $DOCKER_DIND_IMAGE
alias: docker
command:
- /bin/sh
- -c
- |
if [[ -n "${_CUSTOM_CA_CERTS:-$_DEFAULT_CA_CERTS}" ]]; then echo "${_CUSTOM_CA_CERTS:-$_DEFAULT_CA_CERTS}" | tr -d '\r' >> /etc/ssl/certs/ca-certificates.crt; fi || exit
if [[ -n "${_TRACE}" ]]; then echo "Here is the list of all CAs that are trusted by the Docker daemon:"; cat /etc/ssl/certs/ca-certificates.crt; fi
if [[ -n "${DOCKER_REGISTRY_MIRROR}" ]]; then dockerd-entrypoint.sh --registry-mirror ${DOCKER_REGISTRY_MIRROR}; else dockerd-entrypoint.sh; fi || exit
before_script:
- *docker-scripts
- if ! wait_for_docker_daemon; then fail "Docker-in-Docker is not enabled on this runner. Either use a Docker-in-Docker capable runner, or disable this job by setting \$DOCKER_BUILD_TOOL to a different value"; fi
# ==================================================
# Stage: build
# ==================================================
# lint job: checks the Dockerfile syntax against best-practice rules
docker-lint:
image: "$DOCKER_LINT_IMAGE"
extends: .docker-base
stage: build
dependencies: []
script:
- dockerfile_lint -f $DOCKER_FILE $DOCKER_LINT_ARGS
rules:
# disable if DOCKER_LINT_ENABLED not set
- if: '$DOCKER_LINT_ENABLED != "true"'
when: never
- !reference [.test-policy, rules]
docker-hadolint:
image:
name: "$DOCKER_HADOLINT_IMAGE"
entrypoint: [""]
extends: .docker-base
stage: build
dependencies: []
script:
- autoconfig_hadolint
- dockerfile_hash=$(echo "$DOCKER_FILE" | md5sum | cut -d" " -f1)
# Output in Code Climate format (GitLab integration)
- hadolint --no-fail -f gitlab_codeclimate $DOCKER_HADOLINT_ARGS $hadolint_config_opts "$DOCKER_FILE" > "reports/docker-hadolint-${dockerfile_hash}.codeclimate.json"
# Output in JSON format
- |
if [[ "$DEFECTDOJO_HADOLINT_REPORTS" ]]
then
hadolint --no-fail -f json $DOCKER_HADOLINT_ARGS $hadolint_config_opts "$DOCKER_FILE" > "reports/docker-hadolint-${dockerfile_hash}.native.json"
fi
# last run with console output (with failure)
- hadolint $DOCKER_HADOLINT_ARGS $hadolint_config_opts "$DOCKER_FILE"
artifacts:
name: "$CI_JOB_NAME artifacts from $CI_PROJECT_NAME on $CI_COMMIT_REF_SLUG"
expire_in: 1 day
when: always
reports:
codequality:
- "reports/docker-hadolint-*.codeclimate.json"
rules:
# exclude if DOCKER_HADOLINT_DISABLED set
- if: '$DOCKER_HADOLINT_DISABLED == "true"'
when: never
- !reference [.test-policy, rules]
# ==================================================
# Stage: package-build
# ==================================================
docker-kaniko-build:
extends: .docker-kaniko-base
stage: package-build
script:
- run_build_kaniko "$DOCKER_SNAPSHOT_IMAGE" --digest-file .img-digest.txt --build-arg http_proxy="$http_proxy" --build-arg https_proxy="$https_proxy" --build-arg no_proxy="$no_proxy"
- docker_digest=$(cat .img-digest.txt)
- docker_repository=${DOCKER_SNAPSHOT_IMAGE%:*}
- docker_tag=${DOCKER_SNAPSHOT_IMAGE##*:}
- |
{
echo "docker_image=$DOCKER_SNAPSHOT_IMAGE"
echo "docker_image_digest=$docker_repository@$docker_digest"
echo "docker_repository=$docker_repository"
echo "docker_tag=$docker_tag"
echo "docker_digest=$docker_digest"
} > docker.env
artifacts:
reports:
dotenv:
- docker.env
rules:
- if: '$DOCKER_BUILD_TOOL == "kaniko"'
docker-dind-build:
extends: .docker-dind-base
stage: package-build
script:
- docker pull $DOCKER_SNAPSHOT_IMAGE || true
# Build using cache if exist
- docker build --file "$DOCKER_FILE" --cache-from $DOCKER_SNAPSHOT_IMAGE --tag $DOCKER_SNAPSHOT_IMAGE --build-arg http_proxy="$http_proxy" --build-arg https_proxy="$https_proxy" --build-arg no_proxy="$no_proxy" $DOCKER_METADATA $DOCKER_BUILD_ARGS "$(docker_context_path)"
- docker push $DOCKER_SNAPSHOT_IMAGE
# Display the size of each layer
- docker history $DOCKER_SNAPSHOT_IMAGE
# Display the total size of the image
- docker images --digests $DOCKER_SNAPSHOT_IMAGE
# create dotenv file
- image_with_digest=$(docker inspect --format '{{index .RepoDigests 0}}' "$DOCKER_SNAPSHOT_IMAGE")
- docker_digest=${image_with_digest##*@}
- docker_repository=${DOCKER_SNAPSHOT_IMAGE%:*}
- docker_tag=${DOCKER_SNAPSHOT_IMAGE##*:}
- |
{
echo "docker_image=$DOCKER_SNAPSHOT_IMAGE"
echo "docker_image_digest=$docker_repository@$docker_digest"
echo "docker_repository=$docker_repository"
echo "docker_tag=$docker_tag"
echo "docker_digest=$docker_digest"
} > docker.env
artifacts:
reports:
dotenv:
- docker.env
rules:
- if: '$DOCKER_BUILD_TOOL == "dind"'
docker-buildah-build:
extends: .docker-base
stage: package-build
image: "$DOCKER_BUILDAH_IMAGE"
script:
- configure_containers_registries
# derive buildah_build_cache repository
- buildah_build_cache="${DOCKER_SNAPSHOT_IMAGE%:*}/cache"
- log_info "Using ${buildah_build_cache} as build cache repository"
- buildah build --file "$DOCKER_FILE" --tag $DOCKER_SNAPSHOT_IMAGE --layers --cache-from $buildah_build_cache --cache-to $buildah_build_cache --build-arg http_proxy="$http_proxy" --build-arg https_proxy="$https_proxy" --build-arg no_proxy="$no_proxy" $DOCKER_METADATA $DOCKER_BUILD_ARGS "$(docker_context_path)"
- buildah push --digestfile .img-digest.txt "$DOCKER_SNAPSHOT_IMAGE"
# display digest of the resulting image
- cat .img-digest.txt
# create dotenv file
- docker_digest=$(cat .img-digest.txt)
- docker_repository=${DOCKER_SNAPSHOT_IMAGE%:*}
- docker_tag=${DOCKER_SNAPSHOT_IMAGE##*:}
- |
{
echo "docker_image=$DOCKER_SNAPSHOT_IMAGE"
echo "docker_image_digest=$docker_repository@$docker_digest"
echo "docker_repository=$docker_repository"
echo "docker_tag=$docker_tag"
echo "docker_digest=$docker_digest"
} > docker.env
artifacts:
reports:
dotenv:
- docker.env
rules:
- if: '$DOCKER_BUILD_TOOL == "buildah"'
# ==================================================
# Stage: package-test
# ==================================================
# Tests are expected to run via the image's HEALTHCHECK; if so, you don't need to edit this job
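# Note (illustrative): the image must declare a HEALTHCHECK instruction in its Dockerfile
# (e.g. 'HEALTHCHECK CMD curl -f http://localhost:8080/ || exit 1'); otherwise no "healthy"
# event is ever emitted and this job times out.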
docker-healthcheck:
extends: .docker-dind-base
variables:
GIT_STRATEGY: none
dependencies: []
stage: package-test
script: |
# Test by internal health_check (Recommended way, more info https://docs.docker.com/engine/reference/builder/#healthcheck)
# This looks complicated but you normally don't have to touch this...
function unexpected_error() {
log_error "Unexpected error"
if [ -n "$container_id" ]
then
docker logs $container_id
fi
exit 1
}
docker pull $DOCKER_SNAPSHOT_IMAGE
timestamp_from=$(( $(date +%s) - 1 ))
container_id=$(docker run -d $DOCKER_HEALTHCHECK_OPTIONS $DOCKER_SNAPSHOT_IMAGE $DOCKER_HEALTHCHECK_CONTAINER_ARGS)
log_info "container_id=$container_id"
waiting_time=0
healthcheck_result="timeout"
while [ $waiting_time -lt $DOCKER_HEALTHCHECK_TIMEOUT -a "$healthcheck_result" != "healthy" -a "$healthcheck_result" != "dead" ]
do
waiting_time=$(( $waiting_time + 5))
timestamp_to=$(( $timestamp_from + $waiting_time ))
log_info "Testing events between $timestamp_from and $timestamp_to ..."
full_result=$(docker events --filter container=$container_id --format="{{.Status}}" --since $timestamp_from --until $timestamp_to) || unexpected_error
if echo "$full_result" | grep ': healthy$' >/dev/null
then
healthcheck_result="healthy"
elif echo "$full_result" | grep ': unhealthy$' >/dev/null
then
log_warn "\\e[93mContainer is unhealthy\\e[0m"
healthcheck_result="unhealthy"
elif echo "$full_result" | grep '^die$' >/dev/null
then
log_error "Container died"
healthcheck_result="dead"
else
healthcheck_result="timeout"
fi
done
log_info "Container logs:"
docker logs $container_id
log_info "Docker inspect:"
docker inspect $container_id
if [ "$healthcheck_result" == "healthy" ]
then
log_info "Container is healthy"
else
log_error "HealthCheck test error, reason: $healthcheck_result"
echo -e "Full logs:\n$full_result"
exit 1
fi
rules:
# exclude if DOCKER_HEALTHCHECK_DISABLED set
- if: '$DOCKER_HEALTHCHECK_DISABLED == "true"'
when: never
# only relevant with the Docker-in-Docker build tool
- if: '$DOCKER_BUILD_TOOL != "dind"'
when: never
- !reference [.test-policy, rules]
# Security audit with trivy
# This is a non-blocking job, it will always return (code) 0
docker-trivy:
extends: .docker-base
dependencies: []
image:
name: $DOCKER_TRIVY_IMAGE
entrypoint: [""]
stage: package-test
variables:
TRIVY_CACHE_DIR: ".trivycache/"
script:
- mkdir -p -m 777 reports
- |
# cache cleanup is needed when scanning images with the same tag; it does not remove the vulnerability database
trivy image --clear-cache
export TRIVY_USERNAME=${DOCKER_REGISTRY_SNAPSHOT_USER:-${DOCKER_REGISTRY_USER:-$CI_REGISTRY_USER}}
export TRIVY_PASSWORD=${DOCKER_REGISTRY_SNAPSHOT_PASSWORD:-${DOCKER_REGISTRY_PASSWORD:-$CI_REGISTRY_PASSWORD}}
basename=$(echo "${DOCKER_SNAPSHOT_IMAGE}" | sed 's|[/:]|_|g')
if [[ -z "${DOCKER_TRIVY_ADDR}" ]]; then
log_warn "\\e[93mYou are using Trivy in standalone mode. To get faster scans, consider setting the DOCKER_TRIVY_ADDR variable to the address of a Trivy server. More info here: https://aquasecurity.github.io/trivy/latest/docs/references/modes/client-server/\\e[0m"
trivy image --download-db-only
export trivy_opts="image"
else
log_info "You are using Trivy in client/server mode with the following server: ${DOCKER_TRIVY_ADDR}"
export trivy_opts="image --server ${DOCKER_TRIVY_ADDR}"
fi
# Add common trivy arguments
export trivy_opts="${trivy_opts} --no-progress --severity ${DOCKER_TRIVY_SECURITY_LEVEL_THRESHOLD} ${DOCKER_TRIVY_ARGS}"
# GitLab format (no fail)
trivy ${trivy_opts} --format template --exit-code 0 --template "@/contrib/gitlab.tpl" --output reports/docker-trivy-${basename}.gitlab.json $DOCKER_SNAPSHOT_IMAGE
# JSON format (no fail)
if [[ "$DEFECTDOJO_TRIVY_REPORTS" ]]
then
trivy ${trivy_opts} --format json --exit-code 0 --output reports/docker-trivy-${basename}.native.json $DOCKER_SNAPSHOT_IMAGE
fi
# console output (fail)
trivy ${trivy_opts} --format table --exit-code 1 $DOCKER_SNAPSHOT_IMAGE
artifacts:
reports:
container_scanning: "reports/docker-trivy-*.gitlab.json"
cache:
paths:
- .trivycache/
rules:
# exclude if DOCKER_TRIVY_DISABLED set
- if: '$DOCKER_TRIVY_DISABLED == "true"'
when: never
- !reference [.test-policy, rules]
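# Note (illustrative): pointing DOCKER_TRIVY_ADDR at a running Trivy server (e.g. a hypothetical
# "https://trivy.example.com") switches the scan to client/server mode and avoids downloading the
# vulnerability database in every job.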
docker-sbom:
extends: .docker-base
stage: package-test
image:
name: $DOCKER_SBOM_IMAGE
entrypoint: [""]
# force no dependency
dependencies: []
script:
- mkdir -p -m 777 reports
- basename=$(echo "${DOCKER_SNAPSHOT_IMAGE}" | sed 's|[/:]|_|g')
- /syft packages ${TRACE+-vv} $DOCKER_SNAPSHOT_IMAGE $DOCKER_SBOM_OPTS -o cyclonedx-json=reports/docker-sbom-${basename}.cyclonedx.json
- chmod a+r reports/docker-sbom-${basename}.cyclonedx.json
artifacts:
name: "SBOM for docker from $CI_PROJECT_NAME on $CI_COMMIT_REF_SLUG"
expire_in: 1 week
when: always
paths:
- "reports/docker-sbom-*.cyclonedx.json"
- "reports/docker-sbom-*.cyclonedx.json"
rules:
# exclude if disabled
- if: '$DOCKER_SBOM_DISABLED == "true"'
when: never
- !reference [.test-policy, rules]
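# Note (illustrative): the generated CycloneDX JSON can be fed to an SBOM analysis platform
# (e.g. Dependency-Track) or to "grype sbom:<file>" for an additional vulnerability scan.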
# ==================================================
# Stage: publish
# ==================================================
# When semantic-release integration is enabled, this stage runs in the main branch pipeline
# When semantic-release integration is disabled, this stage only runs when you push a new Git tag (a good tag format is x.y.z, e.g. 1.0.2, see https://semver.org/)
# In both cases, it pushes the release-tagged image to the configured registry
docker-publish:
extends: .docker-base
image:
name: "$DOCKER_SKOPEO_IMAGE"
entrypoint: [""]
stage: publish
variables:
GIT_STRATEGY: none
script:
- |
if [[ "${SEMREL_INFO_ON}" && "${DOCKER_SEMREL_RELEASE_DISABLED}" != "true" ]]
then
if [[ -z "${SEMREL_INFO_NEXT_VERSION}" ]]
then
log_warn "[semantic-release] no new version to release: skip"
exit 0
else
DOCKER_RELEASE_IMAGE=$(echo "$DOCKER_RELEASE_IMAGE" | sed "s/\(:.*\)\{0,1\}$/:$SEMREL_INFO_NEXT_VERSION/")
log_info "[semantic-release] new Image tag is set: $DOCKER_RELEASE_IMAGE"
fi
fi
if [[ "$DOCKER_SNAPSHOT_IMAGE" == "$DOCKER_RELEASE_IMAGE" ]]
then
log_warn "\\e[93mYou should consider distinguishing snapshot and release images as they do not differ. Skipping publish phase as image has already been created by previous job.\\e[0m"
exit 0
fi
- BUILDTOOL_HOME=${BUILDTOOL_HOME:-$HOME}
# 1: push main image
- skopeo copy --src-authfile "$BUILDTOOL_HOME/skopeo/.docker/src-config.json" --dest-authfile "$BUILDTOOL_HOME/skopeo/.docker/dest-config.json" ${DOCKER_PUBLISH_ARGS} "docker://$DOCKER_SNAPSHOT_IMAGE" "docker://$DOCKER_RELEASE_IMAGE"
- |
log_info "Well done your image is pushed and can be pulled with: docker pull $DOCKER_RELEASE_IMAGE"
# 2: extract info and generate output dotenv
- docker_digest=$(skopeo inspect --authfile "$BUILDTOOL_HOME/skopeo/.docker/dest-config.json" --format='{{ .Digest }}' "docker://$DOCKER_RELEASE_IMAGE")
- docker_repository=${DOCKER_RELEASE_IMAGE%:*}
- docker_tag=${DOCKER_RELEASE_IMAGE##*:}
- |
{
echo "docker_image=$DOCKER_RELEASE_IMAGE"
echo "docker_image_digest=$docker_repository@$docker_digest"
echo "docker_repository=$docker_repository"
echo "docker_tag=$docker_tag"
echo "docker_digest=$docker_digest"
} > docker.env
- publish_extra_tags
artifacts:
reports:
dotenv:
- docker.env
rules:
# on tag: if semrel info not enabled or semrel integration disabled
- if: '$CI_COMMIT_TAG && ($SEMREL_INFO_ON == null || $SEMREL_INFO_ON == "" || $DOCKER_SEMREL_RELEASE_DISABLED == "true")'
# exclude non-production branches
- if: '$CI_COMMIT_REF_NAME !~ $PROD_REF'
when: never
# exclude if snapshot is same as release image and semrel info not enabled or semrel integration disabled
- if: '$DOCKER_SNAPSHOT_IMAGE == $DOCKER_RELEASE_IMAGE && ($SEMREL_INFO_ON == null || $SEMREL_INFO_ON == "" || $DOCKER_SEMREL_RELEASE_DISABLED == "true")'
when: never
# support former variable (prevent breaking change)
- if: '$PUBLISH_ON_PROD == "false"'
when: never
- if: '$DOCKER_PROD_PUBLISH_STRATEGY == "manual"'
when: manual
- if: '$DOCKER_PROD_PUBLISH_STRATEGY == "auto"'