# =========================================================================================
#
# This program is free software; you can redistribute it and/or modify it under the terms
# of the GNU Lesser General Public License as published by the Free Software Foundation;
# either version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with this
# program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
# =========================================================================================
# default workflow rules
workflow:
rules:
# exclude merge requests
- if: $CI_MERGE_REQUEST_ID
when: never
- when: always
variables:
# configurable tracking image
TBC_TRACKING_IMAGE: "$CI_REGISTRY/to-be-continuous/tools/tracking:master"
DOCKER_LINT_IMAGE: "projectatomic/dockerfile-lint:latest"
DOCKER_HADOLINT_IMAGE: "hadolint/hadolint:latest-alpine"
DOCKER_IMAGE: "docker:latest"
DOCKER_DIND_IMAGE: "docker:dind"
DOCKER_KANIKO_IMAGE: "gcr.io/kaniko-project/executor:debug"
DOCKER_SKOPEO_IMAGE: "quay.io/skopeo/stable:latest"
# for backward compatibility (deprecated & undocumented)
DOCKER_DOCKERFILE_PATH: "."
DOCKER_FILE: "$DOCKER_DOCKERFILE_PATH/Dockerfile"
# When testing the Docker HEALTHCHECK (package-test stage), how long to wait (in seconds) for the HEALTHCHECK status (https://docs.docker.com/engine/reference/builder/#healthcheck)
DOCKER_HEALTHCHECK_TIMEOUT: "60"
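# Illustrative sketch (not part of the template; endpoint and options are hypothetical): the built image's
# Dockerfile must declare a HEALTHCHECK for the docker-healthcheck job to observe, e.g.:
#   HEALTHCHECK --interval=5s --timeout=3s --retries=3 CMD curl -f http://localhost:8080/health || exit 1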
# Default Docker config uses the internal GitLab registry
DOCKER_SNAPSHOT_IMAGE: "$CI_REGISTRY_IMAGE/snapshot:$CI_COMMIT_REF_SLUG"
DOCKER_RELEASE_IMAGE: "$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME"
DOCKER_KANIKO_VERBOSITY: "info"
DOCKER_TRIVY_SECURITY_LEVEL_THRESHOLD: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
DOCKER_TRIVY_IMAGE: "aquasec/trivy:latest"
DOCKER_TRIVY_ARGS: "--ignore-unfixed --vuln-type os"
# by default: DevOps pipeline
PUBLISH_ON_PROD: "true"
# default production ref name (pattern)
PROD_REF: '/^(master|main)$/'
# default integration ref name (pattern)
INTEG_REF: '/^develop$/'
# don't use CI_PROJECT_TITLE: kaniko doesn't support spaces in arguments right now (https://github.com/GoogleContainerTools/kaniko/issues/1231)
DOCKER_METADATA: >-
--label org.opencontainers.image.url=${CI_PROJECT_URL}
--label org.opencontainers.image.source=${CI_PROJECT_URL}
--label org.opencontainers.image.title=${CI_PROJECT_PATH}
--label org.opencontainers.image.ref.name=${CI_COMMIT_REF_NAME}
--label org.opencontainers.image.revision=${CI_COMMIT_SHA}
--label org.opencontainers.image.created=${CI_JOB_STARTED_AT}
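# Illustrative sketch (the vendor label below is hypothetical): a project can override DOCKER_METADATA
# in its own .gitlab-ci.yml variables to add extra OCI labels, e.g.:
#   DOCKER_METADATA: >-
#     --label org.opencontainers.image.title=${CI_PROJECT_PATH}
#     --label org.opencontainers.image.vendor=acme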
# ==================================================
# Stages definition
# ==================================================
stages:
- build
- package-build
- package-test
- publish
# ==================================================
# Base Jobs definition
# ==================================================
.docker-scripts: &docker-scripts |
# BEGSCRIPT
set -e
function log_info() {
echo -e "[\\e[1;94mINFO\\e[0m] $*"
}
function log_warn() {
echo -e "[\\e[1;93mWARN\\e[0m] $*"
}
function log_error() {
echo -e "[\\e[1;91mERROR\\e[0m] $*"
}
function fail() {
log_error "$*"
exit 1
}
function install_custom_ca_certs() {
certs="${CUSTOM_CA_CERTS:-$DEFAULT_CA_CERTS}"
if [[ -z "$certs" ]]
then
return
fi
# import into the system CA store for Debian-based images (Ubuntu, Debian)
if [[ -f /etc/ssl/certs/ca-certificates.crt ]]
then
echo "$certs" | tr -d '\r' >> /etc/ssl/certs/ca-certificates.crt
log_info "Custom CA certificates imported in \\e[33;1m/etc/ssl/certs/ca-certificates.crt\\e[0m"
# import into the system CA store for Fedora/RHEL-based images (e.g. the Skopeo image)
elif [[ -f /etc/ssl/certs/ca-bundle.crt ]]
then
echo "$certs" | tr -d '\r' >> /etc/ssl/certs/ca-bundle.crt
log_info "Custom CA certificates imported in \\e[33;1m/etc/ssl/certs/ca-bundle.crt\\e[0m"
# kaniko image: specific directory for CA certificates, no standard import tool
elif [[ -d /kaniko/ssl/certs ]]
then
echo "$certs" | tr -d '\r' >> /kaniko/ssl/certs/ca-certificates.crt
log_info "Custom CA certificates configured in \\e[33;1m/kaniko/ssl/certs/ca-certificates.crt\\e[0m"
fi
}
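# Usage sketch (illustrative value): provide the bundle as one or more PEM blocks through the
# CUSTOM_CA_CERTS (or DEFAULT_CA_CERTS) CI/CD variable, e.g.
#   CUSTOM_CA_CERTS="-----BEGIN CERTIFICATE-----
#   MIIC...(base64 payload)...
#   -----END CERTIFICATE-----"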
function unscope_variables() {
_scoped_vars=$(env | awk -F '=' "/^scoped__[a-zA-Z0-9_]+=/ {print \$1}" | sort)
if [[ -z "$_scoped_vars" ]]; then return; fi
log_info "Processing scoped variables..."
for _scoped_var in $_scoped_vars
do
_fields=${_scoped_var//__/:}
_condition=$(echo "$_fields" | cut -d: -f3)
case "$_condition" in
if) _not="";;
ifnot) _not=1;;
*)
log_warn "... unrecognized condition \\e[1;91m$_condition\\e[0m in \\e[33;1m${_scoped_var}\\e[0m"
continue
;;
esac
_target_var=$(echo "$_fields" | cut -d: -f2)
_cond_var=$(echo "$_fields" | cut -d: -f4)
_cond_val=$(eval echo "\$${_cond_var}")
_test_op=$(echo "$_fields" | cut -d: -f5)
case "$_test_op" in
defined)
if [[ -z "$_not" ]] && [[ -z "$_cond_val" ]]; then continue;
elif [[ "$_not" ]] && [[ "$_cond_val" ]]; then continue;
fi
;;
equals|startswith|endswith|contains|in|equals_ic|startswith_ic|endswith_ic|contains_ic|in_ic)
# comparison operator
# sluggify actual value
_cond_val=$(echo "$_cond_val" | tr '[:punct:]' '_')
# retrieve comparison value
_cmp_val_prefix="scoped__${_target_var}__${_condition}__${_cond_var}__${_test_op}__"
_cmp_val=${_scoped_var#${_cmp_val_prefix}}
# manage 'ignore case'
if [[ "$_test_op" == *_ic ]]
then
# lowercase everything
_cond_val=$(echo "$_cond_val" | tr '[:upper:]' '[:lower:]')
_cmp_val=$(echo "$_cmp_val" | tr '[:upper:]' '[:lower:]')
fi
case "$_test_op" in
equals*)
if [[ -z "$_not" ]] && [[ "$_cond_val" != "$_cmp_val" ]]; then continue;
elif [[ "$_not" ]] && [[ "$_cond_val" == "$_cmp_val" ]]; then continue;
fi
;;
startswith*)
if [[ -z "$_not" ]] && [[ "$_cond_val" != "$_cmp_val"* ]]; then continue;
elif [[ "$_not" ]] && [[ "$_cond_val" == "$_cmp_val"* ]]; then continue;
fi
;;
endswith*)
if [[ -z "$_not" ]] && [[ "$_cond_val" != *"$_cmp_val" ]]; then continue;
elif [[ "$_not" ]] && [[ "$_cond_val" == *"$_cmp_val" ]]; then continue;
fi
;;
contains*)
if [[ -z "$_not" ]] && [[ "$_cond_val" != *"$_cmp_val"* ]]; then continue;
elif [[ "$_not" ]] && [[ "$_cond_val" == *"$_cmp_val"* ]]; then continue;
fi
;;
in*)
if [[ -z "$_not" ]] && [[ "__${_cmp_val}__" != *"__${_cond_val}__"* ]]; then continue;
elif [[ "$_not" ]] && [[ "__${_cmp_val}__" == *"__${_cond_val}__"* ]]; then continue;
fi
;;
esac
;;
*)
log_warn "... unrecognized test operator \\e[1;91m${_test_op}\\e[0m in \\e[33;1m${_scoped_var}\\e[0m"
continue
;;
esac
# matches
_val=$(eval echo "\$${_target_var}")
log_info "... apply \\e[32m${_target_var}\\e[0m from \\e[32m\$${_scoped_var}\\e[0m${_val:+ (\\e[33;1moverwrite\\e[0m)}"
_val=$(eval echo "\$${_scoped_var}")
export "${_target_var}"="${_val}"
done
log_info "... done"
}
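# Naming sketch derived from the parsing above (the variable below is illustrative):
#   scoped__<TARGET_VAR>__<if|ifnot>__<COND_VAR>__<TEST_OP>[__<VALUE>]
# e.g. scoped__DOCKER_FILE__if__CI_COMMIT_REF_NAME__equals__main
# sets DOCKER_FILE to that variable's value when CI_COMMIT_REF_NAME equals "main"
# (the actual value is sluggified first: punctuation characters are mapped to '_')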
# evaluate and export a secret
# - $1: secret variable name
function eval_secret() {
name=$1
value=$(eval echo "\$${name}")
# create the /tmp directory (it is required by the mktemp command)
mkdir -p /tmp
case "$value" in
@b64@*)
decoded=$(mktemp)
errors=$(mktemp)
if echo "$value" | cut -c6- | base64 -d > "${decoded}" 2> "${errors}"
then
# shellcheck disable=SC2086
export ${name}="$(cat ${decoded})"
log_info "Successfully decoded base64 secret \\e[33;1m${name}\\e[0m"
else
fail "Failed decoding base64 secret \\e[33;1m${name}\\e[0m:\\n$(sed 's/^/... /g' "${errors}")"
fi
;;
@hex@*)
decoded=$(mktemp)
errors=$(mktemp)
if echo "$value" | cut -c6- | sed 's/\([0-9A-F]\{2\}\)/\\\\x\1/gI' | xargs printf > "${decoded}" 2> "${errors}"
then
# shellcheck disable=SC2086
export ${name}="$(cat ${decoded})"
log_info "Successfully decoded hexadecimal secret \\e[33;1m${name}\\e[0m"
else
fail "Failed decoding hexadecimal secret \\e[33;1m${name}\\e[0m:\\n$(sed 's/^/... /g' "${errors}")"
fi
;;
@url@*)
url=$(echo "$value" | cut -c6-)
if command -v curl > /dev/null
then
decoded=$(mktemp)
errors=$(mktemp)
if curl -s -S -f --connect-timeout 5 -o "${decoded}" "$url" 2> "${errors}"
then
# shellcheck disable=SC2086
export ${name}="$(cat ${decoded})"
log_info "Successfully curl'd secret \\e[33;1m${name}\\e[0m"
else
log_warn "Failed getting secret \\e[33;1m${name}\\e[0m:\\n$(sed 's/^/... /g' "${errors}")"
fi
elif command -v wget > /dev/null
then
decoded=$(mktemp)
errors=$(mktemp)
if wget -T 5 -O "${decoded}" "$url" 2> "${errors}"
then
# shellcheck disable=SC2086
export ${name}="$(cat ${decoded})"
log_info "Successfully wget'd secret \\e[33;1m${name}\\e[0m"
else
log_warn "Failed getting secret \\e[33;1m${name}\\e[0m:\\n$(sed 's/^/... /g' "${errors}")"
fi
else
log_warn "Couldn't get secret \\e[33;1m${name}\\e[0m: no http client found"
fi
;;
esac
}
function eval_all_secrets() {
encoded_vars=$(env | grep -v '^scoped__' | awk -F '=' '/^[a-zA-Z0-9_]*=@(b64|hex|url)@/ {print $1}')
for var in $encoded_vars
do
eval_secret "$var"
done
}
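# Usage sketch (illustrative variable names and values): any CI/CD variable whose value starts with
# @b64@, @hex@ or @url@ is decoded or fetched before use, e.g.
#   MY_TOKEN="@b64@bXktc2VjcmV0"                      (base64-encoded "my-secret")
#   MY_KEY="@hex@6d792d736563726574"                  (hex-encoded "my-secret")
#   MY_CERT="@url@https://vault.example.com/my-cert"  (fetched with curl or wget)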
function is_runner_dind_capable() {
docker info > /dev/null 2>&1
}
function configure_registries_auth() {
docker_snapshot_authent_token=$(echo -n "${DOCKER_REGISTRY_SNAPSHOT_USER:-${DOCKER_REGISTRY_USER:-$CI_REGISTRY_USER}}:${DOCKER_REGISTRY_SNAPSHOT_PASSWORD:-${DOCKER_REGISTRY_PASSWORD:-$CI_REGISTRY_PASSWORD}}" | base64 | tr -d '\n')
docker_snapshot_registry_host=$(echo "$DOCKER_SNAPSHOT_IMAGE" | cut -d/ -f1)
docker_release_authent_token=$(echo -n "${DOCKER_REGISTRY_RELEASE_USER:-${DOCKER_REGISTRY_USER:-$CI_REGISTRY_USER}}:${DOCKER_REGISTRY_RELEASE_PASSWORD:-${DOCKER_REGISTRY_PASSWORD:-$CI_REGISTRY_PASSWORD}}" | base64 | tr -d '\n')
docker_release_registry_host=$(echo "$DOCKER_RELEASE_IMAGE" | cut -d/ -f1)
docker_snapshot_config_json=$(echo -n "{\"auths\":{\"$docker_snapshot_registry_host\":{\"auth\":\"$docker_snapshot_authent_token\"},\"HttpHeaders\":{\"User-Agent\":\"$USER_AGENT\"}}}")
docker_release_config_json=$(echo -n "{\"auths\":{\"$docker_release_registry_host\":{\"auth\":\"$docker_release_authent_token\"},\"HttpHeaders\":{\"User-Agent\":\"$USER_AGENT\"}}}")
# Create the configuration file for Docker
mkdir -p /root/.docker
echo "${docker_snapshot_config_json}" > /root/.docker/config.json
# Create the configuration file for Kaniko
mkdir -p /kaniko/.docker
echo "${docker_snapshot_config_json}" > /kaniko/.docker/config.json
# Create the configuration file for Skopeo
mkdir -p /skopeo/.docker
echo "${docker_snapshot_config_json}" > /skopeo/.docker/src-config.json
echo "${docker_release_config_json}" > /skopeo/.docker/dest-config.json
log_info "Docker authentication configured for \\e[33;1m${docker_snapshot_registry_host}\\e[0m"
}
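# Credentials resolution implemented above (first defined wins):
#   snapshot: DOCKER_REGISTRY_SNAPSHOT_USER/PASSWORD, then DOCKER_REGISTRY_USER/PASSWORD, then CI_REGISTRY_USER/PASSWORD
#   release:  DOCKER_REGISTRY_RELEASE_USER/PASSWORD, then DOCKER_REGISTRY_USER/PASSWORD, then CI_REGISTRY_USER/PASSWORD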
# autodetects whether there is a Hadolint config file
function autoconfig_hadolint() {
# If present, import hadolint config found inside the git repository
if [[ -f "hadolint.yaml" ]]
then
log_info "Using custom Hadolint config (\\e[33;1mhadolint.yaml\\e[0m)"
export hadolint_config_opts="--config ./hadolint.yaml"
else
log_info "No Hadolint config found: use default"
fi
}
function create_kaniko_cache_dir() {
# create cache directory if needed
mkdir -p "$KANIKO_CACHE_DIR"
}
function init_workspace() {
install_custom_ca_certs
eval_all_secrets
configure_registries_auth
if is_runner_dind_capable
then
docker info
elif [[ -x /kaniko/executor ]]
then
create_kaniko_cache_dir
fi
}
# evaluate the context path
function docker_context_path() {
if [[ "$DOCKER_CONTEXT_PATH" ]]
then
# $DOCKER_CONTEXT_PATH is explicit: use it
echo "$DOCKER_CONTEXT_PATH"
else
# $DOCKER_CONTEXT_PATH unset or empty: default to the Dockerfile's directory
dirname "$DOCKER_FILE"
fi
}
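# e.g. (illustrative values) with DOCKER_FILE="docker/Dockerfile" and DOCKER_CONTEXT_PATH unset,
# the build context resolves to "docker"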
function run_build_kaniko() {
docker_image=$1
shift
if [[ -n "$DOCKER_REGISTRY_MIRROR" ]]
then
# shellcheck disable=SC2001,SC2086
kaniko_registry_mirror_option="--registry-mirror $(echo ${DOCKER_REGISTRY_MIRROR} | sed "s|^https*://||")"
fi
log_info "Build & deploy image $docker_image"
log_info "Kaniko command: /kaniko/executor --context $(docker_context_path) --dockerfile $DOCKER_FILE --destination $docker_image --cache --cache-dir=$KANIKO_CACHE_DIR --verbosity $DOCKER_KANIKO_VERBOSITY $kaniko_registry_mirror_option $DOCKER_METADATA $DOCKER_BUILD_ARGS $*"
/kaniko/executor --context "$(docker_context_path)" --dockerfile "$DOCKER_FILE" --destination "$docker_image" --cache --cache-dir="$KANIKO_CACHE_DIR" --verbosity $DOCKER_KANIKO_VERBOSITY $kaniko_registry_mirror_option $DOCKER_METADATA $DOCKER_BUILD_ARGS "$@"
}
init_workspace
# ENDSCRIPT
.docker-base:
services:
- name: "$TBC_TRACKING_IMAGE"
command: ["--service", "docker", "2.5.0"]
before_script:
- *docker-scripts
.docker-kaniko-base:
extends: .docker-base
image:
name: "$DOCKER_KANIKO_IMAGE"
entrypoint: [""]
variables:
KANIKO_CACHE_DIR: "${CI_PROJECT_DIR}/.cache"
cache:
key: "$CI_COMMIT_REF_SLUG-docker"
paths:
- .cache
.docker-dind-base:
extends: .docker-base
image: $DOCKER_IMAGE
variables:
# disable TLS between the Docker client and the Docker daemon: https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-disabled
DOCKER_HOST: tcp://docker:2375
DOCKER_TLS_CERTDIR: ""
# make the DEFAULT_CA_CERTS and CUSTOM_CA_CERTS variables visible to the service (we MUST use different variable names)
_DEFAULT_CA_CERTS: "${DEFAULT_CA_CERTS}"
_CUSTOM_CA_CERTS: "${CUSTOM_CA_CERTS}"
_TRACE: "${TRACE}"
services:
- name: "$TBC_TRACKING_IMAGE"
command: ["--service", "docker", "2.5.0"]
- name: $DOCKER_DIND_IMAGE
alias: docker
command:
- /bin/sh
- -c
- |
if [[ -n "${_CUSTOM_CA_CERTS:-$_DEFAULT_CA_CERTS}" ]]; then echo "${_CUSTOM_CA_CERTS:-$_DEFAULT_CA_CERTS}" | tr -d '\r' >> /etc/ssl/certs/ca-certificates.crt; fi || exit
if [[ -n "${_TRACE}" ]]; then echo "Here is the list of all CAs that are trusted by the Docker daemon:"; cat /etc/ssl/certs/ca-certificates.crt; fi
if [[ -n "${DOCKER_REGISTRY_MIRROR}" ]]; then dockerd-entrypoint.sh --registry-mirror ${DOCKER_REGISTRY_MIRROR}; else dockerd-entrypoint.sh; fi || exit
before_script:
- *docker-scripts
- if ! is_runner_dind_capable; then fail "Docker-in-Docker is not enabled on this runner. Either use a Docker-in-Docker capable runner, or disable this job by unsetting \$DOCKER_DIND_BUILD"; fi
# ==================================================
# Stage: build
# ==================================================
# lint job: checks the Dockerfile syntax against best-practice rules
docker-lint:
image: "$DOCKER_LINT_IMAGE"
extends: .docker-base
stage: build
dependencies: []
script:
- dockerfile_lint -f $DOCKER_FILE $DOCKER_LINT_ARGS
rules:
# execute if DOCKER_LINT_ENABLED set
# blocking on production or integration branches, allowed to fail elsewhere:
- if: '$DOCKER_LINT_ENABLED == "true" && ($CI_COMMIT_REF_NAME =~ $PROD_REF || $CI_COMMIT_REF_NAME =~ $INTEG_REF)'
- if: '$DOCKER_LINT_ENABLED == "true"'
allow_failure: true
docker-hadolint:
image:
name: "$DOCKER_HADOLINT_IMAGE"
entrypoint: [""]
extends: .docker-base
stage: build
dependencies: []
script:
- autoconfig_hadolint
- mkdir -p reports
- dockerfile_hash=$(md5sum "$DOCKER_FILE" | cut -d" " -f1)
# Output in Code Climate format (GitLab integration)
- hadolint --no-fail -f gitlab_codeclimate $DOCKER_HADOLINT_ARGS $hadolint_config_opts "$DOCKER_FILE" > "reports/hadolint-cc-${dockerfile_hash}.json"
# Output in JSON format
- hadolint --no-fail -f json $DOCKER_HADOLINT_ARGS $hadolint_config_opts "$DOCKER_FILE" > "reports/hadolint-json-${dockerfile_hash}.json"
# last run with console output (may fail the job)
- hadolint $DOCKER_HADOLINT_ARGS $hadolint_config_opts "$DOCKER_FILE"
artifacts:
name: "$CI_JOB_NAME artifacts from $CI_PROJECT_NAME on $CI_COMMIT_REF_SLUG"
expire_in: 1 day
when: always
reports:
codequality:
- "reports/hadolint-cc-*.json"
paths:
- "reports/hadolint-*.json"
rules:
# exclude if DOCKER_HADOLINT_DISABLED set
- if: '$DOCKER_HADOLINT_DISABLED == "true"'
when: never
# on production or integration branches: auto
- if: '$CI_COMMIT_REF_NAME =~ $PROD_REF || $CI_COMMIT_REF_NAME =~ $INTEG_REF'
# else (development branches): allow failure
- allow_failure: true
# ==================================================
# Stage: package-build
# ==================================================
docker-kaniko-build:
extends: .docker-kaniko-base
stage: package-build
script:
- run_build_kaniko "$DOCKER_SNAPSHOT_IMAGE" --digest-file .img-digest.txt --build-arg http_proxy="$http_proxy" --build-arg https_proxy="$https_proxy" --build-arg no_proxy="$no_proxy"
- docker_digest=$(cat .img-digest.txt)
- docker_repository=${DOCKER_SNAPSHOT_IMAGE%%:*}
- docker_tag=${DOCKER_SNAPSHOT_IMAGE#*:}
- echo "docker_image=$DOCKER_SNAPSHOT_IMAGE" > docker.env
- echo "docker_image_digest=$docker_repository@$docker_digest" >> docker.env
- echo "docker_repository=$docker_repository" >> docker.env
- echo "docker_tag=$docker_tag" >> docker.env
- echo "docker_digest=$docker_digest" >> docker.env
artifacts:
reports:
dotenv:
- docker.env
rules:
# execute if $DOCKER_DIND_BUILD not set
- if: '$DOCKER_DIND_BUILD == null || $DOCKER_DIND_BUILD == ""'
docker-dind-build:
extends: .docker-dind-base
stage: package-build
script:
- docker pull $DOCKER_SNAPSHOT_IMAGE || true
# Build using cache if it exists
- docker build --file "$DOCKER_FILE" --cache-from $DOCKER_SNAPSHOT_IMAGE --tag $DOCKER_SNAPSHOT_IMAGE --build-arg http_proxy="$http_proxy" --build-arg https_proxy="$https_proxy" --build-arg no_proxy="$no_proxy" $DOCKER_METADATA $DOCKER_BUILD_ARGS "$(docker_context_path)"
- docker push $DOCKER_SNAPSHOT_IMAGE
# Display the size of each layer
- docker history $DOCKER_SNAPSHOT_IMAGE
# Display the total size of the image
- docker images --digests $DOCKER_SNAPSHOT_IMAGE
# create dotenv file
- image_with_digest=$(docker inspect --format '{{index .RepoDigests 0}}' "$DOCKER_SNAPSHOT_IMAGE")
- docker_digest=${image_with_digest#*@}
- docker_repository=${DOCKER_SNAPSHOT_IMAGE%%:*}
- docker_tag=${DOCKER_SNAPSHOT_IMAGE#*:}
- echo "docker_image=$DOCKER_SNAPSHOT_IMAGE" > docker.env
- echo "docker_image_digest=$docker_repository@$docker_digest" >> docker.env
- echo "docker_repository=$docker_repository" >> docker.env
- echo "docker_tag=$docker_tag" >> docker.env
- echo "docker_digest=$docker_digest" >> docker.env
artifacts:
reports:
dotenv:
- docker.env
rules:
- if: $DOCKER_DIND_BUILD
# ==================================================
# Stage: package-test
# ==================================================
# Tests should be implemented as a Docker HEALTHCHECK. If so, you don't need to edit this job
docker-healthcheck:
extends: .docker-dind-base
variables:
GIT_STRATEGY: none
dependencies: []
stage: package-test
script: |
# Test via the image's internal HEALTHCHECK (recommended way, more info: https://docs.docker.com/engine/reference/builder/#healthcheck)
# This looks complicated but you normally don't have to touch this...
function unexpected_error() {
log_error "Unexpected error"
if [ -n "$container_id" ]
then
docker logs $container_id
fi
exit 1
}
docker pull $DOCKER_SNAPSHOT_IMAGE
timestamp_from=$(( $(date +%s) - 1 ))
container_id=$(docker run -d $DOCKER_HEALTHCHECK_OPTIONS $DOCKER_SNAPSHOT_IMAGE $DOCKER_HEALTHCHECK_CONTAINER_ARGS)
log_info "container_id=$container_id"
waiting_time=0
healthcheck_result="timeout"
while [ $waiting_time -lt $DOCKER_HEALTHCHECK_TIMEOUT -a "$healthcheck_result" != "healthy" -a "$healthcheck_result" != "dead" ]
do
waiting_time=$(( $waiting_time + 5))
timestamp_to=$(( $timestamp_from + $waiting_time ))
log_info "Testing events between $timestamp_from and $timestamp_to ..."
full_result=$(docker events --filter container=$container_id --format="{{.Status}}" --since $timestamp_from --until $timestamp_to) || unexpected_error
if echo "$full_result" | grep ': healthy$' >/dev/null
then
healthcheck_result="healthy"
elif echo "$full_result" | grep ': unhealthy$' >/dev/null
then
log_warn "\\e[93mContainer is unhealthy\\e[0m"
healthcheck_result="unhealthy"
elif echo "$full_result" | grep '^die$' >/dev/null
then
log_error "Container died"
healthcheck_result="dead"
else
healthcheck_result="timeout"
fi
done
log_info "Container logs:"
docker logs $container_id
log_info "Docker inspect:"
docker inspect $container_id
if [ "$healthcheck_result" == "healthy" ]
then
log_info "Container is healthy"
else
log_error "HealthCheck test error, reason: $healthcheck_result"
echo -e "Full logs:\n$full_result"
exit 1
fi
rules:
- if: '$DOCKER_HEALTHCHECK_DISABLED == "true"'
when: never
- if: $DOCKER_DIND_BUILD
# Security audit with trivy
# This is a non-blocking job: it will always return exit code 0
docker-trivy:
extends: .docker-base
dependencies: []
image:
name: $DOCKER_TRIVY_IMAGE
entrypoint: [""]
stage: package-test
script: |
export TRIVY_USERNAME=${DOCKER_REGISTRY_SNAPSHOT_USER:-${DOCKER_REGISTRY_USER:-$CI_REGISTRY_USER}}
export TRIVY_PASSWORD=${DOCKER_REGISTRY_SNAPSHOT_PASSWORD:-${DOCKER_REGISTRY_PASSWORD:-$CI_REGISTRY_PASSWORD}}
export FILENAME=$(echo "${DOCKER_SNAPSHOT_IMAGE}" | sed 's|[/:]|_|g')
mkdir -p ./trivy
# the first Trivy executions must never fail, otherwise the following ones won't run (hence --exit-code 0)
trivy client --remote ${DOCKER_TRIVY_ADDR} --format template --template @/contrib/junit.tpl --severity "${DOCKER_TRIVY_SECURITY_LEVEL_THRESHOLD}" --output ./trivy/${FILENAME}.xml --exit-code 0 ${DOCKER_TRIVY_ARGS} $DOCKER_SNAPSHOT_IMAGE
trivy client --remote ${DOCKER_TRIVY_ADDR} --format json --severity "${DOCKER_TRIVY_SECURITY_LEVEL_THRESHOLD}" --output ./trivy/${FILENAME}.json --exit-code 0 ${DOCKER_TRIVY_ARGS} $DOCKER_SNAPSHOT_IMAGE
trivy client --remote ${DOCKER_TRIVY_ADDR} --format table --severity "${DOCKER_TRIVY_SECURITY_LEVEL_THRESHOLD}" --exit-code 1 ${DOCKER_TRIVY_ARGS} $DOCKER_SNAPSHOT_IMAGE
artifacts:
when: always
paths:
- trivy/
reports:
junit: "trivy/*.xml"
rules:
- if: '$DOCKER_TRIVY_DISABLED == "true"'
when: never
- if: '$DOCKER_TRIVY_ADDR && ($CI_COMMIT_REF_NAME =~ $PROD_REF || $CI_COMMIT_REF_NAME =~ $INTEG_REF)'
# allow failure on development branches:
- if: $DOCKER_TRIVY_ADDR
allow_failure: true
# ==================================================
# Stage: publish
# ==================================================
# This stage only runs when you push a new tag to the git repository (a good tag format is x.y.z, e.g. 1.0.2, see https://semver.org/)
# It pushes the release-tagged image to the chosen registry
docker-publish:
extends: .docker-base
image:
name: "$DOCKER_SKOPEO_IMAGE"
entrypoint: [""]
stage: publish
variables:
GIT_STRATEGY: none
dependencies: []
script:
- |
if [[ "$DOCKER_SNAPSHOT_IMAGE" == "$DOCKER_RELEASE_IMAGE" ]]
then
log_warn "\\e[93mYou should consider distinguishing snapshot and release images as they do not differ. Skipping publish phase as image has already been created by previous job.\\e[0m"
exit 0
fi
skopeo copy --src-authfile /skopeo/.docker/src-config.json --dest-authfile /skopeo/.docker/dest-config.json ${DOCKER_PUBLISH_ARGS} docker://$DOCKER_SNAPSHOT_IMAGE docker://$DOCKER_RELEASE_IMAGE
log_info "Well done your image is published and can be downloaded by doing: docker pull $DOCKER_RELEASE_IMAGE"
- docker_digest=$(skopeo inspect --format='{{ .Digest }}' "docker://$DOCKER_RELEASE_IMAGE")
- docker_repository=${DOCKER_RELEASE_IMAGE%%:*}
- docker_tag=${DOCKER_RELEASE_IMAGE#*:}
- echo "docker_image=$DOCKER_RELEASE_IMAGE" > docker.env
- echo "docker_image_digest=$docker_repository@$docker_digest" >> docker.env
- echo "docker_repository=$docker_repository" >> docker.env
- echo "docker_tag=$docker_tag" >> docker.env
- echo "docker_digest=$docker_digest" >> docker.env
artifacts:
reports:
dotenv:
- docker.env
rules:
# on tag: always
- if: $CI_COMMIT_TAG
# exclude non-production branches
- if: '$CI_COMMIT_REF_NAME !~ $PROD_REF'
when: never
# exclude if $PUBLISH_ON_PROD disabled
- if: '$PUBLISH_ON_PROD == "false" || $PUBLISH_ON_PROD == "no" || $PUBLISH_ON_PROD == ""'
when: never
# exclude if snapshot is same as release image
- if: '$DOCKER_SNAPSHOT_IMAGE == $DOCKER_RELEASE_IMAGE'
when: never
# if $AUTODEPLOY_TO_PROD: auto
- if: $AUTODEPLOY_TO_PROD
# else: manual + blocking
- if: $PUBLISH_ON_PROD # this 'if' is useless but only prevents GitLab warning :(
when: manual