Commit c46a77c3 authored by Zitnik, Anze

Initial commit on public

Showing with 460 additions and 0 deletions
.idea
e2e-results
image: registry-gitlab.xlab.si/cyberwiser/os-xlab-ci-runner-base-image:latest

stages:
  - reprovision
  - simulate-scenario-instantiation
  - test-api
  - test-notifications
  - test-e2e

ci_job_0:
  stage: reprovision
  variables:
    ENVIRONMENT: 'os-xlab-ci'
  script:
    - date -u
    - make provision-reset-deploy
    - make provision-remove-es-volume
    - make provision
  only:
    - develop

ci_job_1:
  stage: simulate-scenario-instantiation
  variables:
    ENVIRONMENT: 'os-xlab-ci'
  script:
    - make provision-scenario-instantiation
  only:
    - develop

ci_job_2:
  image: nexus-registry.xlab.si:5001/node:10-alpine
  stage: test-api
  script:
    - apk add --no-cache git make python
    - npm install -g mocha
    - scripts/test-api.sh
  only:
    - develop

ci_job_3:
  stage: test-notifications
  image: nexus-registry.xlab.si:5001/node:10-alpine
  script:
    - date -u
    - apk add --no-cache git make python
    - npm install -g mocha
    - scripts/test-notifications.sh
  only:
    - develop

ci_job_4:
  stage: test-e2e
  variables:
    ENVIRONMENT: 'os-xlab-ci'
  script:
    - date -u
    - make test-e2e
  artifacts:
    when: always
    expire_in: 1 week
    paths:
      - results
  only:
    - develop
{
  "makefile.extensionOutputFolder": "./.vscode"
}
\ No newline at end of file
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## [Unreleased]
stages:
  - reprovision
  - release

ci_job_0:
  stage: reprovision
  script:
    - make ci-reprovision
  only:
    - develop

merge:
  stage: release
  only:
    - develop
  script:
    - make merge-on-master
SERVICE=deploy-vat
VERSION=v0.5.1
Makefile 0 → 100644
ENVIRONMENT ?= vagrant
DEPLOY_DIR = $(PWD)
SCRIPTS_DIR = $(DEPLOY_DIR)/scripts
ENV_DIR = $(DEPLOY_DIR)/environments/$(ENVIRONMENT)
ANSIBLE_DIR = $(DEPLOY_DIR)/ansible
ANSIBLE_ENV = ANSIBLE_ROLES_PATH=$(ANSIBLE_DIR)/roles ANSIBLE_HASH_BEHAVIOUR=merge
FORCE_RESTART ?= False

# Environment-specific settings (SSH_PRIVATE_KEY, SSH_USER, ...) come from the selected environment
include $(ENV_DIR)/$(ENVIRONMENT).mk

ANSIBLE_ARGS = -i $(ENV_DIR)/inventory \
	--private-key=$(SSH_PRIVATE_KEY) \
	-e ansible_dir=$(ANSIBLE_DIR) \
	-e gitlab_docker_registry_user=gitlab-token \
	-e gitlab_docker_registry_password=REMOVED \
	-e nexus_docker_registry_user=REMOVED \
	-e nexus_docker_registry_password=REMOVED \
	-e '{ force_restart: $(FORCE_RESTART) }' \
	-e environment_dir=$(ENV_DIR) \
	-u $(SSH_USER) $(EXTRA_ARGS)

reprovision:
	@ANSIBLE_HOST_KEY_CHECKING=False $(ANSIBLE_ENV) ansible-playbook $(ANSIBLE_ARGS) $(ANSIBLE_DIR)/books/provision-reset-deploy.yml
	@ANSIBLE_HOST_KEY_CHECKING=False $(ANSIBLE_ENV) ansible-playbook $(ANSIBLE_ARGS) $(ANSIBLE_DIR)/books/provision.yml

provision:
	@ANSIBLE_HOST_KEY_CHECKING=False $(ANSIBLE_ENV) ansible-playbook $(ANSIBLE_ARGS) $(ANSIBLE_DIR)/books/provision.yml

# every playbook matching books/provision-*.yml automatically becomes a make target of the same name
PROVISION_TARGETS = $(notdir $(basename $(wildcard $(ANSIBLE_DIR)/books/provision-*.yml)))
$(PROVISION_TARGETS):
	@ANSIBLE_HOST_KEY_CHECKING=False $(ANSIBLE_ENV) ansible-playbook $(ANSIBLE_ARGS) $(ANSIBLE_DIR)/books/$@.yml

# RUN_SHELL is presumably provided by the included $(ENVIRONMENT).mk
test-e2e:
	@$(RUN_SHELL) $(SCRIPTS_DIR)/test-e2e.sh

test-api:
	@$(RUN_SHELL) $(SCRIPTS_DIR)/test-api.sh

test-notifications:
	@$(RUN_SHELL) $(SCRIPTS_DIR)/test-notifications.sh
README.md 0 → 100644
# Vulnerability Assessment Tools (VAT) deploy scripts
Ansible scripts with a make wrapper for deploying all VAT services.
## Requirements
Confirmed to work with the following versions:
- Vagrant: 1.9.8
- Virtualbox: 5.1.26
- Ansible: 2.7.8
## Instructions
### Make targets
- `create`: creates Vagrant VM (in vagrant environment)
- `delete`: deletes Vagrant VM (in vagrant environment)
- `ssh-services`: ssh access to service VM
- `ssh-runtime`: ssh access to dynamic docker runtime VM
- `ssh-vulnerable`: ssh access to VM hosting vulnerable services
- `provision`: run Ansible deploy scripts
- `provision-<service>`: run provisioning for one service only (after the main provisioning process is already complete), for example `provision-log-dog` (see the sketch below)
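A typical local workflow, sketched here under the assumption of the default `vagrant` environment and the `log-dog` service named above, could be:
```
# create the local VMs and run the full deployment
make create
make provision

# later: re-run provisioning for a single service
make provision-log-dog

# ssh into the service VM to inspect the result
make ssh-services
```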
### App domain
The vagrant deployment sets the domain to `vat.project.com`. The domain can be overridden in other environments by setting
the `deploy_domain` variable (see section on defining environments).
Adding the following line to `/etc/hosts` will ensure the local deployment is resolvable:
```
192.168.33.10 vat.project.com
```
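On a Linux or macOS host, for instance, the entry can be appended with (assumes sudo rights):
```
echo "192.168.33.10 vat.project.com" | sudo tee -a /etc/hosts
```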
### ElasticSearch
`Elasticsearch HQ` is at the following address: http://192.168.33.10:5000/ (IP in vagrant environment).
`Kibana` can be run against the deployed `ElasticSearch` with the following command:
```
docker run --name some-kibana3 -e ELASTICSEARCH_URL=http://192.168.33.10:9200 -e XPACK_SECURITY_ENABLED=false -p 5601:5601 -d docker.elastic.co/kibana/kibana:6.1.3
```
`Kibana` takes about 8 minutes to start; once it is up, go to `localhost:5601`.
`ElasticSearch` is at the following address: http://192.168.33.10:9200/ (IP in vagrant environment).
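To quickly check that the deployed `ElasticSearch` is reachable (vagrant IP assumed), its standard cluster health endpoint can be queried:
```
curl "http://192.168.33.10:9200/_cluster/health?pretty"
```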
### RabbitMQ
The RabbitMQ management interface is at: http://192.168.33.10:15672/ (IP in vagrant environment).
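The management HTTP API can also be used for a scripted check; the credentials below are RabbitMQ's defaults and are only an assumption, as the deployment may override them:
```
# default guest/guest credentials assumed; adjust to your deployment
curl -u guest:guest http://192.168.33.10:15672/api/overview
```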
## Environments
The only environment currently defined is `vagrant` (used by default), which performs a local deployment
to three VMs (services, dynamic docker runtime, and a vulnerable-services placeholder).
When more environments are defined, one is selected via the `ENVIRONMENT` environment variable:
```
export ENVIRONMENT=vagrant #Vagrant, default
export ENVIRONMENT=os-xlab #XLAB Openstack (does not exist yet)
# or
ENVIRONMENT=... make ...
```
A custom environment is defined by placing a new directory in `environments`.
It must contain an Ansible `inventory` file and a `<dir_name>.mk` file that defines
at least two variables, `SSH_PRIVATE_KEY` and `SSH_USER`: the SSH key and user for accessing
the target deploy devices. The SSH user must have password-less sudo privileges.
Optionally, a `provision-config.override.yml` file can be provided
to override default deploy variables.
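A minimal sketch of such an environment, using the hypothetical name `my-env` (the key path, user and inventory contents are placeholders):
```
mkdir -p environments/my-env

# Ansible inventory (see the inventory section below for the expected groups)
$EDITOR environments/my-env/inventory

# required make variables
cat > environments/my-env/my-env.mk <<'EOF'
SSH_PRIVATE_KEY = ~/.ssh/my-env-deploy-key
SSH_USER = deploy
EOF

ENVIRONMENT=my-env make provision
```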
### Including certificates for scenario / CITEF MBs
Environments that need SSL certificates for connecting to the scenario or CITEF message brokers
should include `citef.tar` and/or `scenario.tar` tarballs in their respective directories,
containing the server's CA cert, the client cert and the private key. Service mounts must also
be defined in the environment's `provision-config.override.yml` file, like this:
```yaml
mounts:
xruntime:
- "{{ scenario_cert_dir }}/ca.crt:/etc/xruntime/ca.crt"
- "{{ scenario_cert_dir }}/cert.pem:/etc/xruntime/cert.pem"
- "{{ scenario_cert_dir }}/key.pem:/etc/xruntime/key.pem"
cesar_milan:
- "{{ scenario_cert_dir }}:/service/scenario_rmq_cert/"
- "{{ citef_cert_dir }}:/service/citef_rmq_cert/"
```
Note that, due to image specifics, Xruntime's cert files must be mounted one by one, as the destination
directory contains the executable. The Cyberrange environment is the exception: no cert tarballs are
present in its environment directory, but the mounts are still specified, since it runs on a pre-provisioned
VM.
Also note that `scenario.tar` in `image-builder-services` contains certs for ATOS' MB, while `scenario.tar`
in `os-xlab-ci` contains certs for the internal test SSL RMQ.
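Assuming the file names used in the mounts example above (`ca.crt`, `cert.pem`, `key.pem`), such a tarball can be produced with plain `tar`:
```
# package the CA cert, client cert and private key for the scenario broker
tar cf scenario.tar ca.crt cert.pem key.pem
mv scenario.tar environments/<your-environment>/
```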
### How to set up the Ansible inventory for custom environments
Ansible uses an inventory file to define the deploy environment and
which roles to execute on which hosts. All entries carry a `public_ip` variable
for easier dynamic configuration, since relying on interface names and their ordering is error-prone;
if `ext0` were used, for example, vagrant VMs would all fail because of vagrant's NAT interface.
Although the variable says `ip`, hostnames can be used as well.
The following is the inventory file from the `vagrant` environment, with group explanations:
```
# devices where docker has to be installed
[docker]
192.168.33.10 public_ip=192.168.33.10
192.168.33.11 public_ip=192.168.33.11
192.168.33.12 public_ip=192.168.33.12
# host for consul server, must contain one device only
[dns-server]
192.168.33.10 public_ip=192.168.33.10
# devices that need to resolve internal addresses, omitted from inventory
# as infrastructure services run on same node as application containers,
# do not add the same node as dns-server to this group
# [dns-agent]
# 192.168.33.10 public_ip=192.168.33.10
# 192.168.33.12 public_ip=192.168.33.12
# 192.168.33.13 public_ip=192.168.33.13
# host for infrastructure services (Rabbit, Swift, Elastic), must contain one device only
[infrastructure]
192.168.33.10 public_ip=192.168.33.10
# host for application services
[services]
192.168.33.10 public_ip=192.168.33.10
# host for vulnerable services, attack targets
[vulnerable]
192.168.33.12 public_ip=192.168.33.12
# docker runtime host, does not execute role, but is used to configure
# docker interface target host for running attack script containers
[runtime]
192.168.33.11 public_ip=192.168.33.11
```
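Once the inventory is in place, a quick sanity check (mirroring the SSH settings the Makefile passes to `ansible-playbook`) is Ansible's built-in `ping` module:
```
ANSIBLE_HOST_KEY_CHECKING=False ansible all \
    -i environments/vagrant/inventory \
    -u "$SSH_USER" --private-key "$SSH_PRIVATE_KEY" \
    -m ping
```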
*.retry
- import_playbook: "{{ ansible_dir }}/helpers/wait-rmq-service.yml"
  vars:
    service_to_check: xruntime # for container name only, doesn't affect the actual check task
    check_task:
      task_type: tasks.get
      details: {}
    retry_check:
      max_attempts: 12
      interval: 5
- hosts: services
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
    - import_tasks: "{{ ansible_dir }}/helpers/wait-rabbit.yml"
  roles:
    - role: docker-container
      service_name: api
      service_type: 'docker'
      service_has_config: True
      service_config_format: json
      service_config_path: /service/config/config.json
      service_ports:
        - "{{ ports.api }}:10010"
      service_mounts: "{{ mounts.api }}"
      service_image: "{{ images.api }}:{{ versions.api }}"
      service_use_local_image: "{{ cyberrange }}"
- hosts: services
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
    - import_tasks: "{{ ansible_dir }}/helpers/wait-rabbit.yml"
  roles:
    - role: docker-container
      service_name: auth
      service_type: 'docker'
      service_has_config: True
      service_config_format: json
      service_config_path: /service/config/config.json
      service_has_configs: True
      service_configs:
        - { path: /service/auth/auth.json }
      service_image: "{{ images.auth }}:{{ versions.auth }}"
      service_use_local_image: "{{ cyberrange }}"
- hosts: clc services
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
    - name: create directory for scenario MB certs
      file:
        state: directory
        path: "{{ scenario_cert_dir }}"
  roles:
    - role: prepare-certs
      cert_target: scenario
      cert_dir: "{{ scenario_cert_dir }}"
- hosts: clc
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
    - name: create directory for CITEF MB certs
      file:
        state: directory
        path: "{{ citef_cert_dir }}"
  roles:
    - role: prepare-certs
      cert_target: citef
      cert_dir: "{{ citef_cert_dir }}"
- hosts: clc
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
    - import_tasks: "{{ ansible_dir }}/helpers/wait-rabbit.yml"
      vars:
        rabbit_container: "{{ 'vasily-zaytsev' if local_rabbit else 'citef-rabbit' }}"
      when: local_citef_rabbit | bool
    - name: create directory for cesar conf dir mount
      file:
        state: directory
        path: "{{ cesar_mount_dir }}"
  roles:
    - role: docker-container
      service_name: cesar-milan
      service_type: 'docker'
      service_has_config: True
      service_config_format: json
      service_config_path: /service/config/config.json
      service_mounts: "{{ mounts.cesar_milan }}"
      service_image: "{{ images.cesar_milan }}:{{ versions.cesar_milan }}"
      service_use_local_image: "{{ cyberrange }}"
- hosts: services
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
    - import_tasks: "{{ ansible_dir }}/helpers/wait-rabbit.yml"
  roles:
    - role: docker-container
      service_name: ci-clc-test
      service_type: 'docker'
      service_has_config: False
      service_image: registry-gitlab.xlab.si/cyberwiser/ci-clc-test:test
      service_use_local_image: "{{ cyberrange }}"
      run_to_completion: True
- hosts: citef-rmq
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
  roles:
    - role: docker-container
      service_name: citef-rabbit
      service_type: 'docker'
      service_ports:
        - "{{ ports.citef_rabbit.service }}:5671"
        - "{{ ports.citef_rabbit.management }}:15672"
      service_image: registry-gitlab.xlab.si/cyberwiser/ssl-rabbit:latest
      service_use_local_image: "{{ cyberrange }}"
---
- import_playbook: provision-elastic.yml
- import_playbook: provision-elastic-hq.yml
- import_playbook: provision-kibana.yml
- import_playbook: provision-cesar-milan.yml
- hosts: services
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
    - import_tasks: "{{ ansible_dir }}/helpers/wait-rabbit.yml"
  roles:
    - role: docker-container
      service_name: docker-talker
      service_type: 'docker'
      service_has_config: True
      service_config_format: json
      service_config_path: /service/config/config.json
      service_image: "{{ images.docker_talker }}:{{ versions.docker_talker }}"
      restart_policy: on-failure
      # service_mounts_extended:
      #   - type=volume,src=talker_volume,dst=/service/container_files,volume-driver=local,volume-opt=type=nfs,\"volume-opt=o=nfsvers=4,addr={{ nfs_host }}\",volume-opt=device=:{{ nfs_share }}
      service_use_local_image: "{{ cyberrange }}"
- hosts: clc
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
  roles:
    - role: docker-container
      service_name: elastic-hq
      service_type: 'docker'
      service_ports:
        - "{{ ports.elastic_hq }}:5000"
      service_image: "{{ images.elastic_hq }}"
      service_env_vars:
        - "HQ_DEFAULT_URL=http://{{ elastic_host }}:{{ ports.elasticsearch }}"
- hosts: clc
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
  roles:
    - role: docker-container
      service_name: elastic
      service_type: 'docker'
      service_ports:
        - "{{ ports.elasticsearch }}:9200"
      service_image: "{{ images.elasticsearch }}:{{ versions.elasticsearch }}"
      service_mounts:
        - "{{ es_volume }}:/usr/share/elasticsearch/data"
      service_env_vars:
        - "discovery.type=single-node"
---
- hosts: services
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
  roles:
    - role: docker-container
      service_name: frontend
      service_type: 'docker'
      service_has_config: True
      service_config_format: cfg
      service_config_path: /service/dist/config.cfg
      service_image: "{{ images.frontend }}:{{ versions.frontend }}"
      service_ports:
        - "{{ ports.frontend }}:80"
      service_use_local_image: "{{ cyberrange }}"