Commit c46a77c3 authored by Zitnik, Anze: Initial commit on public
- hosts: services
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
    - import_tasks: "{{ ansible_dir }}/helpers/wait-rabbit.yml"
    - import_tasks: "{{ ansible_dir }}/helpers/wait-mongo.yml"
    - import_tasks: "{{ ansible_dir }}/helpers/wait-ext-port.yml"
  vars:
    remote_host: "{{ cw_rabbit.host }}"
    remote_port: "{{ ports.cw_rabbit.service }}"
    wait_condition: "{{ not local_rabbit }}"
  roles:
    - role: docker-container
      service_name: xruntime
      service_type: 'docker'
      service_image: "{{ images.xruntime }}:{{ versions.xruntime }}"
      service_has_config: True
      service_config_format: yml
      service_config_path: /etc/xruntime/config.yml
      service_mounts: "{{ mounts.xruntime }}"
      restart_policy: on-failure
      service_use_local_image: "{{ cyberrange }}"
---
- hosts: docker
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
  roles:
    - role: docker-engine
      as_system_service: True

- hosts: dns-server
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
  roles:
    - role: dns
      consul_server: True
  post_tasks:
    - import_tasks: "{{ ansible_dir }}/helpers/wait-consul.yml"

- hosts: dns-agent
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
  roles:
    - role: dns
      consul_agent: True

- hosts: service-discovery
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
  roles:
    - role: service-discovery

# - hosts: nfs
#   become: yes
#   pre_tasks:
#     - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
#   roles:
#     - role: nfs

- hosts: runtime
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
  roles:
    - role: runtime

- hosts: services
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
  roles:
    - role: services
# test services, only deployed if the inventory groups exist; used only by CI
- import_playbook: provision-scenario-rmq.yml
- import_playbook: provision-citef-rmq.yml
# set up rmq certificates, if the environment supplies them
- import_playbook: provision-certs.yml
- import_playbook: provision-infrastructure.yml
- import_playbook: provision-clc.yml
- import_playbook: provision-services.yml
- import_playbook: provision-vulnerable.yml
- import_playbook: provision-prepull-images.yml
# Commenting out image and volume pruning as it's not yet functional (copy-pasted from a different project).
# In the Cyberwiser use case it would actually be detrimental: attack tool images are prepulled but their
# containers are not started, so the images would be removed needlessly.
# Suggestion: make this a separate make provision-... target that CI runs at the end of pipelines
# - hosts: docker
#   become: yes
#   tasks:
#     - name: cleanup old images
#       command: docker image prune -f
#       when: dev_mode is defined and dev_mode
#     - name: cleanup old volumes
#       command: docker volume prune -f
#       when: dev_mode is defined and dev_mode
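# A minimal sketch of that suggestion, assuming a hypothetical provision-prune.yml
# playbook (the same tasks as above, without the dev_mode guard) and a matching
# make target that CI invokes as the last pipeline step:
#   provision-prune:
#       ansible-playbook -i $(INVENTORY) $(ANSIBLE_DIR)/provision-prune.yml
# INVENTORY and ANSIBLE_DIR are placeholders for however the other provision-*
# targets are wired up in this repo's Makefile.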
---
docker_registry_gitlab: registry-gitlab.xlab.si
docker_registry_gitlab_proxy: nexus-registry.xlab.si:5000
docker_registry_docker_hub_proxy: nexus-registry.xlab.si:5001
docker_registry_vat_base: "{{ docker_registry_gitlab_proxy }}/cyberwiser"
docker_registry_xcollection_base: "{{ docker_registry_gitlab_proxy }}/x-collection"
service_config_dir: /etc/cyberwiser
# Indicates whether this is a vagrant / openstack deploy or a cyberrange deploy.
# When deploying on the cyberrange, pulling images will be skipped (they will be
# preloaded), since internet access is disabled by default.
cyberrange: False
# Affects the usage of wait-for helpers, which will be invoked only if
# we're locally deploying our instances of rabbit and elastic in docker containers.
local_rabbit: true
local_citef_rabbit: true
local_elastic: true
es_volume: es_data
rabbit_user: guest
rabbit_pass: guest
rabbit_host: vasily-zaytsev.service.consul
citef_cert_dir: "{{ service_config_dir }}/certs/citef/"
scenario_cert_dir: "{{ service_config_dir }}/certs/scenario/"
cesar_mount_dir: "{{ service_config_dir }}/cesar"
# Name of the sudo team, so that its users have access to everything else
white_team_name: "white"
cw_rabbit:
  user: "{{ rabbit_user }}"
  pass: "{{ rabbit_pass }}"
  host: "{{ rabbit_host }}"
  ssl: false
  #cacert: {{ }}
citef_rabbit:
  user: "{{ rabbit_user }}"
  pass: "{{ rabbit_pass }}"
  host: "{{ rabbit_host }}"
  ssl: false
  #cacert: {{ }}
swift_host: swift.service.consul
swift_volume: swift_volume
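# The Swarm-style placement constraint below pins swift to nodes carrying the
# swift_persistence node label (this assumes the service runs under Docker Swarm).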
swift_persistence_label: swift_persistence
swift_placement_constaint: node.labels.{{ swift_persistence_label }} == True
elastic_host: elastic.service.consul
xruntime:
  log_level: debug # debug or info
  # set to true if you want xruntime to instruct docker-interface
  # to search for docker images locally
  use_local_images: false
  must_wait_for_start: false
  #ssl:
  #  client_key: {{ }}
  #  client_cert: {{ }}
docker_runtime_host: "{{ hostvars[groups['runtime'][0]]['public_ip'] }}"
docker_runtime_port: 2376
docker_autoremove_containers: true
mongo_host: mongo.service.consul
mongo_user: root
mongo_pass: root
mongo_dbname: xruntime
mongo_storage_dir: /etc/mongodb/data # on the host, for persistence of mongodb container data
postgres_storage_dir: /etc/postgres/data # on the host, for persistence of postgres container data
deploy_domain: vat.project.com
deploy_protocol: http
scp_user: root
scp_base_dir: /etc/cyberwiser/task_files/
scp_port: 22
configuration_postgres_db:
  # username cannot be changed
  host: postgres-db.service.consul
  username: postgres
  password: password
  dialect: postgres
  logging: false
  databases:
    notifications:
      username: notification
      password: notification
images:
  docker_talker: "{{ docker_registry_vat_base }}/docker-interface/docker-talker"
  xruntime: "{{ docker_registry_vat_base }}/xruntime"
  api: "{{ docker_registry_vat_base }}/rest-api"
  notifications_connector: "{{ docker_registry_vat_base }}/notifications-connector"
  notifications_processor: "{{ docker_registry_vat_base }}/notifications-processor"
  proxy: "{{ docker_registry_vat_base }}/proxy"
  log_dog: "{{ docker_registry_vat_base }}/log-dog"
  auth: "{{ docker_registry_vat_base }}/auth"
  cesar_milan: "{{ docker_registry_vat_base }}/cesar-milan"
  rabbit: "{{ docker_registry_docker_hub_proxy }}/rabbitmq:management-alpine"
  swift: "{{ docker_registry_docker_hub_proxy }}/morrisjobke/docker-swift-onlyone:latest"
  elasticsearch: "{{ docker_registry_vat_base }}/clc-elastic"
  kibana: "{{ docker_registry_vat_base }}/clc-kibana"
  surreal: "{{ docker_registry_vat_base }}/attack-scripts/surrealtodo-lfi"
  dvwa: "{{ docker_registry_docker_hub_proxy }}/vulnerables/web-dvwa"
  elastic_hq: "{{ docker_registry_docker_hub_proxy }}/elastichq/elasticsearch-hq"
  consul: "{{ docker_registry_docker_hub_proxy }}/consul:1.0.0"
  registrator: "{{ docker_registry_docker_hub_proxy }}/gliderlabs/registrator:v6"
  mongo: "{{ docker_registry_docker_hub_proxy }}/mongo:3.4"
  mongo_client: "{{ docker_registry_docker_hub_proxy }}/mongoclient/mongoclient:latest"
  frontend: "{{ docker_registry_vat_base }}/frontend/frontend"
  healthchecker: "{{ docker_registry_xcollection_base }}/docker/services/rabbit-healthchecker"
  postgres: "{{ docker_registry_docker_hub_proxy }}/postgres"
mounts:
  xruntime: [ ]
  log_dog:
    - path: "/root/config/log-dog.conf"
  api: [ ]
  cesar_milan:
    - "/var/run/docker.sock:/var/run/docker.sock"
    - "{{ cesar_mount_dir }}:/tmp/"
ports:
  cw_rabbit:
    service: 5672
    management: 15673
  citef_rabbit:
    service: 5672
    management: 15672
  rabbit:
    service: 5672
    management: 15672
  swift: 12345
  elasticsearch: 9200
  kibana: 5601
  elastic_hq: 5000
  api: 10010
  notifications_processor: 10011
  notifications_connector: 10099
  surreal:
    http: 80
  dvwa:
    http: 81
  proxy: 80
  mongo: 27017
  mongo_client: 3000
  frontend: 10080
  postgres: 5432
---
- name: include provision config
  include_vars: "{{ ansible_dir }}/../provision-config.yml"

- name: include globals
  include_vars: globals.yml

- name: check existence of provision-config.override.yml
  stat:
    path: "{{ environment_dir }}/provision-config.override.yml"
  delegate_to: localhost
  changed_when: False
  become: no
  register: provision_config_override

- name: vars - include 'provision-config.override.yml'
  include_vars: "{{ environment_dir }}/provision-config.override.yml"
  when: provision_config_override.stat.exists
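# A sketch of what such an override file might contain (hypothetical values;
# any key from provision-config.yml, e.g. local_rabbit or rabbit_host, can be
# overridden this way):
#   local_rabbit: false
#   rabbit_host: rabbit.external.example.com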
---
- name: wait for consul server to start up
  shell: docker logs consul-server
  register: result
  until: result.stdout.find("Consul agent running!") != -1
  retries: 20
  delay: 5
---
- name: wait for elastic to show it's healthy
  shell: docker inspect -f {%raw%}{{.State.Health.Status}}{%endraw%} elastic
  register: result
  until: result.stdout == 'healthy'
  retries: 20
  delay: 5
---
- name: wait for port on remote server to open
  wait_for:
    port: "{{ remote_port }}"
    host: "{{ remote_host }}"
    timeout: "{{ remote_timeout|default(300) }}"
  when: wait_condition|default(True)
---
- name: wait for mongodb to be ready
  shell: docker logs mongo
  register: result
  until: result.stdout.find("waiting for connections") != -1
  retries: 20
  delay: 5
---
- name: wait for rabbit to be ready
  shell: docker exec {{ rabbit_container | default('vasily-zaytsev') }} rabbitmqadmin show overview
  register: result
  until: result.rc == 0
  retries: 20
  delay: 5
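# The container name defaults to vasily-zaytsev and can be overridden when
# importing this helper, e.g. (hypothetical container name):
#   - import_tasks: "{{ ansible_dir }}/helpers/wait-rabbit.yml"
#     vars:
#       rabbit_container: my-rabbit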
- hosts: services
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
    - import_tasks: "{{ ansible_dir }}/helpers/wait-rabbit.yml"
  roles:
    - role: docker-container
      service_name: healthcheck
      service_postfix: "-{{ service_to_check|default('service') }}"
      service_type: 'docker'
      service_has_config: True
      service_config_format: json
      service_config_path: /service/config/config.json
      service_image: "{{ images.healthchecker }}:{{ versions.healthchecker }}"
      run_to_completion: True
      service_use_local_image: "{{ cyberrange }}"
.idea
---
stages:
  - update_docs
  - check
ci_job_update_docs:
  stage: update_docs
  only:
    - master
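  # ${CI_PROJECT_DIR%$CI_PROJECT_PATH} strips the project path suffix from the
  # checkout dir, leaving the runner's builds root, so the clones below land
  # next to (not inside) this project's checkout.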
  script:
    - "rm -r ${CI_PROJECT_DIR%$CI_PROJECT_PATH}x-collection/docs || true"
    - "git clone ssh://git@gitlab.xlab.si:13022/x-collection/docs.git ${CI_PROJECT_DIR%$CI_PROJECT_PATH}x-collection/docs"
    - "make -C ${CI_PROJECT_DIR%$CI_PROJECT_PATH}x-collection/docs/ update-version commit-version"
ci_job_qa_check:
  stage: check
  only:
    - master
  before_script:
    - "rm -r ${CI_PROJECT_DIR%$CI_PROJECT_PATH}x-collection/tools/release-scripts || true"
    - "git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.xlab.si/x-collection/tools/release-scripts.git ${CI_PROJECT_DIR%$CI_PROJECT_PATH}x-collection/tools/release-scripts"
  script:
    - make qa-check
ANSIBLE_FILES_PATH_MATCH="*/tasks/*"
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [1.1.2] - 2019-02-22
### Changed
- Change consul check
## [1.1.1] - 2019-01-07
### Fixed
- Fix QA issues.
## [1.1.0] - 2019-01-04
### Changed
- Change repo path
- QA task taken from RS
## [1.0.0] - 2018-11-19
### Added
- QA integration
## [0.0.1] - 2018-11-14
### Added
- Initial release
VERSION=v1.1.2
SERVICE=dns
-include ../../../tools/release-scripts/src/MakefileQA
# Ansible role dns
With this role you can configure a dns-server and dns-agent on your host machines.
## Including into your repository
You can use this role in your project as a git subtree:
`git subtree add --prefix=ansible/roles/dns ssh://git@gitlab.xlab.si:13022/x-collection/deployment/ansible-roles/dns.git master --squash`
Later, you can update it to the latest version with the following command:
`git subtree pull --prefix=ansible/roles/dns ssh://git@gitlab.xlab.si:13022/x-collection/deployment/ansible-roles/dns.git master --squash`
## Usage
### Playbook
In your playbook, you define plays for the `dns` role:
```yaml
- hosts: dns-server
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
  roles:
    - role: dns
      consul_server: True
  post_tasks:
    - name: wait for consul server to start up
      shell: docker logs consul-server
      register: result
      until: result.stdout.find("Consul agent running!") != -1
      retries: 20
      delay: 5

- hosts: dns-agent
  become: yes
  pre_tasks:
    - import_tasks: "{{ ansible_dir }}/globals/vars.yml"
  roles:
    - role: dns
      consul_agent: True
```
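### Inventory
The plays above target the `dns-server` and `dns-agent` inventory groups, and the
role's tasks read a `public_ip` hostvar for joining and advertising. A minimal
sketch (host names and addresses are placeholders):
```yaml
all:
  children:
    dns-server:
      hosts:
        ns1.example.com:
          public_ip: 192.0.2.10
    dns-agent:
      hosts:
        node1.example.com:
          public_ip: 192.0.2.11
```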
---
- name: check if consul already running
  command: docker inspect consul-agent
  register: consul_running
  ignore_errors: true
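# Port notes: 8301/tcp+udp is Consul's Serf LAN gossip port; host port 53 maps
# to 8600/udp, Consul's DNS interface; -recursor forwards non-.consul lookups
# to 8.8.8.8.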
- name: start consul
  command: >
    docker run -d -p 8301:8301/tcp -p 8301:8301/udp -p 53:8600/udp
    --name consul-agent {{ images.consul }} agent -client 0.0.0.0
    -join={{ hostvars[groups['dns-server'][0]]['public_ip'] }}
    -advertise {{ hostvars[inventory_hostname]['public_ip'] }} -recursor 8.8.8.8
  when: consul_running is failed
---
- import_tasks: agent.yml
  when: consul_agent is defined and consul_agent

- import_tasks: server.yml
  when: consul_server is defined and consul_server
---
- name: check if consul already running
  command: docker inspect consul-server
  register: consul_running
  ignore_errors: true
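# Port notes: 8300 is server RPC, 8301/tcp+udp is Serf LAN gossip, 8500 is the
# HTTP API/UI, and host port 53 maps to 8600/udp (Consul DNS); -bootstrap lets
# this single server elect itself leader.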
- name: start consul
  command: >
    docker run -d -p 8300:8300 -p 8301:8301/tcp -p 8301:8301/udp -p 8500:8500 -p 53:8600/udp
    --name consul-server {{ images.consul }} agent -server -bootstrap -client 0.0.0.0
    -advertise {{ hostvars[inventory_hostname]['public_ip'] }} -recursor 8.8.8.8
  when: consul_running is failed