Initial commit

.vagrant/
ubuntu-xenial-16.04-cloudimg-console.log
k8s-master.retry
k8s-worker.retry
admin.conf
join.sh
docker-registry.crt
Dockerfile
$bootstrap_ansible = <<-SHELL
echo "Installing Ansible..."
sudo apt-get update -y
sudo apt-get install -y software-properties-common
sudo apt-add-repository -y ppa:ansible/ansible
sudo apt-get update -y
sudo apt-get install -y ansible apt-transport-https
SHELL
$restart_kubelet = <<-SHELL
echo "Restarting Kubelet..."
sudo systemctl daemon-reload
sudo systemctl restart kubelet
SHELL
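# Three Ubuntu 16.04 VMs: k8s1 is provisioned with the k8s-master playbook,
# k8s2 and k8s3 with the k8s-worker playbook; each gets a host-only IP
# 192.168.56.11-13 on the private network defined below.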
Vagrant.configure(2) do |config|
(1..3).each do |i|
config.vm.define "k8s#{i}" do |s|
s.ssh.forward_agent = true
s.vm.box = "ubuntu/xenial64"
s.vm.hostname = "k8s#{i}"
s.vm.provision :shell,
inline: $bootstrap_ansible
if i == 1
s.vm.provision :shell,
inline: "PYTHONUNBUFFERED=1 ansible-playbook /vagrant/ansible/k8s-master.yml -c local"
else
s.vm.provision :shell,
inline: "PYTHONUNBUFFERED=1 ansible-playbook /vagrant/ansible/k8s-worker.yml -c local"
end
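# Pin the kubelet to the private-network IP; without --node-ip the node would
# typically register with the address of Vagrant's default NAT interface.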
s.vm.provision :shell,
inline: "echo 'KUBELET_EXTRA_ARGS=--node-ip=192.168.56.#{i+10}' | sudo tee /etc/default/kubelet"
s.vm.provision :shell,
inline: $restart_kubelet
s.vm.network "private_network",
ip: "192.168.56.#{i+10}",
netmask: "255.255.255.0",
auto_config: true
#virtualbox__intnet: "k8s-net"
s.vm.provider "virtualbox" do |v|
v.name = "k8s#{i}"
v.cpus = 2
v.memory = 2048
v.gui = true
v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
v.customize [ "modifyvm", :id, "--uartmode1", "disconnected" ]
#v.customize ["modifyvm", :id, "--natdnsproxy1", "on"]
end
end
end
end
- hosts: localhost
remote_user: vagrant
serial: 1
roles:
- k8s-base
- k8s-master
- hosts: localhost
remote_user: vagrant
serial: 1
roles:
- k8s-base
- k8s-worker
#!/bin/bash
# Stop the kubelet, remove all containers, and unmount any kubelet volume mounts.
systemctl stop kubelet
docker rm -f $(docker ps -a -q); mount | grep "/var/lib/kubelet" | awk '{print $3}' | xargs umount 1>/dev/null 2>/dev/null
# Wipe Kubernetes, etcd and CNI state, then recreate the config directory.
rm -rf /var/lib/kubelet /etc/kubernetes /var/lib/etcd /etc/cni
mkdir -p /etc/kubernetes
# Remove leftover bridge interfaces created by the container network.
ip link set cbr0 down; ip link del cbr0
ip link set cni0 down; ip link del cni0
systemctl start kubelet
#!/bin/bash
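# Expose the local Docker socket over TCP: socat listens on 2375 inside the
# container, published on host port 2376, and forwards to /var/run/docker.sock
# (e.g. docker -H tcp://192.168.56.11:2376 ps from another VM).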
docker run -d --restart=always -p 2376:2375 \
-v /var/run/docker.sock:/var/run/docker.sock \
alpine/socat tcp-listen:2375,fork,reuseaddr unix-connect:/var/run/docker.sock
---
- name: Remove Default Host Entry
become: yes
lineinfile:
dest: /etc/hosts
regexp: '^127\.0\.1\.1\s+k8s.*$'
state: absent
tags:
- k8s
- name: Ensure Hosts File
become: yes
lineinfile:
dest: /etc/hosts
line: "{{ item.ip }} {{ item.name }}"
with_items:
- { ip: "192.168.56.11", name: "k8s1" }
- { ip: "192.168.56.12", name: "k8s2" }
- { ip: "192.168.56.13", name: "k8s3" }
tags:
- k8s
- name: Ensure Docker Apt Key
become: yes
apt_key:
url: https://download.docker.com/linux/ubuntu/gpg
state: present
tags:
- k8s
- name: Ensure Docker Repository
become: yes
apt_repository:
repo: 'deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable'
state: present
update_cache: yes
tags:
- k8s
- name: Ensure Google Cloud Apt Key
become: yes
apt_key:
url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
state: present
tags:
- k8s
- name: Ensure Kubernetes Repository
become: yes
apt_repository:
repo: 'deb http://apt.kubernetes.io/ kubernetes-xenial main'
state: present
update_cache: yes
tags:
- k8s
- name: Ensure Base Tools
become: yes
apt:
name: "{{ packages }}"
vars:
packages:
- curl
- jq
- bc
- gawk
- apt-transport-https
tags:
- k8s
- name: Update system
become: yes
apt:
upgrade: yes
update_cache: yes
cache_valid_time: 86400  # one day
tags:
- k8s
- name: Ensure Docker
become: yes
apt:
name: docker-ce=18.06.2~ce~3-0~ubuntu
tags:
- k8s
- name: Ensure kubelet
become: yes
apt:
name: kubelet=1.14.1-00
tags:
- k8s
- name: Ensure kubeadm
become: yes
apt:
name: kubeadm=1.14.1-00
tags:
- k8s
- name: Ensure Docker Group
become: yes
group:
name: docker
state: present
tags:
- k8s
- name: Ensure User in Docker Group
become: yes
user:
name: vagrant
groups: docker
append: yes
tags:
- k8s
- name: Ensure Kubernetes Cleanup
become: yes
copy:
src: files/clean-k8s
dest: /usr/local/bin
mode: 0755
owner: root
group: root
tags:
- k8s
- name: Ensure Docker Socat
become: yes
copy:
src: files/docker-socat
dest: /usr/local/bin
mode: 0755
owner: root
group: root
tags:
- k8s
- name: Ensure swap is off
become: yes
command: "swapoff -a"
tags:
- k8s
- name: Remove swap from fstab
become: yes
lineinfile:
dest: /etc/fstab
regexp: 'swap'
state: absent
tags:
- k8s
- name: Create playground directories
become: yes
file:
path: "{{ item.path }}"
state: directory
with_items:
- { path: "/var/tmp/kafka/kafka-logs" }
- { path: "/var/tmp/zookeeper/data" }
- { path: "/var/tmp/zookeeper/datalog" }
- { path: "/var/tmp/flink/jobmanager" }
- { path: "/var/tmp/flink/taskmanager" }
tags:
- k8s
#!/bin/bash
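# Build the worker join command from the kubeadm-token and kubeadm-hash helper
# scripts; the master playbook copies the resulting join.sh to /vagrant for the
# worker VMs.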
echo "kubeadm join 192.168.56.11:6443 --token $(kubeadm-token) --discovery-token-ca-cert-hash sha256:$(kubeadm-hash)" > /home/vagrant/join.sh
#!/bin/bash
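# Create the "standard" StorageClass and mark it as the default class.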
kubectl create -f /var/tmp/storageclass-config.yaml
kubectl patch storageclass standard -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
#!/bin/bash
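# Print the admin-user service account secret, whose token is used to log in
# to the Kubernetes dashboard.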
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
#!/bin/bash
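# Deploy the local Docker registry manifest written by the master playbook.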
kubectl create -f /var/tmp/docker-registry.yaml
#!/bin/bash
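# Tear down the local Docker registry created by docker-registry-create.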
kubectl delete -f /var/tmp/docker-registry.yaml
#!/bin/bash
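# Compute the sha256 discovery-token-ca-cert-hash of the cluster CA public key,
# as expected by "kubeadm join".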
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
#!/bin/bash
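# Print the current bootstrap token (the entry whose usages include
# "authentication") for use in the join command.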
kubeadm token list | grep "authentication" | awk '{ print $1 }'
#!/bin/bash
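# Apply the Calico RBAC rules and network manifest downloaded by the master
# playbook (pod CIDR rewritten to 172.43.0.0/16).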
kubectl create -f /var/tmp/rbac-kdd.yaml
kubectl create -f /var/tmp/calico.yaml
#!/bin/bash
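# Initialize Helm's server-side component using the tiller service account
# created by the tiller RBAC config.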
helm init --service-account tiller
helm repo update
#!/bin/bash
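# Remove the NoSchedule taint from the master so workloads can also run on k8s1.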
kubectl taint nodes --all node-role.kubernetes.io/master-
---
- name: Pull Docker images
become: yes
command: "kubeadm config images pull"
tags:
- k8s
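# The pod network CIDR must match the CIDR substituted into calico.yaml below,
# and the advertise address is k8s1's host-only IP from the Vagrantfile.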
- name: Ensure kubeadm initialization
become: yes
command: "kubeadm init --pod-network-cidr=172.43.0.0/16 --apiserver-advertise-address=192.168.56.11"
tags:
- k8s
- name: Copy dashboard rbac config
become: yes
copy:
content: |
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
dest: /var/tmp/dashboard-rbac-config.yaml
tags:
- k8s
- name: Copy tiller rbac config
become: yes
copy:
content: |
apiVersion: v1
kind: ServiceAccount
metadata:
name: tiller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: tiller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: tiller
namespace: kube-system
dest: /var/tmp/tiller-rbac-config.yaml
tags:
- k8s
- name: Copy StorageClass config
become: yes
copy:
content: |
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: standard
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
dest: /var/tmp/storageclass-config.yaml
tags:
- k8s
- name: Copy config to /vagrant for other VMs
become: yes
copy:
src: /etc/kubernetes/admin.conf
dest: /vagrant/admin.conf
owner: vagrant
group: vagrant
mode: 0600
tags:
- k8s
- name: Copy config to home directory
become: yes
copy:
src: /etc/kubernetes/admin.conf
dest: /home/vagrant/admin.conf
owner: vagrant
group: vagrant
mode: 0600
tags:
- k8s
- name: Update Environment
become: yes
lineinfile:
path: /home/vagrant/.bashrc
regexp: '^export KUBECONFIG='
line: 'export KUBECONFIG=/home/vagrant/admin.conf'
state: present
tags:
- k8s
- name: Ensure Calico network file
become: yes
get_url:
url: https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
dest: /var/tmp/calico.yaml
mode: 0444
tags:
- k8s
- name: Ensure Calico RBAC file
become: yes
get_url:
url: https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
dest: /var/tmp/rbac-kdd.yaml
mode: 0444
tags:
- k8s
- name: Ensure Calico CIDR
become: yes
replace:
path: /var/tmp/calico.yaml
regexp: '192.168.0.0\/16'
replace: '172.43.0.0/16'
backup: yes
tags:
- k8s
- name: Ensure Network Start Scripts
become: yes
copy:
src: files/{{ item }}
dest: /usr/local/bin/{{ item }}
owner: root
group: root
mode: 0755
with_items:
- "start-calico"
tags:
- k8s
- name: Ensure Utility Scripts
become: yes
copy:
src: files/{{ item }}
dest: /usr/local/bin/{{ item }}
owner: root
group: root
mode: 0755
with_items:
- "dashboard-token"
- "kubeadm-hash"
- "kubeadm-token"
- "taint-nodes"
- "start-tiller"
- "create-storage-class"
- "create-join-script"
- "docker-registry-create"
- "docker-registry-delete"
tags:
- k8s
- name: Ensure Kube directory
become: yes
file:
path: /home/vagrant/.kube
state: directory
owner: vagrant
group: vagrant
tags:
- k8s
- name: Copy config to Kube directory
become: yes
copy:
src: /etc/kubernetes/admin.conf
dest: /home/vagrant/.kube/config
owner: vagrant
group: vagrant
mode: 0600
tags:
- k8s
- name: List Kubernetes nodes
become: yes
command: "kubectl --kubeconfig=/home/vagrant/admin.conf get nodes"
register: nodes
tags:
- k8s
- debug: var=nodes.stdout_lines
- name: Ensure dashboard file
become: yes
get_url:
url: https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
dest: /var/tmp/kubernetes-dashboard.yaml
mode: 0444
tags:
- k8s
- name: Install dashboard rbac
become: yes
command: "kubectl --kubeconfig=/home/vagrant/admin.conf apply -f /var/tmp/dashboard-rbac-config.yaml"
tags:
- k8s
- name: Install tiller rbac
become: yes
command: "kubectl --kubeconfig=/home/vagrant/admin.conf apply -f /var/tmp/tiller-rbac-config.yaml"
tags:
- k8s
- name: Install dashboard service
become: yes
command: "kubectl --kubeconfig=/home/vagrant/admin.conf apply -f /var/tmp/kubernetes-dashboard.yaml"
tags:
- k8s
- name: Ensure Helm files
become: yes
get_url:
url: https://storage.googleapis.com/kubernetes-helm/helm-v2.12.1-linux-amd64.tar.gz
dest: /var/tmp/helm.tar.gz
mode: 0444
tags:
- k8s
- name: Create Helm directory
become: yes
file:
path: /var/tmp/helm
state: directory
tags:
- k8s
- name: Decompress Helm files
become: yes
unarchive:
src: /var/tmp/helm.tar.gz
dest: /var/tmp/helm
tags:
- k8s
- name: Install helm command
become: yes
command: "cp /var/tmp/helm/linux-amd64/helm /usr/local/bin"
tags:
- k8s
- name: Install tiller command
become: yes
command: "cp /var/tmp/helm/linux-amd64/tiller /usr/local/bin"
tags:
- k8s
- name: Ensure Metrics server files
become: yes
git:
repo: 'https://github.com/kubernetes-incubator/metrics-server.git'
dest: /var/tmp/metrics-server
tags:
- k8s
- name: List Metrics server directory
become: yes
command: "ls -al /var/tmp/metrics-server"
register: lstmp
tags:
- k8s
- debug: var=lstmp.stdout_lines
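# The kubelets in this setup serve self-signed certificates and register on the
# private network, hence --kubelet-insecure-tls and the InternalIP address type.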
- name: Modify Metrics server deployment
become: yes
blockinfile:
dest: /var/tmp/metrics-server/deploy/1.8+/metrics-server-deployment.yaml
insertafter: 'imagePullPolicy: Always'
block: " command: ['/metrics-server', '--kubelet-insecure-tls', '--kubelet-preferred-address-types=InternalIP']"
backup: yes
tags:
- k8s
- name: Print Metrics server deployment
become: yes
command: "cat /var/tmp/metrics-server/deploy/1.8+/metrics-server-deployment.yaml"
register: cat
tags:
- k8s
- debug: var=cat.stdout_lines
- name: Install Metrics server
become: yes
command: "kubectl --kubeconfig=/home/vagrant/admin.conf create -f /var/tmp/metrics-server/deploy/1.8+/"
tags:
- k8s
- name: Create join script
become: yes
command: "create-join-script"
tags:
- k8s
- name: Copy join script
become: yes
copy:
src: /home/vagrant/join.sh
dest: /vagrant/join.sh
owner: vagrant
group: vagrant
mode: 0600
tags:
- k8s
- name: Create Docker Registry data directory
become: yes
file:
path: /var/tmp/docker-registry/data
state: directory
tags:
- k8s
- name: Create Docker Registry certs directory
become: yes
file:
path: /var/tmp/docker-registry/certs
state: directory
tags:
- k8s
- name: Create Docker Registry auth directory
become: yes
file:
path: /var/tmp/docker-registry/auth
state: directory
tags:
- k8s
- name: Copy Docker Registry files
become: yes
copy:
content: |
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: local-pv-claim-k8s1-docker-registry
labels:
app: registry
spec:
storageClassName: standard
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
selector:
matchLabels:
app: registry
---
apiVersion: v1
kind: Pod
metadata:
name: registry
labels:
app: registry
spec:
containers:
- name: registry
image: registry:2
imagePullPolicy: IfNotPresent
ports:
- containerPort: 5000
volumeMounts:
- mountPath: /var/lib/registry
subPath: data
name: registry-data
- mountPath: /etc/certs
subPath: certs
name: registry-data
- mountPath: /etc/auth
subPath: auth
name: registry-data
env:
- name: REGISTRY_HTTP_ADDR
value: "0.0.0.0:5000"
- name: REGISTRY_HTTP_TLS_CERTIFICATE
value: "/etc/certs/docker-registry.crt"