Initial commit

.vagrant/
.vagrant
ubuntu-xenial-16.04-cloudimg-console.log
k8s-master.retry
k8s-worker.retry
admin.conf
join.sh
docker-registry.crt
Dockerfile
$bootstrap_ansible = <<-SHELL
  echo "Installing Ansible..."
  sudo apt-get update -y
  sudo apt-get install -y software-properties-common
  sudo apt-add-repository ppa:ansible/ansible
  sudo apt-get update -y
  sudo apt-get install -y ansible apt-transport-https
SHELL

$restart_kubelet = <<-SHELL
  echo "Restarting Kubelet..."
  sudo systemctl daemon-reload
  sudo systemctl restart kubelet
SHELL

Vagrant.configure(2) do |config|
  (1..3).each do |i|
    config.vm.define "k8s#{i}" do |s|
      s.ssh.forward_agent = true
      s.vm.box = "ubuntu/xenial64"
      s.vm.hostname = "k8s#{i}"
      s.vm.provision :shell,
        inline: $bootstrap_ansible
      if i == 1
        s.vm.provision :shell,
          inline: "PYTHONUNBUFFERED=1 ansible-playbook /vagrant/ansible/k8s-master.yml -c local"
      else
        s.vm.provision :shell,
          inline: "PYTHONUNBUFFERED=1 ansible-playbook /vagrant/ansible/k8s-worker.yml -c local"
      end
      s.vm.provision :shell,
        inline: "echo 'KUBELET_EXTRA_ARGS=--node-ip=192.168.56.#{i+10}' | sudo tee /etc/default/kubelet"
      s.vm.provision :shell,
        inline: $restart_kubelet
      s.vm.network "private_network",
        ip: "192.168.56.#{i+10}",
        netmask: "255.255.255.0",
        auto_config: true
        #virtualbox__intnet: "k8s-net"
      s.vm.provider "virtualbox" do |v|
        v.name = "k8s#{i}"
        v.cpus = 2
        v.memory = 2048
        v.gui = true
        v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
        v.customize ["modifyvm", :id, "--uartmode1", "disconnected"]
        #v.customize ["modifyvm", :id, "--natdnsproxy1", "on"]
      end
    end
  end
end
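A typical bring-up with this Vagrantfile, as a sketch (it assumes the k8s-master playbook drops admin.conf and join.sh into /vagrant, which the worker role further down copies from):

  # Provision the master first so join.sh and admin.conf exist before the workers come up
  vagrant up k8s1
  vagrant up k8s2 k8s3

  # Sanity-check the cluster from the master node
  vagrant ssh k8s1 -c "KUBECONFIG=/home/vagrant/admin.conf kubectl get nodes -o wide"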
- hosts: localhost
  remote_user: vagrant
  serial: 1
  roles:
    - k8s-base
    - k8s-master
- hosts: localhost
  remote_user: vagrant
  serial: 1
  roles:
    - k8s-base
    - k8s-worker
#!/bin/bash
# Reset this node's Kubernetes state so it can be re-joined cleanly.
systemctl stop kubelet
# Remove all running containers and unmount kubelet-managed volumes.
docker rm -f $(docker ps -q)
mount | grep "/var/lib/kubelet/*" | awk '{print $3}' | xargs umount 1>/dev/null 2>/dev/null
# Wipe kubelet, kubeadm, etcd and CNI state.
rm -rf /var/lib/kubelet /etc/kubernetes /var/lib/etcd /etc/cni
mkdir -p /etc/kubernetes
# Tear down leftover bridge interfaces.
ip link set cbr0 down; ip link del cbr0
ip link set cni0 down; ip link del cni0
systemctl start kubelet
#!/bin/bash
docker run -d --restart=always -p 2376:2375 \
-v /var/run/docker.sock:/var/run/docker.sock \
alpine/socat tcp-listen:2375,fork,reuseaddr unix-connect:/var/run/docker.sock
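A quick check that the socat forwarder is exposing the Docker API (a sketch; it assumes the script was run on the node at 192.168.56.11):

  curl -s http://192.168.56.11:2376/version
  # or point a Docker client at the forwarded socket
  docker -H tcp://192.168.56.11:2376 ps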
---
- name: Remove Default Host Entry
  become: yes
  lineinfile:
    dest: /etc/hosts
    regexp: '^127\.0\.1\.1\s+k8s.*$'
    state: absent
  tags:
    - k8s

- name: Ensure Hosts File
  become: yes
  lineinfile:
    dest: /etc/hosts
    line: "{{ item.ip }} {{ item.name }}"
  with_items:
    - { ip: "192.168.56.11", name: "k8s1" }
    - { ip: "192.168.56.12", name: "k8s2" }
    - { ip: "192.168.56.13", name: "k8s3" }
  tags:
    - k8s

- name: Ensure Docker Apt Key
  become: yes
  apt_key:
    url: https://download.docker.com/linux/ubuntu/gpg
    state: present
  tags:
    - k8s

- name: Ensure Docker Repository
  become: yes
  apt_repository:
    repo: 'deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable'
    state: present
    update_cache: yes
  tags:
    - k8s

- name: Ensure Google Cloud Apt Key
  become: yes
  apt_key:
    url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
    state: present
  tags:
    - k8s

- name: Ensure Kubernetes Repository
  become: yes
  apt_repository:
    repo: 'deb http://apt.kubernetes.io/ kubernetes-xenial main'
    state: present
    update_cache: yes
  tags:
    - k8s

- name: Ensure Base Tools
  become: yes
  apt:
    name: "{{ packages }}"
  vars:
    packages:
      - curl
      - jq
      - bc
      - gawk
      - apt-transport-https
  tags:
    - k8s

- name: Update system
  become: yes
  apt:
    upgrade: yes
    update_cache: yes
    cache_valid_time: 86400 # one day
  tags:
    - k8s

- name: Ensure Docker
  become: yes
  apt:
    name: docker-ce=18.06.2~ce~3-0~ubuntu
  tags:
    - k8s

- name: Ensure kubelet
  become: yes
  apt:
    name: kubelet=1.14.1-00
  tags:
    - k8s

- name: Ensure kubeadm
  become: yes
  apt:
    name: kubeadm=1.14.1-00
  tags:
    - k8s

- name: Ensure Docker Group
  group:
    name: docker
    state: present
  tags:
    - k8s
- name: Ensure User in Docker Group
  user:
    name: vagrant
    groups: docker
    append: yes
  tags:
    - k8s
- name: Ensure Kubernetes Cleanup
  become: yes
  copy:
    src: files/clean-k8s
    dest: /usr/local/bin
    mode: 0755
    owner: root
    group: root
  tags:
    - k8s

- name: Ensure Docker Socat
  become: yes
  copy:
    src: files/docker-socat
    dest: /usr/local/bin
    mode: 0755
    owner: root
    group: root
  tags:
    - k8s
- name: Ensure swap is off
  become: yes
  command: "swapoff -a"
  tags:
    - k8s

- name: Remove swap from fstab
  become: yes
  lineinfile:
    dest: /etc/fstab
    regexp: 'swap'
    state: absent
  tags:
    - k8s

- name: Create playground directories
  become: yes
  file:
    path: "{{ item.path }}"
    state: directory
  with_items:
    - { path: "/var/tmp/kafka/kafka-logs" }
    - { path: "/var/tmp/zookeeper/data" }
    - { path: "/var/tmp/zookeeper/datalog" }
    - { path: "/var/tmp/flink/jobmanager" }
    - { path: "/var/tmp/flink/taskmanager" }
  tags:
    - k8s
#!/bin/bash
echo "kubeadm join 192.168.56.11:6443 --token $(kubeadm-token) --discovery-token-ca-cert-hash sha256:$(kubeadm-hash)" > /home/vagrant/join.sh
#!/bin/bash
kubectl create -f /var/tmp/storageclass-config.yaml
kubectl patch storageclass standard -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
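To confirm the patch took effect (a quick check, assuming the class defined in storageclass-config.yaml is named standard):

  kubectl get storageclass
  # the standard class should be listed as "standard (default)"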
#!/bin/bash
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
#!/bin/bash
kubectl create -f /var/tmp/docker-registry.yaml
#!/bin/bash
kubectl delete -f /var/tmp/docker-registry.yaml
#!/bin/bash
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
#!/bin/bash
kubeadm token list | grep "authentication" | awk '{ print $1 }'
#!/bin/bash
kubectl create -f /var/tmp/rbac-kdd.yaml
kubectl create -f /var/tmp/calico.yaml
#!/bin/bash
helm init --service-account tiller
helm repo update
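A quick way to verify Tiller came up after helm init (sketch):

  kubectl -n kube-system rollout status deployment/tiller-deploy
  helm version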
#!/bin/bash
kubectl taint nodes --all node-role.kubernetes.io/master-
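To check that the master is schedulable again (sketch):

  kubectl describe node k8s1 | grep -i taint
  # should report <none> once the master taint has been removed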
---
- name: Copy config to home directory
  become: yes
  copy:
    src: /vagrant/admin.conf
    dest: /home/vagrant/admin.conf
    owner: vagrant
    group: vagrant
    mode: 0600
  tags:
    - k8s

- name: Update Environment
  become: yes
  lineinfile:
    path: /home/vagrant/.bashrc
    regexp: '^export KUBECONFIG='
    line: 'export KUBECONFIG=/home/vagrant/admin.conf'
    state: present
  tags:
    - k8s

- name: Copy join script
  become: yes
  copy:
    src: /vagrant/join.sh
    dest: /home/vagrant/join.sh
    owner: vagrant
    group: vagrant
    mode: 0700
  tags:
    - k8s

- name: Join Kubernetes Cluster
  become: yes
  command: "sh /home/vagrant/join.sh"
  tags:
    - k8s

- name: Copy Docker Registry Self-signed Certificate
  become: yes
  shell: |
    mkdir -p /etc/docker/certs.d/192.168.56.11:30000
    cp /vagrant/docker-registry.crt /etc/docker/certs.d/192.168.56.11:30000/ca.crt
  args:
    executable: /bin/bash
  tags:
    - k8s
#!/bin/bash
pushd docker/zookeeper
./build.sh
popd
pushd docker/kafka
./build.sh
popd
pushd docker/flink
./build.sh
popd
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for installing Flink volumes
name: flink
version: 0.1.0
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "flink.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "flink.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "flink.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- $fullname := include "flink.fullname" . -}}
{{- $name := include "flink.name" . -}}
{{- $root := . }}
{{ range .Values.nodes }}
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ $fullname }}-pv-jobmanager-k8s{{ . }}
  labels:
    component: {{ $name }}
    role: jobmanager
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: standard
  local:
    path: /var/tmp/flink/jobmanager
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - k8s{{ . }}
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ $fullname }}-pv-taskmanager-1-k8s{{ . }}
  labels:
    component: {{ $name }}
    role: taskmanager
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: standard
  local:
    path: /var/tmp/flink/taskmanager
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - k8s{{ . }}
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ $fullname }}-pv-taskmanager-2-k8s{{ . }}
  labels:
    component: {{ $name }}
    role: taskmanager
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: standard
  local:
    path: /var/tmp/flink/taskmanager
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - k8s{{ . }}
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ $fullname }}-pv-taskmanager-3-k8s{{ . }}
  labels:
    component: {{ $name }}
    role: taskmanager
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: standard
  local:
    path: /var/tmp/flink/taskmanager
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - k8s{{ . }}
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ $fullname }}-pv-taskmanager-4-k8s{{ . }}
  labels:
    component: {{ $name }}
    role: taskmanager
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: standard
  local:
    path: /var/tmp/flink/taskmanager
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - k8s{{ . }}
{{ end }}
nodes:
- "1"
- "2"
- "3"
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for installing Flink
name: flink
version: 0.1.0
{{- $name := include "flink.name" . -}}
*** Using the Flink service from a container running on the host ***
List the jobs running on the Flink service with these commands:
export FLINK_PORT=$(kubectl get services -l app={{ $name }},external=true -o jsonpath="{.items[0].spec.ports[0].nodePort}")
export NODE_IP=$(kubectl get nodes -l kubernetes.io/hostname=k8s1 -o jsonpath="{.items[0].status.addresses[0].address}")
kubectl run flink-cli --rm -it --restart=Never --image=flink:1.7.2-hadoop28-scala_2.11-alpine -- flink list -m {{ $name }}-jobmanager-headless:8081
Submit a job to the Flink service with these commands:
cat <<EOF >Dockerfile
FROM flink:1.7.2-hadoop28-scala_2.11-alpine
COPY some-flink-job.jar /some-flink-job.jar
EOF
docker build -t 192.168.56.11:30000/flink-with-job-file:1.0 .
docker push 192.168.56.11:30000/flink-with-job-file:1.0
kubectl run flink-cli --rm -it --restart=Never --image=192.168.56.11:30000/flink-with-job-file:1.0 --overrides='{ "apiVersion": "v1", "spec": { "imagePullSecrets": [{"name": "regcred"}] } }' -- flink run -m {{ $name }}-jobmanager-headless:8081 -p 2 -c $CLASS_NAME /some-flink-job.jar $ARGS
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "flink.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "flink.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "flink.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- $fullname := include "flink.fullname" . -}}
{{- $name := include "flink.name" . -}}
{{- $chart := include "flink.chart" . -}}
{{- $root := . }}
apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
  name: {{ $fullname }}-jobmanager
  labels:
    app: {{ $name }}
    chart: {{ $chart }}
    release: {{ $root.Release.Name }}
    heritage: {{ $root.Release.Service }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: {{ $name }}
      role: "jobmanager"
      release: {{ $root.Release.Name }}
  template:
    metadata:
      labels:
        app: {{ $name }}
        role: "jobmanager"
        release: {{ $root.Release.Name }}
    spec:
      containers:
        - name: {{ $root.Chart.Name }}
          image: "{{ $root.Values.image.repository }}:{{ $root.Values.image.tag }}"
          imagePullPolicy: {{ $root.Values.image.pullPolicy }}
          command:
            - /custom_entrypoint.sh
          args:
            - jobmanager
          ports:
            - name: ui
              containerPort: 8081
              protocol: TCP
            - name: rpc
              containerPort: 6123
              protocol: TCP