deploy mongodb database
parent 0df0def3db
commit 271d48c03d
@@ -1,31 +0,0 @@
######################################
# Configuration Client-Server #
######################################
---
- name: Edit Network interface file on Client Server
  blockinfile:
    path: /etc/network/interfaces
    block: |
      auto enp7s0
      iface enp7s0 inet dhcp
      post-up ip route add default via 10.0.0.1

- name: Add Hetzner Nameserver
  blockinfile:
    path: /etc/resolvconf/resolv.conf.d/head
    block: |
      nameserver 8.8.8.8
      nameserver 8.8.4.4

- name: Enable Updates for resolvconf
  raw: "resolvconf --enable-updates"

- name: Update resolvconf
  raw: "resolvconf -u"

- name: Reboot Clients
  reboot:
@@ -1,26 +0,0 @@
######################################
# Configuration NAT-Server #
######################################
---
- name: Copy the public SSH key to the NAT server
  copy:
    src: /tmp/id_rsa.pub
    dest: ~/.ssh/

- name: Copy the private SSH key to the NAT server
  copy:
    src: /tmp/id_rsa
    dest: ~/.ssh/

- name: Restrict the private SSH key to read-only for the user
  raw: "chmod 0400 ~/.ssh/id_rsa"

- name: Edit Network interface file on NAT-Server
  blockinfile:
    path: /etc/network/interfaces
    block: |
      auto eth0
      iface eth0 inet dhcp
      post-up echo 1 > /proc/sys/net/ipv4/ip_forward
      post-up iptables -t nat -A POSTROUTING -s '10.0.0.0/16' -o eth0 -j MASQUERADE
@@ -1,10 +0,0 @@
######################################
# Generate SSH-Key #
######################################
---
- name: Generate an OpenSSH keypair locally
  community.crypto.openssh_keypair:
    path: /tmp/id_rsa
    type: rsa
@@ -1,5 +0,0 @@
---
- name: Deploy example-app from manifest
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('file', './manifests/example-app/deploy.yml') | from_yaml_all }}"
@@ -1,52 +0,0 @@
######################################
# Install cert-manager in cluster #
######################################
---
- name: Read cert-manager values
  include_vars:
    file: ../vars/k8s_cluster/cert_manager/certManager.yml

- name: Create cert-manager namespace
  k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: "{{ namespace }}"
        annotations:
          linkerd.io/inject: 'enabled'

- name: Add cert-manager repo
  kubernetes.core.helm_repository:
    name: "{{ helm.releaseName }}"
    repo_url: "{{ helm.repoUrl }}"

- name: Install CRDs for cert-manager
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('template', '../templates/k8s_cluster/cert_manager/cert_manager_crds.yaml') | from_yaml_all }}"

- name: Deploy cert-manager from helm chart
  kubernetes.core.helm:
    name: "{{ helm.releaseName }}"
    state: present
    chart_ref: "{{ helm.chart }}"
    release_namespace: "{{ namespace }}"
    chart_version: "{{ helm.chartVersion }}"
    update_repo_cache: "true"

- name: Pause for 1.5 minutes and wait for the cert-manager webhook
  ansible.builtin.pause:
    seconds: 90

- name: Deploy cert-manager lets-encrypt staging config file
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('template', '../templates/k8s_cluster/cert_manager/lets_encrypt_staging.yml.j2') | from_yaml_all }}"

- name: Deploy cert-manager lets-encrypt production config file
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('template', '../templates/k8s_cluster/cert_manager/lets_encrypt_production.yml.j2') | from_yaml_all }}"
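# A minimal sketch of what the lets_encrypt_staging.yml.j2 template referenced above
# could contain; the issuer name, the acmeEmail variable and the nginx solver class are
# assumptions, only the ACME staging URL is the documented Let's Encrypt endpoint.
#
# apiVersion: cert-manager.io/v1
# kind: ClusterIssuer
# metadata:
#   name: letsencrypt-staging
# spec:
#   acme:
#     server: https://acme-staging-v02.api.letsencrypt.org/directory
#     email: "{{ acmeEmail }}"
#     privateKeySecretRef:
#       name: letsencrypt-staging-account-key
#     solvers:
#       - http01:
#           ingress:
#             class: nginx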
@@ -1,17 +0,0 @@
######################################
# add controller to existing cluster #
######################################
---
- name: Copy joinController config to the remote location
  template:
    src: '../templates/k8s_cluster/cluster/joinController.yml.j2'
    dest: /tmp/joinController.yml

- name: Join the controller node to the cluster
  command: kubeadm join --config=/tmp/joinController.yml

- name: Setup kubeconfig for local usage
  command: "{{ item }}"
  loop:
    - mkdir -p ~/.kube
    - cp -i /etc/kubernetes/admin.conf ~/.kube/config
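# A minimal sketch of what joinController.yml.j2 could render, using the token,
# certskey and certsha256 facts set in generate_join_command.yml; the hostvars
# lookup and the controlPlaneEndpoint variable are assumptions.
#
# apiVersion: kubeadm.k8s.io/v1beta3
# kind: JoinConfiguration
# discovery:
#   bootstrapToken:
#     apiServerEndpoint: "{{ controlPlaneEndpoint }}:6443"
#     token: "{{ hostvars[groups['controller_init'][0]].token }}"
#     caCertHashes:
#       - "sha256:{{ hostvars[groups['controller_init'][0]].certsha256 }}"
# controlPlane:
#   certificateKey: "{{ hostvars[groups['controller_init'][0]].certskey }}"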
@@ -1,12 +0,0 @@
######################################
# add worker to existing cluster #
######################################
---
- name: Copy the worker join command to server location
  copy: src=join_command_worker.sh dest=/tmp/join_command_worker.sh mode=0777

- name: Join the worker node to cluster
  command: sh /tmp/join_command_worker.sh

- name: Delete local copy of join worker
  local_action: file path=./join_command_worker.sh state=absent
@@ -1,28 +0,0 @@
######################################
# Tasks for init k8s cluster #
######################################
---
- name: Generate join command
  command: kubeadm token create --print-join-command
  register: join_command

- name: Copy join command to local file
  local_action: copy content="{{ join_command.stdout_lines[0] }}" dest="./join_command_worker.sh"

- name: Generate join command controller token
  command: kubeadm token create
  register: join_command_token

- name: Generate join command controller certsKey
  command: kubeadm init phase upload-certs --upload-certs
  register: join_command_controller_certskey

- name: Generate join command controller certsha256
  shell: openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
  register: join_command_controller_certsha256

- name: Save facts for controller join
  set_fact:
    token: '{{ join_command_token.stdout }}'
    certskey: '{{ join_command_controller_certskey.stdout_lines[-1] }}'
    certsha256: '{{ join_command_controller_certsha256.stdout }}'
@@ -1,69 +0,0 @@
######################################
# Tasks for init k8s cluster #
######################################
---
- name: Get hostname
  command: hostname
  register: old_hostname
  changed_when: false

- set_fact: hostname={{ old_hostname.stdout | lower }}

- name: Pull k8s images
  command: kubeadm config images pull --kubernetes-version=v{{ kubernetesVersion }}

- name: Copy clusterConfig to the remote location
  template:
    src: '../templates/k8s_cluster/cluster/clusterConfiguration.yml.j2'
    dest: /tmp/clusterConfiguration.yml

- name: Initialize the Kubernetes cluster using kubeadm
  command:
    argv:
      - kubeadm
      - init
      - --config=/tmp/clusterConfiguration.yml
      - --node-name={{ hostname }}
      - --ignore-preflight-errors
      - Swap
      - --upload-certs

- name: Remove clusterConfig from the remote location
  ansible.builtin.file:
    path: /tmp/clusterConfiguration.yml
    state: absent

- name: Setup kubeconfig for local usage
  command: "{{ item }}"
  loop:
    - mkdir -p ~/.kube
    - cp -i /etc/kubernetes/admin.conf ~/.kube/config

- name: Wait for all k8s nodes to be ready
  shell: kubectl wait --for=condition=Ready nodes --all --timeout=600s
  register: nodes_ready

- name: Create Calico NetworkManager directory
  file:
    path: '/etc/NetworkManager/conf.d/'
    state: directory
    mode: 0755

- name: Configure Calico NetworkManager
  template:
    src: ../templates/k8s_cluster/cluster/calico.conf.j2
    dest: /etc/NetworkManager/conf.d/calico.conf
    owner: root
    mode: '0644'

- name: Install calico pod network
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('template', '../templates/k8s_cluster/cluster/calico.yml.j2') | from_yaml_all }}"

- name: Wait for the calico daemonset to become ready
  command: "kubectl rollout status daemonset calico-node -n kube-system --timeout 60s"

- name: Generate join command
  command: kubeadm token create --print-join-command
  register: join_command
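# A minimal sketch of what clusterConfiguration.yml.j2 could contain; kubernetesVersion
# is the variable already used above, while controlPlaneEndpoint and the pod subnet are
# assumptions (192.168.0.0/16 is the Calico default). The systemd cgroup driver matches
# the CRI-O configuration in install_vanilla_kubernetes.yml.
#
# apiVersion: kubeadm.k8s.io/v1beta3
# kind: ClusterConfiguration
# kubernetesVersion: "v{{ kubernetesVersion }}"
# controlPlaneEndpoint: "{{ controlPlaneEndpoint }}:6443"
# networking:
#   podSubnet: "192.168.0.0/16"
# ---
# apiVersion: kubelet.config.k8s.io/v1beta1
# kind: KubeletConfiguration
# cgroupDriver: systemd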
@@ -1,9 +0,0 @@
########################################
#Restart DNS after DaemonSet Deployment#
########################################
---
- name: Wait for calico pods to become ready
  command: "kubectl rollout status daemonset calico-node -n kube-system --timeout 120s"

- name: Restart CoreDNS deployment
  command: "kubectl rollout restart deployments/coredns -n kube-system"
@@ -1,21 +0,0 @@
######################################
# Install Helm3 in cluster #
######################################
---
- name: Read helm3 values
  include_vars:
    file: ../vars/k8s_cluster/helm/helm3.yml

- name: Download Helm install script
  get_url:
    url: "{{ helm_install_script }}"
    dest: "~/get_helm.sh"
    mode: 0700

- name: Install Helm
  command: "~/get_helm.sh"

- name: Delete Helm install script
  file:
    state: absent
    path: "~/get_helm.sh"
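# A plausible shape for ../vars/k8s_cluster/helm/helm3.yml, derived from the variable
# consumed above; the concrete URL is an assumption (the upstream Helm 3 install script).
#
# helm_install_script: https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3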
@@ -1,34 +0,0 @@
######################################
# Deploy nginx ingress controller #
######################################
---
- name: Read ingress nginx values
  include_vars:
    file: ../vars/k8s_cluster/ingress/ingressNginx.yml

- name: "Create namespace '{{ namespace }}'"
  kubernetes.core.k8s:
    state: present
    definition:
      api_version: v1
      kind: Namespace
      metadata:
        name: '{{ namespace }}'
        labels:
          name: '{{ namespace }}'

- name: Add nginx ingress controller chart repo
  kubernetes.core.helm_repository:
    name: "{{ helm.releaseName }}"
    repo_url: "{{ helm.repoUrl }}"

- name: Deploy nginx ingress controller from helm chart
  kubernetes.core.helm:
    name: '{{ helm.releaseName }}'
    state: present
    chart_ref: '{{ helm.chart }}'
    release_namespace: '{{ namespace }}'
    chart_version: '{{ helm.chartVersion }}'
    update_repo_cache: 'true'
    ## ToDo: deploy the nginx controller with its own values (see the values sketch below)
    # values: "{{ lookup('template', '../templates/k8s_cluster/ingress/ingressNginxValues.yml') | from_yaml }}"
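# A hedged sketch of what ../templates/k8s_cluster/ingress/ingressNginxValues.yml could
# hold once the ToDo above is picked up; every value is an assumption and mirrors common
# ingress-nginx chart settings (a LoadBalancer service fronted by MetalLB).
#
# controller:
#   service:
#     type: LoadBalancer
#   ingressClassResource:
#     default: true
#   metrics:
#     enabled: true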
@@ -1,11 +0,0 @@
######################################
# Deploy kube-vip virtualIP #
######################################
---
- name: Deploy kube-vip as static pod
  template:
    src: ../templates/k8s_cluster/kube_vip/kube_vip.yml.j2
    dest: /etc/kubernetes/manifests/kube-vip.yml
    owner: root
    mode: '0600'
  when: installKubeVip
@@ -1,129 +0,0 @@
######################################
#tasks for vanilla kubernetes install#
######################################
---
- name: Get OS version name
  command: lsb_release -cs
  register: os_codename
  changed_when: false

- name: Get OS release number
  command: lsb_release -rs
  register: os_release
  changed_when: false

- name: Add an apt signing key for CRI-O
  apt_key:
    url: "{{ item }}"
    state: present
  loop:
    - 'https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/{{ crio_version }}/Debian_{{ os_release.stdout }}/Release.key'
    - 'https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_{{ os_release.stdout }}/Release.key'

- name: Add CRI-O apt repository for stable version
  apt_repository:
    repo: deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_{{ os_release.stdout }}/ /
    filename: devel:kubic:libcontainers:stable.list
    state: present
    update_cache: yes
- apt_repository:
    repo: deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/{{ crio_version }}/Debian_{{ os_release.stdout }}/ /
    filename: devel:kubic:libcontainers:stable:cri-o:{{ crio_version }}.list
    state: present
    update_cache: yes

- name: Install CRI-O packages
  apt:
    name: "{{ packages }}"
    state: present
    update_cache: yes
    allow_unauthenticated: true
  vars:
    packages:
      - cri-o
      - cri-o-runc

- name: Enable and start CRI-O service
  ansible.builtin.systemd:
    name: crio.service
    state: started
    enabled: yes

- name: CRI-O use systemd cgroup driver
  copy:
    dest: "/etc/crio/crio.conf.d/02-cgroup-manager.conf"
    content: |
      [crio.runtime]
      conmon_cgroup = "pod"
      cgroup_manager = "systemd"

- name: Overriding the CRI-O sandbox (pause) image
  lineinfile:
    path: /etc/crio/crio.conf
    regexp: '#? ?pause_image ?= ?"registry\.k8s\.io/pause:(.+)"'
    backrefs: True
    line: pause_image = "registry.k8s.io/pause:\1"

- name: Forwarding IPv4 and letting iptables see bridged traffic
  copy:
    dest: "/etc/modules-load.d/k8s.conf"
    content: |
      overlay
      br_netfilter

- name: modprobe overlay & br-netfilter
  command: "{{ item }}"
  loop:
    - modprobe overlay
    - modprobe br_netfilter

# sysctl params required by setup, params persist across reboots
- name: ipv4 bridge forward
  copy:
    dest: "/etc/sysctl.d/k8s.conf"
    content: |
      net.bridge.bridge-nf-call-iptables = 1
      net.bridge.bridge-nf-call-ip6tables = 1
      net.ipv4.ip_forward = 1

- name: Apply sysctl params without reboot
  command: sysctl --system

- name: Import Kubernetes GPG key
  raw: "curl -fsSL https://pkgs.k8s.io/core:/stable:/v{{ kubernetesVersion.split('.')[:2] | join('.') }}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg"

- name: Add Kubernetes apt repository
  raw: "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v{{ kubernetesVersion.split('.')[:2] | join('.') }}/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list"

- name: Remove swapfile from /etc/fstab
  mount:
    name: "{{ item }}"
    fstype: swap
    state: absent
  with_items:
    - swap
    - none

- name: Disable swap
  command: swapoff -a

- name: Update apt cache
  raw: apt-get -y update
  changed_when: False

- name: Install Kubernetes binaries
  apt:
    name: "{{ packages }}"
    state: present
    update_cache: yes
  vars:
    packages:
      - "kubelet={{ kubernetesVersion }}-1.1"
      - "kubeadm={{ kubernetesVersion }}-1.1"
      - "kubectl={{ kubernetesVersion }}-1.1"

- name: Add kubectl completion bash
  lineinfile:
    path: ~/.bashrc
    line: source <(kubectl completion bash)
@@ -1,39 +0,0 @@
######################################
# Setup k8s Cluster #
######################################
---
- name: kubernetes installation
  block:
    - name: vanilla kubernetes install block
      when: kubernetesClusterType == 'vanilla'
      block:
        - name: Read vanilla kubernetes values
          include_vars:
            file: ../vars/k8s_cluster/kubernetes/vanilla_kubernetes.yml

        - import_tasks: ../tasks/k8s_cluster/kubernetes/install_vanilla_kubernetes.yml

        # ToDo: find solution for VIP
        # - name: Read kube_vip values for virtual IP
        #   include_vars:
        #     file: ../vars/k8s_cluster/kube_vip/kube_vip.yml
        # - import_tasks: ../tasks/k8s_cluster/kube_vip/install_kube_vip.yml
        #   when: inventory_hostname in groups['controller']

        - import_tasks: ../tasks/k8s_cluster/cluster/vanilla_kubernetes/init_kubernetes_cluster.yml
          when: inventory_hostname in groups['controller_init']
        - import_tasks: ../tasks/k8s_cluster/cluster/vanilla_kubernetes/generate_join_command.yml
          when: inventory_hostname in groups['controller_init']
        # ToDo: enable once a controller replica exists
        ## - import_tasks: ../tasks/k8s_cluster/cluster/vanilla_kubernetes/add_controller_to_cluster.yml
        ##   when: inventory_hostname in groups['controller_replica']
        - import_tasks: ../tasks/k8s_cluster/cluster/vanilla_kubernetes/add_worker_to_cluster.yml
          when: inventory_hostname in groups['worker']
        - import_tasks: ../tasks/k8s_cluster/cluster/vanilla_kubernetes/restart_coredns.yml
          when: inventory_hostname in groups['controller_init']

    # - name: install microk8s block
    #   when: kubernetesClusterType == 'microk8s'
    #   block:
    #     - debug: msg='ToDo install microk8s'
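# The imports above gate on inventory groups. A minimal hosts.yml sketch with the groups
# referenced in this role (controller, controller_init, controller_replica, worker,
# kubernetes, kubernetes_api); all host names are placeholders.
#
# all:
#   children:
#     kubernetes:
#       children:
#         controller:
#           children:
#             controller_init:
#               hosts:
#                 controller1:
#             controller_replica:
#               hosts: {}
#         worker:
#           hosts:
#             worker1:
#             worker2:
#     kubernetes_api:
#       hosts:
#         controller1: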
@@ -1,46 +0,0 @@
######################################
# Install MetalLB in cluster #
######################################
---
- name: Read metallb values
  include_vars:
    file: ../vars/k8s_cluster/loadbalancer/metallb.yml

- name: Create metallb namespace
  k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: "{{ namespace }}"
        annotations:
          linkerd.io/inject: 'enabled'

- name: Add metallb repo
  kubernetes.core.helm_repository:
    name: "{{ helm.releaseName }}"
    repo_url: "{{ helm.repoUrl }}"

- name: Deploy metallb from helm chart
  kubernetes.core.helm:
    name: "{{ helm.releaseName }}"
    state: present
    chart_ref: "{{ helm.chart }}"
    release_namespace: "{{ namespace }}"
    chart_version: "{{ helm.chartVersion }}"
    update_repo_cache: "true"
    values: "{{ lookup('template', '../templates/k8s_cluster/loadbalancer/metallb.yml') | from_yaml }}"

- name: Pause for 25 seconds and wait for metallb
  ansible.builtin.pause:
    seconds: 25

- name: Pause for 15 seconds and wait for the metallb webhook
  ansible.builtin.pause:
    seconds: 15

- name: Deploy metallb IPAddressPool
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('template', '../templates/k8s_cluster/loadbalancer/metal_lb_configmap.yml.j2') | from_yaml_all }}"
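# A minimal sketch of what metal_lb_configmap.yml.j2 could render for the IPAddressPool
# task above; the pool name and the metallbIpRange variable are assumptions.
#
# apiVersion: metallb.io/v1beta1
# kind: IPAddressPool
# metadata:
#   name: default-pool
#   namespace: "{{ namespace }}"
# spec:
#   addresses:
#     - "{{ metallbIpRange }}"
# ---
# apiVersion: metallb.io/v1beta1
# kind: L2Advertisement
# metadata:
#   name: default-l2
#   namespace: "{{ namespace }}"
# spec:
#   ipAddressPools:
#     - default-pool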
@@ -1,43 +0,0 @@
---
- name: Read mongodb community operator values
  include_vars:
    file: ../vars/k8s_cluster/mongodb/mongodb.yml
#
#- name: "Create namespace '{{ namespace }}'"
#  kubernetes.core.k8s:
#    state: present
#    definition:
#      api_version: v1
#      kind: Namespace
#      metadata:
#        name: '{{ namespace }}'
#        labels:
#          name: '{{ namespace }}'
#
#- name: Add mongodb community operator chart repo
#  kubernetes.core.helm_repository:
#    name: "{{ helm.releaseName }}"
#    repo_url: "{{ helm.repoUrl }}"
#
#- name: Deploy mongodb community operator from helm chart
#  kubernetes.core.helm:
#    name: '{{ helm.releaseName }}'
#    state: present
#    chart_ref: '{{ helm.chart }}'
#    release_namespace: '{{ namespace }}'
#    chart_version: '{{ helm.chartVersion }}'
#    update_repo_cache: 'true'
#    values: "{{ lookup('template', '../templates/k8s_cluster/mongodb/mongodb-operator-values.yml') | from_yaml }}"

#- name: Deploy mongodb database
#  kubernetes.core.k8s:
#    state: present
#    namespace: '{{ namespace }}'
#    definition: "{{ lookup('template', '../templates/k8s_cluster/mongodb/mongodb-deploy-db.yml') | from_yaml }}"

- name: Add secret for mongodb
  kubernetes.core.k8s:
    state: present
    namespace: "{{ namespace }}"
    definition: "{{ lookup('template', '../templates/k8s_cluster/mongodb/mongodb-secret.yml') | from_yaml }}"
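# A hedged sketch of what mongodb-secret.yml could render: the MongoDB community
# operator reads the database user password from a plain Secret. The secret name,
# the key and the mongodbPassword variable are assumptions.
#
# apiVersion: v1
# kind: Secret
# metadata:
#   name: mongodb-admin-password
# type: Opaque
# stringData:
#   password: "{{ mongodbPassword }}"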
@@ -1,42 +0,0 @@
######################################
# Install linkerd service mesh #
######################################
---
- name: install linkerd service mesh
  when: inventory_hostname in groups['controller']
  block:
    - name: Download linkerd install script
      get_url:
        url: https://run.linkerd.io/install
        dest: /tmp/linkerd.sh

    - name: Install linkerd CLI
      shell:
        cmd: cat /tmp/linkerd.sh | sh

    - name: Set linkerd PATH in .bashrc
      lineinfile:
        path: ~/.bashrc
        line: 'PATH=$PATH:/root/.linkerd2/bin'

- name: init linkerd on controller1
  when: inventory_hostname in groups['controller_init']
  block:
    - name: Install linkerd CRDs in the cluster
      shell: "linkerd install --crds | kubectl apply -f -"

    - name: Install linkerd in the cluster
      shell: "linkerd install | kubectl apply -f -"

    - name: Wait for the linkerd destination deployment to become ready
      command: "kubectl rollout status deployment linkerd-destination -n linkerd --timeout 150s"

    - name: Wait for the linkerd proxy-injector deployment to become ready
      command: "kubectl rollout status deployment linkerd-proxy-injector -n linkerd --timeout 150s"

    - name: Install linkerd Dashboard
      shell: "linkerd viz install | kubectl apply -f -"

    - name: Pause for 15 seconds and wait for linkerd installation
      pause:
        seconds: 15
@@ -1,27 +0,0 @@
######################################
# Setup ceph prometheus Monitoring #
######################################
---
- name: Storage monitoring block
  when: inventory_hostname in groups['kubernetes_api']
  block:
    - name: Read rook-ceph storage values
      include_vars:
        file: ../vars/k8s_cluster/storage/rook_ceph.yml

    - name: Deploy rook-ceph Prometheus monitoring manifests
      kubernetes.core.k8s:
        state: present
        definition: "{{ lookup('template', '../templates/k8s_cluster/storage/rook/monitoring/' + item) | from_yaml_all }}"
      loop:
        - 'csi-metrics-service-monitor.yaml'
        - 'service-monitor.yaml'
        - 'rbac.yaml'

    - name: Setting monitoring fact rook-ceph
      set_fact: cephMonitoring=true

    - name: Deploy rook cluster from manifest
      kubernetes.core.k8s:
        state: present
        definition: "{{ lookup('template', '../templates/k8s_cluster/storage/rook/cluster' + ('-test' if rook_cluster_type == 'dev' else '') + '.yaml') | from_yaml_all }}"
@@ -1,82 +0,0 @@
######################################
# Setup rook-ceph storage #
######################################
---
- name: "Create namespace '{{ namespace }}'"
  kubernetes.core.k8s:
    state: present
    definition:
      api_version: v1
      kind: Namespace
      metadata:
        name: '{{ namespace }}'
        labels:
          name: '{{ namespace }}'
        annotations:
          linkerd.io/inject: 'enabled'

- name: Deploy rook CRDs, common resources and operator from manifest
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('template', '../templates/k8s_cluster/storage/rook/' + item) | from_yaml_all }}"
  loop:
    - 'crds.yaml'
    - 'common.yaml'
    - 'operator.yaml'

- name: Verify that the rook operator is up and running
  k8s:
    kind: Deployment
    name: rook-ceph-operator
    namespace: "rook-ceph"
  register: ret
  until: "ret.get('result', {}).get('status', {}).get('conditions', []) | length and ret.get('result', {}).get('status', {}).get('conditions', [])[0].get('status') == 'True'"
  retries: 10
  delay: 20

# ToDo: Tobi, please check whether this approach is okay. The monitoring fact is set again in k8scluster/storage/cephAddPrometheus.
- name: Setting monitoring fact rook-ceph
  set_fact: cephMonitoring=false

- name: Deploy rook cluster from manifest
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('template', '../templates/k8s_cluster/storage/rook/cluster' + ('-test' if rook_cluster_type == 'dev' else '') + '.yaml') | from_yaml_all }}"

- name: Verify the cluster deploy is complete
  k8s:
    kind: CephCluster
    name: '{{ rook_cluster_config["name"] }}'
    namespace: "rook-ceph"
  register: cluster_data
  until: "cluster_data.get('result', {}).get('status', {}).get('state') == 'Created'"
  retries: 20
  delay: 30
  ignore_errors: yes

- name: Safety buffer for OSD enrollment
  pause:
    seconds: 60

- name: Deploy rook block storage class
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('template', '../templates/k8s_cluster/storage/rook/csi/rbd/storageclass' + ('-test' if rook_cluster_type == 'dev' else '') + '.yaml') | from_yaml_all }}"

- name: Create rook filesystem
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('template', '../templates/k8s_cluster/storage/rook/filesystem' + ('-test' if rook_cluster_type == 'dev' else '') + '.yaml') | from_yaml_all }}"

- name: Safety buffer for filesystem enrollment
  pause:
    seconds: 25

- name: Wait for ceph fs pods to become ready
  shell: kubectl wait --namespace=rook-ceph --for=condition=Ready pods --selector app=rook-ceph-mds --timeout=600s
  register: ceph_pods_ready

- name: Deploy rook file storage class
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('template', '../templates/k8s_cluster/storage/rook/csi/cephfs/storageclass.yaml') | from_yaml_all }}"
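# The csi/rbd/storageclass templates above are likely modelled on the upstream Rook
# example; a trimmed sketch of that StorageClass, with pool and secret names taken
# from the Rook defaults (assumptions for this repo).
#
# apiVersion: storage.k8s.io/v1
# kind: StorageClass
# metadata:
#   name: rook-ceph-block
# provisioner: rook-ceph.rbd.csi.ceph.com
# parameters:
#   clusterID: rook-ceph
#   pool: replicapool
#   imageFormat: "2"
#   imageFeatures: layering
#   csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
#   csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
#   csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
#   csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
#   csi.storage.k8s.io/fstype: ext4
# reclaimPolicy: Delete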
@@ -1,15 +0,0 @@
######################################
# Setup Storage #
######################################
---
- name: Storage block
  when: inventory_hostname in (groups['controller_init'])
  block:
    - name: Read rook-ceph storage values
      include_vars:
        file: ../vars/k8s_cluster/storage/rook_ceph.yml

    - name: rook internal ceph
      when: inventory_hostname in groups['controller_init']
      block:
        - import_tasks: ../tasks/k8s_cluster/storage/install_rook_ceph_storage.yml
@@ -1,38 +0,0 @@
#######################################
## Tasks to prepare a Debian System #
#######################################
---
- name: Read debian values and prepare system
  include_vars:
    file: ../vars/k8s_cluster/system/debian.yml

- name: Update apt cache
  raw: apt-get -y update
  changed_when: False

- name: Install required system packages
  apt: name={{ sys_packages }} state=present update_cache=yes cache_valid_time=3600

- name: Install required kubernetes system packages
  apt: name={{ k8s_sys_packages }} state=present update_cache=yes cache_valid_time=3600
  when: inventory_hostname in groups['kubernetes']

- name: Delete the EXTERNALLY-MANAGED marker so pip can install system-wide
  ansible.builtin.file:
    state: absent
    path: /usr/lib/python3.11/EXTERNALLY-MANAGED

- name: Install required Python modules
  pip: name={{ pip_packages }} state=present
  when: inventory_hostname in groups['kubernetes']

- name: Get hostname
  command: hostname
  register: old_hostname
  changed_when: false

- set_fact: hostname={{ old_hostname.stdout | lower }}

# No capital letters in the hostname
- name: Change the hostname
  command: hostnamectl set-hostname {{ hostname }}
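# A plausible shape for ../vars/k8s_cluster/system/debian.yml, derived from the
# variables consumed above; the concrete package lists are assumptions (the kubernetes
# Python module is what kubernetes.core.k8s needs on the API hosts).
#
# sys_packages:
#   - apt-transport-https
#   - ca-certificates
#   - curl
#   - gnupg
# k8s_sys_packages:
#   - python3-pip
# pip_packages:
#   - kubernetes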