Mirror of https://github.com/kubesphere/kubekey.git (synced 2025-12-26 01:22:51 +00:00)
feat: check inventory when it's changed (#2691)

Signed-off-by: joyceliu <joyceliu@yunify.com>

parent 6ec705768c
commit 48b7c3b34b
@@ -77,6 +77,7 @@ type TaskHostResult struct {
 	Host string `json:"host,omitempty"`
 	Stdout string `json:"stdout,omitempty"`
 	StdErr string `json:"stdErr,omitempty"`
+	Error string `json:"error,omitempty"`
 }

 // +genclient
@@ -5,7 +5,7 @@
   register: calicoctl_install_version
   register_type: yaml
 - name: Install calicoctl
-  when: .calicoctl_install_version.stderr | empty | not
+  when: .calicoctl_install_version.error | empty | not
   block:
   - name: Sync calicoctl to remote
     copy:
@@ -29,4 +29,4 @@

 - name: Apply calico
   command: |
-    helm install --create-namespace --namespace tigera-operator calico /etc/kubernetes/cni/tigera-operator-{{ .calico_version }}.tgz -f /etc/kubernetes/cni/calico-values.yaml
+    helm upgrade --install --create-namespace --namespace tigera-operator calico /etc/kubernetes/cni/tigera-operator-{{ .calico_version }}.tgz -f /etc/kubernetes/cni/calico-values.yaml
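The change from `helm install` to `helm upgrade --install` here (and in the cilium and flannel hunks below) makes the task safe to re-run: plain `install` fails when the release already exists, while `upgrade --install` installs on the first run and upgrades on every subsequent run. A minimal sketch of the behavior, with an illustrative release and chart name that are not from this repository:

    # First run: release "demo" does not exist yet, so it is installed.
    helm upgrade --install --create-namespace --namespace demo demo ./demo-chart
    # Re-run: plain "helm install" would now fail with
    # "cannot re-use a name that is still in use";
    # upgrade --install performs an in-place upgrade instead.
    helm upgrade --install --create-namespace --namespace demo demo ./demo-chart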
@@ -22,4 +22,4 @@
 # https://docs.cilium.io/en/stable/installation/k8s-install-helm/
 - name: Install cilium
   command: |
-    helm install --namespace kube-system cilium /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz -f /etc/kubernetes/cni/cilium-values.yaml
+    helm upgrade --install --namespace kube-system cilium /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz -f /etc/kubernetes/cni/cilium-values.yaml
@@ -14,4 +14,4 @@

 - name: Apply flannel
   command: |
-    helm install --create-namespace --namespace kube-flannel flannel /etc/kubernetes/cni/flannel.tgz -f /etc/kubernetes/cni/flannel-values.yaml
+    helm upgrade --install --create-namespace --namespace kube-flannel flannel /etc/kubernetes/cni/flannel.tgz -f /etc/kubernetes/cni/flannel-values.yaml
@@ -4,7 +4,7 @@
   command: runc --version
   register: runc_install_version
 - name: Sync runc binary to remote
-  when: or (.runc_install_version.stderr | empty | not) (.runc_install_version.stdout | contains (printf "runc version %s\n" (.runc_version | default "" | trimPrefix "v" )) | not)
+  when: or (.runc_install_version.error | empty | not) (.runc_install_version.stdout | contains (printf "runc version %s\n" (.runc_version | default "" | trimPrefix "v" )) | not)
   copy:
     src: >-
       {{ .binary_dir }}/runc/{{ .runc_version }}/{{ .binary_type }}/runc.{{ .binary_type }}
@@ -16,7 +16,7 @@
   command: containerd --version
   register: containerd_install_version
 - name: Install containerd
-  when: or (.containerd_install_version.stderr | empty | not) (.containerd_install_version.stdout | contains (printf " %s " .containerd_version) | not)
+  when: or (.containerd_install_version.error | empty | not) (.containerd_install_version.stdout | contains (printf " %s " .containerd_version) | not)
   block:
   - name: Sync containerd binary to remote
     copy:
@@ -5,7 +5,7 @@
   register: crictl_install_version

 - name: Install crictl
-  when: or (.crictl_install_version.stderr | empty | not) (.crictl_install_version.stdout | ne (printf "crictl version %s" .crictl_version))
+  when: or (.crictl_install_version.error | empty | not) (.crictl_install_version.stdout | ne (printf "crictl version %s" .crictl_version))
   block:
   - name: Sync crictl binary to remote
     copy:
@@ -5,7 +5,7 @@
   register: cridockerd_install_version

 - name: Install cri-dockerd
-  when: or (.cridockerd_install_version.stderr | empty | not) (.cridockerd_install_version.stdout | hasPrefix (printf "cri-dockerd %s " .cridockerd_version) | not)
+  when: or (.cridockerd_install_version.error | empty | not) (.cridockerd_install_version.stdout | hasPrefix (printf "cri-dockerd %s " .cridockerd_version) | not)
   block:
   - name: Sync cri-dockerd Binary to remote
     copy:
@@ -5,7 +5,7 @@
   register: docker_install_version

 - name: Install docker
-  when: or (.docker_install_version.stderr | empty | not) (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
+  when: or (.docker_install_version.error | empty | not) (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
   block:
   - name: Sync docker binary to remote
     copy:
@@ -1,10 +1,10 @@
 ---
 - name: Check if helm is installed
   ignore_errors: true
-  command: helm version
+  command: helm version --template "{{ .Version }}"
   register: helm_install_version
 - name: Install helm
-  when: or (.helm_install_version.stderr | empty | not) (.helm_install_version.stdout | contains (printf "Version:\"%s\"" .helm_version) | not)
+  when: or (.helm_install_version.error | empty | not) (.helm_install_version.stdout | ne .helm_version)
   block:
   - name: Sync helm to remote
     copy:
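`helm version --template "{{ .Version }}"` prints just the bare version string, which is why the new condition can compare `stdout` with `.helm_version` via `ne` instead of grepping the verbose default output. Roughly, for a typical Helm v3 build (output shapes are illustrative, not exact values):

    $ helm version
    version.BuildInfo{Version:"v3.14.2", GitCommit:"...", GitTreeState:"clean", GoVersion:"go1.21.7"}
    $ helm version --template "{{ .Version }}"
    v3.14.2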
@@ -21,7 +21,7 @@
   command: kubeadm version -o short
   register: kubeadm_install_version
 - name: Install kubeadm
-  when: or (.kubeadm_install_version.stderr | empty | not) (.kubeadm_install_version.stdout | ne .kube_version)
+  when: or (.kubeadm_install_version.error | empty | not) (.kubeadm_install_version.stdout | ne .kube_version)
   copy:
     src: >-
       {{ .binary_dir }}/kube/{{ .kube_version }}/{{ .binary_type }}/kubeadm
@@ -35,7 +35,7 @@
   register_type: yaml
 - name: Sync kubectl to remote
   when: |
-    or (.kubectl_install_version.stderr | empty | not) ((get .kubectl_install_version.stdout "Server Version") | ne .kube_version)
+    or (.kubectl_install_version.error | empty | not) ((get .kubectl_install_version.stdout "Server Version") | ne .kube_version)
   copy:
     src: >-
       {{ .binary_dir }}/kube/{{ .kube_version }}/{{ .binary_type }}/kubectl
@@ -47,7 +47,7 @@
   command: kubelet --version
   register: kubelet_install_version
 - name: Install kubelet
-  when: or (.kubelet_install_version.stderr | empty | not) (.kubelet_install_version.stdout | ne (printf "Kubernetes %s" .kube_version))
+  when: or (.kubelet_install_version.error | empty | not) (.kubelet_install_version.stdout | ne (printf "Kubernetes %s" .kube_version))
   block:
   - name: Sync kubelet to remote
     copy:
@@ -33,17 +33,24 @@
 - name: Check if kubernetes installed
   when: .groups.k8s_cluster | default list | has .inventory_hostname
   block:
-    - name: Get kubernetes service
-      ignore_errors: true
-      command: systemctl is-active kubelet.service
-      register: kubernetes_install_service
+    - name: Get kubelet.service LoadState and save to variable
+      command: systemctl show kubelet.service -p LoadState --value
+      register: kubernetes_install_LoadState
+    - name: Get kubelet.service ActiveState and save to variable
+      command: systemctl show kubelet.service -p ActiveState --value
+      register: kubernetes_install_ActiveState
     - name: Get kubernetes version
       ignore_errors: true
       command: kubelet --version
       register: kubernetes_install_version
     - name: Check kubernetes service and version
-      when: .kubernetes_install_service.stdout | eq "active"
+      when: .kubernetes_install_LoadState.stdout | eq "loaded"
       block:
+        - name: Kubernetes should be active
+          assert:
+            that: .kubernetes_install_ActiveState.stdout | eq "active"
+            fail_msg: >-
+              kubernetes should be active when it's loaded
         - name: Check kubernetes version
           assert:
             that: .kubernetes_install_version.stdout | default "" | trimPrefix "Kubernetes " | eq .kube_version
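The move from `systemctl is-active` to `systemctl show -p ... --value` separates "unit not installed" from "unit installed but stopped", which `is-active` conflates. A rough comparison on a host where kubelet has never been installed (recent systemd; older releases may word the outputs slightly differently):

    $ systemctl is-active kubelet.service
    inactive
    $ systemctl show kubelet.service -p LoadState --value
    not-found
    $ systemctl show kubelet.service -p ActiveState --value
    inactive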
@@ -49,7 +49,7 @@
   init_kubernetes_node: >-
     {{- $initNodes := list -}}
     {{- range .groups.kube_control_plane -}}
-      {{- if index $.hostvars . "kubernetes_install_service" "stdout" | eq "active" -}}
+      {{- if index $.hostvars . "kubernetes_install_LoadState" "stdout" | eq "loaded" -}}
         {{- $initNodes = append $initNodes . -}}
       {{- end -}}
     {{- end -}}
@@ -14,24 +14,9 @@
     - init/init-cert

 - hosts:
-    - etcd
+    - all
   tags: ["certs"]
   roles:
-    - role: certs/renew-etcd
-      when: and (.groups.etcd | default list | empty | not) .renew_etcd
-
-- hosts:
-    - image_registry
-  tags: ["certs"]
-  roles:
-    - role: certs/renew-registry
-      when: and (.groups.image_registry | default list | empty | not) .renew_image_registry
-
-- hosts:
-    - kube_control_plane
-  tags: ["certs"]
-  roles:
-    - role: certs/renew-kubernetes
-      when: and (.groups.kube_control_plane | default list | empty | not) .renew_kubernetes
+    - role: certs/renew

 - import_playbook: hook/post_install.yaml
@@ -11,16 +11,26 @@
     - kube_control_plane
   gather_facts: true
   tasks:
-    - name: Get kubernetes service
-      ignore_errors: true
-      command: systemctl is-active kubelet.service
-      register: kubernetes_install_service
+    - name: Get kubelet.service LoadState and save to variable
+      command: systemctl show kubelet.service -p LoadState --value
+      register: kubernetes_install_LoadState
+    - name: Get kubelet.service ActiveState and save to variable
+      command: systemctl show kubelet.service -p ActiveState --value
+      register: kubernetes_install_ActiveState
+    - name: Check kubernetes service and version
+      when: .kubernetes_install_LoadState.stdout | eq "loaded"
+      block:
+        - name: Kubernetes should be active
+          assert:
+            that: .kubernetes_install_ActiveState.stdout | eq "active"
+            fail_msg: >-
+              kubernetes should be active when it's loaded
     - name: Keep at least one control_plane node.
       run_once: true
       command: |
         {{- $cpNodes := list -}}
         {{- range .groups.kube_control_plane -}}
-          {{- if index $.hostvars . "kubernetes_install_service" "stdout" | eq "active" -}}
+          {{- if index $.hostvars . "kubernetes_install_LoadState" "stdout" | eq "loaded" -}}
             {{- $cpNodes = append $cpNodes . -}}
           {{- end -}}
         {{- end -}}
@@ -14,7 +14,7 @@
   register: execute_result

 - name: Execute post install scripts
-  when: .execute_result.stderr | empty
+  when: .execute_result.error | empty
   command: |
     for file in /etc/kubekey/scripts/post_install_*.sh; do
       if [ -f $file ]; then
@@ -46,7 +46,7 @@
   register: execute_result

 - name: Execute pre install scripts
-  when: .execute_result.stderr | empty
+  when: .execute_result.error | empty
   command: |
     for file in /etc/kubekey/scripts/pre_install_*.sh; do
       if [ -f $file ]; then
@@ -1,6 +1,3 @@
-renew_etcd: true
-renew_image_registry: true
-renew_kubernetes: true
 kubernetes:
   etcd:
     deployment_type: external
@@ -1,25 +1,21 @@
 ---
-- name: Sync ca file to remote
-  tags: ["certs"]
+- name: ETCD | Copy CA certificate to remote host
   copy:
     src: >-
       {{ .binary_dir }}/pki/root.crt
     dest: /etc/ssl/etcd/ssl/ca.crt

-- name: Sync etcd cert file to remote
-  tags: ["certs"]
+- name: ETCD | Copy server certificate to remote host
   copy:
     src: >-
       {{ .binary_dir }}/pki/etcd.crt
     dest: /etc/ssl/etcd/ssl/server.crt

-- name: Sync etcd key file to remote
-  tags: ["certs"]
+- name: ETCD | Copy server private key to remote host
   copy:
     src: >-
       {{ .binary_dir }}/pki/etcd.key
     dest: /etc/ssl/etcd/ssl/server.key

-- name: Restart etcd service
-  tags: ["certs"]
+- name: ETCD | Restart etcd service to apply new certificates
   command: systemctl restart etcd
@@ -1,20 +1,17 @@
 ---
-- name: Sync image registry cert file to remote
-  tags: ["certs"]
+- name: Harbor | Copy image registry certificate to remote host
   copy:
     src: >-
       {{ .binary_dir }}/pki/image_registry.crt
     dest: >-
       /opt/harbor/{{ .harbor_version }}/ssl/server.crt

-- name: Sync image registry key file to remote
-  tags: ["certs"]
+- name: Harbor | Copy image registry private key to remote host
   copy:
     src: >-
       {{ .binary_dir }}/pki/image_registry.key
     dest: >-
       /opt/harbor/{{ .harbor_version }}/ssl/server.key

-- name: Restart harbor service
-  tags: ["certs"]
+- name: Harbor | Restart Harbor service to apply new certificates
   command: systemctl restart harbor.service
@ -1,6 +1,4 @@
|
|||
- include_tasks: harbor.yaml
|
||||
tags: ["certs"]
|
||||
when: .image_registry.type | eq "harbor"
|
||||
- include_tasks: registry.yaml
|
||||
tags: ["certs"]
|
||||
when: .image_registry.type | eq "registry"
|
||||
when: .image_registry.type | eq "docker-registry"
|
||||
|
|
@@ -1,20 +1,17 @@
 ---
-- name: Sync image registry cert file to remote
-  tags: ["certs"]
+- name: Docker Registry | Copy image registry certificate to remote host
   copy:
     src: >-
       {{ .binary_dir }}/pki/image_registry.crt
     dest: >-
       /opt/docker-registry/{{ .docker_registry_version }}/ssl/server.crt

-- name: Sync image registry key file to remote
-  tags: ["certs"]
+- name: Docker Registry | Copy image registry private key to remote host
   copy:
     src: >-
       {{ .binary_dir }}/pki/image_registry.key
     dest: >-
       /opt/docker-registry/{{ .docker_registry_version }}/ssl/server.key

-- name: Restart registry service
-  tags: ["certs"]
+- name: Docker Registry | Restart registry service to apply new certificates
   command: systemctl restart registry.service
@@ -1,20 +1,19 @@
 ---
-- name: Sync etcd ca file to remote
-  tags: ["certs"]
+- name: ETCD | Copy CA certificate to remote host
   copy:
     src: >-
       {{ .binary_dir }}/pki/root.crt
     dest: /etc/kubernetes/pki/etcd/ca.crt
     mode: 0755
-- name: Sync etcd cert files to remote
-  tags: ["certs"]
+
+- name: ETCD | Copy client certificate to remote host
   copy:
     src: >-
       {{ .binary_dir }}/pki/etcd.crt
     dest: /etc/kubernetes/pki/etcd/client.crt
     mode: 0755
-- name: Sync etcd key files to remote
-  tags: ["certs"]
+
+- name: ETCD | Copy client key to remote host
   copy:
     src: >-
       {{ .binary_dir }}/pki/etcd.key
@@ -1,50 +1,48 @@
 ---
-- name: Check kubeadm version
-  tags: ["certs"]
+- name: Kubernetes | Determine installed kubeadm version
   run_once: true
   command: kubeadm version -o short
   register: kubeadm_install_version

-- name: Renew cert by kubeadm
-  tags: ["certs"]
+- name: Kubernetes | Renew certificates using kubeadm
   run_once: true
   command: |
     {{- if .kubeadm_install_version.stdout | semverCompare "<v1.20.0" }}
+    # Using legacy kubeadm alpha certs renew commands for versions older than v1.20.0
    /usr/local/bin/kubeadm alpha certs renew apiserver
    /usr/local/bin/kubeadm alpha certs renew apiserver-kubelet-client
    /usr/local/bin/kubeadm alpha certs renew front-proxy-client
    /usr/local/bin/kubeadm alpha certs renew admin.conf
    /usr/local/bin/kubeadm alpha certs renew controller-manager.conf
    /usr/local/bin/kubeadm alpha certs renew scheduler.conf
-    {{- if and (.kubernetes.etcd.deployment_type | eq "internal") .renew_etcd }}
+    {{- if .kubernetes.etcd.deployment_type | eq "internal" }}
    /usr/local/bin/kubeadm alpha certs renew etcd-healthcheck-client
    /usr/local/bin/kubeadm alpha certs renew etcd-peer
    /usr/local/bin/kubeadm alpha certs renew etcd-server
    {{- end }}
    {{- else }}
+    # Using stable kubeadm certs renew commands for v1.20.0 and newer
    /usr/local/bin/kubeadm certs renew apiserver
    /usr/local/bin/kubeadm certs renew apiserver-kubelet-client
    /usr/local/bin/kubeadm certs renew front-proxy-client
    /usr/local/bin/kubeadm certs renew admin.conf
    /usr/local/bin/kubeadm certs renew controller-manager.conf
    /usr/local/bin/kubeadm certs renew scheduler.conf
-    {{- if and (.kubernetes.etcd.deployment_type | eq "internal") .renew_etcd }}
+    {{- if .kubernetes.etcd.deployment_type | eq "internal" }}
    /usr/local/bin/kubeadm certs renew etcd-healthcheck-client
    /usr/local/bin/kubeadm certs renew etcd-peer
    /usr/local/bin/kubeadm certs renew etcd-server
    {{- end }}
    {{- end }}

-- name: Fetch kubeconfig to local
-  tags: ["certs"]
+- name: Kubernetes | Retrieve kubeconfig from remote host
   run_once: true
   fetch:
     src: /etc/kubernetes/admin.conf
     dest: >-
       {{ .binary_dir }}/kubeconfig

-- name: Sync kubeconfig to remote
-  tags: ["certs"]
+- name: Kubernetes | Distribute kubeconfig to remote host
   copy:
     src: >-
       {{ .binary_dir }}/kubeconfig
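One way to confirm the renewal took effect, assuming kubeadm v1.20 or newer on the control plane node:

    # Lists every kubeadm-managed certificate with its remaining validity;
    # freshly renewed entries show a new expiration date.
    /usr/local/bin/kubeadm certs check-expiration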
@@ -1,29 +1,29 @@
 ---
 - include_tasks: kube.yaml
   tags: ["certs"]

 - include_tasks: etcd.yaml
   tags: ["certs"]
   when:
-    - .kubernetes.etcd.deployment_type | eq "external"
-    - .groups.etcd | default list | empty | not
-    - .renew_etcd
+    - .kubernetes.etcd.deployment_type | eq "external"
+    - .groups.etcd | default list | empty | not

-- name: Reload kubernetes pods
-  tags: [ "certs" ]
+- name: Kubernetes | Restart Kubernetes control plane pods
   command: |
     {{- if .cri.container_manager | eq "docker" }}
+    # Restarting Kubernetes control plane pods using Docker
    docker ps -af name=k8s_PODS_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f
    docker ps -af name=k8s_PODS_kube-controller-manager* -q | xargs --no-run-if-empty docker rm -f
    docker ps -af name=k8s_PODS_kube-scheduler* -q | xargs --no-run-if-empty docker rm -f
-    {{- if and (.kubernetes.etcd.deployment_type | eq "docker") .renew_etcd }}
+    {{- if .kubernetes.etcd.deployment_type | eq "docker" }}
+    # Restarting etcd pods managed by Docker
    docker ps -af name=k8s_PODS_etcd* -q | xargs --no-run-if-empty docker rm -f
    {{- end }}
    {{- else }}
+    # Restarting Kubernetes control plane pods using crictl
    crictl pods --name kube-apiserver-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %'
    crictl pods --name kube-controller-manager-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %'
    crictl pods --name kube-scheduler-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %'
-    {{- if and (.kubernetes.etcd.deployment_type | eq "internal") .renew_etcd }}
+    {{- if .kubernetes.etcd.deployment_type | eq "internal" }}
+    # Restarting etcd pods managed by the container runtime
    crictl pods --name etcd-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %'
    {{- end }}
    {{- end }}
@@ -0,0 +1,13 @@
+---
+dependencies:
+  - role: renew/etcd
+    tags: ["certs", "etcd"]
+    when: .groups.etcd | default list | has .inventory_hostname
+
+  - role: renew/kubernetes
+    tags: ["certs", "kubernetes"]
+    when: .groups.kube_control_plane | default list | has .inventory_hostname
+
+  - role: renew/image-registry
+    tags: ["certs", "image-registry"]
+    when: .groups.image_registry | default list | has .inventory_hostname
@@ -1,88 +1,88 @@
 ---
-- name: Check binaries for etcd
+- name: Binary | Ensure etcd binary is present
   tags: ["etcd"]
   command: |
     artifact_name={{ get .artifact.artifact_url.etcd .item | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/etcd/{{ .etcd_version }}/{{ .item }}
     if [ ! -f $artifact_path/$artifact_name ]; then
-      mkdir -p $artifact_path
-      # download online
-      http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.etcd .item }})
-      if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
-        exit 1
-      fi
-      curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.etcd .item }}
+      mkdir -p $artifact_path
+      # Attempt to download etcd binary
+      http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.etcd .item }})
+      if [ $http_code != 200 ]; then
+        echo "Failed to download etcd binary. HTTP status code: $http_code"
+        exit 1
+      fi
+      curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.etcd .item }}
     fi
   loop: "{{ .artifact.arch | toJson }}"
   when: .etcd_version | empty | not

-- name: Check binaries for kube
+- name: Binary | Ensure Kubernetes binaries are present
   tags: ["kube"]
   command: |
     kube_path={{ .binary_dir }}/kube/{{ .kube_version }}/{{ .item }}
     if [ ! -f $kube_path/kubelet ]; then
-      mkdir -p $kube_path
-      # download online
-      http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubelet .item }})
-      if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
-        exit 1
-      fi
-      curl -L -o $kube_path/kubelet {{ get .artifact.artifact_url.kubelet .item }}
+      mkdir -p $kube_path
+      # Download kubelet if missing
+      http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubelet .item }})
+      if [ $http_code != 200 ]; then
+        echo "Failed to download kubelet. HTTP status code: $http_code"
+        exit 1
+      fi
+      curl -L -o $kube_path/kubelet {{ get .artifact.artifact_url.kubelet .item }}
     fi
     if [ ! -f $kube_path/kubeadm ]; then
-      mkdir -p $kube_path
-      # download online
-      http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubeadm .item }})
-      if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
-        exit 1
-      fi
-      curl -L -o $kube_path/kubeadm {{ get .artifact.artifact_url.kubeadm .item }}
+      mkdir -p $kube_path
+      # Download kubeadm if missing
+      http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubeadm .item }})
+      if [ $http_code != 200 ]; then
+        echo "Failed to download kubeadm. HTTP status code: $http_code"
+        exit 1
+      fi
+      curl -L -o $kube_path/kubeadm {{ get .artifact.artifact_url.kubeadm .item }}
     fi
     if [ ! -f $kube_path/kubectl ]; then
-      mkdir -p $kube_path
-      # download online
-      http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubectl .item }})
-      if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
-        exit 1
-      fi
-      curl -L -o $kube_path/kubectl {{ get .artifact.artifact_url.kubectl .item }}
+      mkdir -p $kube_path
+      # Download kubectl if missing
+      http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubectl .item }})
+      if [ $http_code != 200 ]; then
+        echo "Failed to download kubectl. HTTP status code: $http_code"
+        exit 1
+      fi
+      curl -L -o $kube_path/kubectl {{ get .artifact.artifact_url.kubectl .item }}
     fi
   loop: "{{ .artifact.arch | toJson }}"
   when: .kube_version | empty | not

-- name: Check binaries for cni
+- name: Binary | Ensure CNI plugins are present
   tags: ["cni"]
   command: |
     artifact_name={{ get .artifact.artifact_url.cni_plugins .item | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/cni/plugins/{{ .cni_plugins_version }}/{{ .item }}
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
-      http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.cni_plugins .item }})
-      if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
-        exit 1
-      fi
+      # Attempt to download CNI plugins
+      http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.cni_plugins .item }})
+      if [ $http_code != 200 ]; then
+        echo "Failed to download CNI plugins. HTTP status code: $http_code"
+        exit 1
+      fi
       curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.cni_plugins .item }}
     fi
   loop: "{{ .artifact.arch | toJson }}"
   when: .cni_plugins_version | empty | not

-- name: Check binaries for helm
+- name: Binary | Ensure Helm binary is present
   tags: ["helm"]
   command: |
     artifact_name={{ get .artifact.artifact_url.helm .item | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/helm/{{ .helm_version }}/{{ .item }}
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Attempt to download Helm binary
       http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.helm .item }})
       if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
+        echo "Failed to download Helm binary. HTTP status code: $http_code"
         exit 1
       fi
       curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.helm .item }}
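All of these download tasks share the same probe-then-fetch pattern. A condensed, standalone version of it (the URL and destination are placeholders, not paths from this repository):

    url="https://example.com/artifact.tar.gz"      # placeholder URL
    dest="/tmp/binaries/$(basename "$url")"
    mkdir -p "$(dirname "$dest")"
    # Probe first: the response body goes to /dev/null, -w prints only the status code.
    http_code=$(curl -Lo /dev/null -s -w "%{http_code}" "$url")
    if [ "$http_code" != 200 ]; then
      echo "Failed to download artifact. HTTP status code: $http_code"
      exit 1
    fi
    curl -L -o "$dest" "$url"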
@@ -90,17 +90,17 @@
   loop: "{{ .artifact.arch | toJson }}"
   when: .helm_version | empty | not

-- name: Check binaries for crictl
+- name: Binary | Ensure crictl binary is present
   tags: ["crictl"]
   command: |
     artifact_name={{ get .artifact.artifact_url.crictl .item | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/crictl/{{ .crictl_version }}/{{ .item }}
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Attempt to download crictl binary
       http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.crictl .item }})
       if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
+        echo "Failed to download crictl binary. HTTP status code: $http_code"
         exit 1
       fi
       curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.crictl .item }}
@@ -108,17 +108,17 @@
   loop: "{{ .artifact.arch | toJson }}"
   when: .crictl_version | empty | not

-- name: Check binaries for docker
+- name: Binary | Ensure Docker binary is present
   tags: ["docker"]
   command: |
     artifact_name={{ get .artifact.artifact_url.docker .item | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/docker/{{ .docker_version }}/{{ .item }}
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Attempt to download Docker binary
       http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.docker .item }})
       if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
+        echo "Failed to download Docker binary. HTTP status code: $http_code"
         exit 1
       fi
       curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.docker .item }}
@@ -126,17 +126,17 @@
   loop: "{{ .artifact.arch | toJson }}"
   when: .docker_version | empty | not

-- name: Check binaries for cridockerd
+- name: Binary | Ensure cri-dockerd binary is present
   tags: ["cridockerd"]
   command: |
     artifact_name={{ get .artifact.artifact_url.cridockerd .item | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/cri-dockerd/{{ .cridockerd_version }}/{{ .item }}
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Attempt to download cri-dockerd binary
       http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.cridockerd .item }})
       if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
+        echo "Failed to download cri-dockerd binary. HTTP status code: $http_code"
         exit 1
       fi
       curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.cridockerd .item }}
@@ -144,17 +144,17 @@
   loop: "{{ .artifact.arch | toJson }}"
   when: .cridockerd_version | empty | not

-- name: Check binaries for containerd
+- name: Binary | Ensure containerd binary is present
   tags: ["containerd"]
   command: |
     artifact_name={{ get .artifact.artifact_url.containerd .item | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/containerd/{{ .containerd_version }}/{{ .item }}
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Attempt to download containerd binary
       http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.containerd .item }})
       if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
+        echo "Failed to download containerd binary. HTTP status code: $http_code"
         exit 1
       fi
       curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.containerd .item }}
@@ -162,17 +162,17 @@
   loop: "{{ .artifact.arch | toJson }}"
   when: .containerd_version | empty | not

-- name: Check binaries for runc
+- name: Binary | Ensure runc binary is present
   tags: ["runc"]
   command: |
     artifact_name={{ get .artifact.artifact_url.runc .item | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/runc/{{ .runc_version }}/{{ .item }}
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Attempt to download runc binary
       http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.runc .item }})
       if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
+        echo "Failed to download runc binary. HTTP status code: $http_code"
         exit 1
       fi
       curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.runc .item }}
@@ -180,17 +180,17 @@
   loop: "{{ .artifact.arch | toJson }}"
   when: .runc_version | empty | not

-- name: Check binaries for calicoctl
+- name: Binary | Ensure calicoctl binary is present
   tags: ["calicoctl"]
   command: |
     artifact_name=calicoctl
     artifact_path={{ .binary_dir }}/cni/calico/{{ .calico_version }}/{{ .item }}
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Attempt to download calicoctl binary
       http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.calicoctl .item }})
       if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
+        echo "Failed to download calicoctl binary. HTTP status code: $http_code"
         exit 1
       fi
       curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.calicoctl .item }}
@@ -198,17 +198,17 @@
   loop: "{{ .artifact.arch | toJson }}"
   when: .calico_version | empty | not

-- name: Check binaries for registry
+- name: Binary | Ensure Docker Registry binary is present
   tags: ["registry"]
   command: |
     artifact_name={{ get .artifact.artifact_url.docker_registry .item | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/image-registry/docker-registry/{{ .docker_registry_version }}/{{ .item }}
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Attempt to download Docker Registry binary
       http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.docker_registry .item }})
       if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
+        echo "Failed to download Docker Registry binary. HTTP status code: $http_code"
         exit 1
       fi
       curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.docker_registry .item }}
@@ -216,30 +216,30 @@
   loop: "{{ .artifact.arch | toJson }}"
   when: .docker_registry_version | empty | not

-- name: Check binaries for docker-compose
+- name: Binary | Ensure docker-compose binary is present
   tags: ["docker-compose"]
   command: |
     compose_name=docker-compose
     compose_path={{ .binary_dir }}/image-registry/docker-compose/{{ .dockercompose_version }}/{{ .item }}
     if [ ! -f $compose_path/$compose_name ]; then
       mkdir -p $compose_path
-      # download online
+      # Attempt to download docker-compose binary
       curl -L -o $compose_path/$compose_name {{ get .artifact.artifact_url.dockercompose .item }}
     fi
   loop: "{{ .artifact.arch | toJson }}"
   when: .dockercompose_version | empty | not

-- name: Check binaries for harbor
+- name: Binary | Ensure Harbor binary is present
   tags: ["harbor"]
   command: |
     harbor_name={{ get .artifact.artifact_url.harbor .item | splitList "/" | last }}
     harbor_path={{ .binary_dir }}/image-registry/harbor/{{ .harbor_version }}/{{ .item }}
     if [ ! -f $harbor_path/$harbor_name ]; then
       mkdir -p $harbor_path
-      # download online
+      # Attempt to download Harbor binary
       http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.harbor .item }})
       if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
+        echo "Failed to download Harbor binary. HTTP status code: $http_code"
         exit 1
       fi
       curl -L -o $harbor_path/$harbor_name {{ get .artifact.artifact_url.harbor .item }}
@@ -247,17 +247,17 @@
   loop: "{{ .artifact.arch | toJson }}"
   when: .harbor_version | empty | not

-- name: Check binaries for keepalived
+- name: Binary | Ensure keepalived binary is present
   tags: ["keepalived"]
   command: |
     artifact_name={{ get .artifact.artifact_url.keepalived .item | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/image-registry/keepalived/{{ .keepalived_version }}/{{ .item }}
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Attempt to download keepalived binary
       http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.keepalived .item }})
       if [ $http_code != 200 ]; then
-        echo "http code is $http_code"
+        echo "Failed to download keepalived binary. HTTP status code: $http_code"
         exit 1
       fi
       curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.keepalived .item }}
@@ -1,69 +1,69 @@
 ---
-- name: Check binaries for calico
+- name: Helm | Ensure the Calico binary is available
   command: |
     artifact_name={{ .artifact.artifact_url.calico | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/cni/calico
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Download the Calico binary if it does not exist
       curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.calico }}
     fi
   when: .calico_version | empty | not

-- name: Check binaries for cilium
+- name: Helm | Ensure the Cilium binary is available
   command: |
     artifact_name={{ .artifact.artifact_url.cilium | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/cni/cilium
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Download the Cilium binary if it does not exist
       curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.cilium }}
     fi
   when: .cilium_version | empty | not

-- name: Check binaries for flannel
+- name: Helm | Ensure the Flannel binary is available
   command: |
     artifact_name={{ .artifact.artifact_url.flannel | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/cni/flannel
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Download the Flannel binary if it does not exist
       curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.flannel }}
     fi
   when: .flannel_version | empty | not

-- name: Check binaries for kubeovn
+- name: Helm | Ensure the Kube-OVN binary is available
   tags: ["kubeovn"]
   command: |
     artifact_name={{ .artifact.artifact_url.kubeovn | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/cni/kubeovn
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Download the Kube-OVN binary if it does not exist
       curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.kubeovn }}
     fi
   when: .kubeovn_version | empty | not

-- name: Check binaries for hybridnet
+- name: Helm | Ensure the Hybridnet binary is available
   tags: ["hybridnet"]
   command: |
     artifact_name={{ .artifact.artifact_url.hybridnet | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/cni/hybridnet
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Download the Hybridnet binary if it does not exist
       curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.hybridnet }}
     fi
   when: .hybridnet_version | empty | not

-- name: Check binaries for nfs_provisioner
+- name: Helm | Ensure the NFS Provisioner binary is available
   tags: ["nfs_provisioner"]
   command: |
-    artifact_name={{ .artifact.artifact_url.nfs_provisioner |splitList "/" | last }}
+    artifact_name={{ .artifact.artifact_url.nfs_provisioner | splitList "/" | last }}
     artifact_path={{ .binary_dir }}/sc
     if [ ! -f $artifact_path/$artifact_name ]; then
       mkdir -p $artifact_path
-      # download online
+      # Download the NFS Provisioner binary if it does not exist
       curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.nfs_provisioner }}
     fi
   when: .nfs_provisioner_version | empty | not
@@ -1,5 +1,5 @@
 ---
-- name: Extract artifact to work_dir
+- name: Artifact | Extract artifact archive to working directory
   tags: ["always"]
   command: |
     if [ -f "{{ .artifact_file }}" ]; then
@@ -8,15 +8,15 @@
     fi
   when: .artifact_file | empty | not

-- name: Download binaries
+- name: Artifact | Download required binaries and images
   when: .artifact_file | empty
   block:
-    # the binaries which download binary
+    # Download core binaries
     - include_tasks: download_binary.yaml
-    # the binaries which download helm
+    # Download Helm and CNI binaries
     - include_tasks: download_helm.yaml
-    # download remote images to local
-    - name: Download images
+    # Download remote images to the local images directory
+    - name: Download container images
       image:
         pull:
           images_dir: >-
@@ -25,7 +25,7 @@
   when:
     - .image_manifests | default list | empty | not

-- name: Chown work_dir to sudo
+- name: Artifact | Set ownership of working directory to sudo user
   tags: ["always"]
   ignore_errors: true
   command: |
@@ -1,5 +1,5 @@
 ---
-- name: Generate root ca file
+- name: Cert | Generate the root CA certificate file
   gen_cert:
     cn: root
     date: "{{ .certs.ca.date }}"
@@ -9,9 +9,9 @@
     out_cert: >-
       {{ .binary_dir }}/pki/root.crt

-- name: Generate kubernetes ca file
+- name: Cert | Generate Kubernetes CA certificates
   block:
-    - name: Generate ca file for kubernetes
+    - name: Cert | Generate the Kubernetes CA certificate file
       gen_cert:
         root_key: >-
           {{ .binary_dir }}/pki/root.key
@@ -25,7 +25,7 @@
           {{ .binary_dir }}/pki/kubernetes.key
         out_cert: >-
           {{ .binary_dir }}/pki/kubernetes.crt
-    - name: Generate front-proxy ca file for kubernetes
+    - name: Cert | Generate the front-proxy CA certificate for Kubernetes
       gen_cert:
         root_key: >-
           {{ .binary_dir }}/pki/root.key
@@ -40,7 +40,7 @@
     out_cert: >-
       {{ .binary_dir }}/pki/front-proxy.crt

-- name: Generate etcd cert file
+- name: Cert | Generate the etcd certificate file
   gen_cert:
     root_key: >-
       {{ .binary_dir }}/pki/root.key
@@ -68,7 +68,7 @@
       {{ .binary_dir }}/pki/etcd.crt
   when: .groups.etcd | default list | empty | not

-- name: Generate registry image cert file
+- name: Cert | Generate the image registry certificate file
   gen_cert:
     root_key: >-
       {{ .binary_dir }}/pki/root.key
@@ -99,9 +99,9 @@
       {{ .binary_dir }}/pki/image_registry.crt
   when: .groups.image_registry | default list | empty | not

-- name: Chown pki to sudo
+- name: Cert | Set ownership of the PKI directory to the sudo user
   block:
-    - name: Chown pki to sudo
+    - name: Cert | Change ownership of the PKI directory to the sudo user
       ignore_errors: true
       command: |
         chown -R ${SUDO_UID}:${SUDO_GID} {{ .binary_dir }}/pki
@@ -1,13 +1,13 @@
-- name: Set local DNS
+- name: DNS | Configure local DNS entries
   loop: "{{ .localDNS | toJson }}"
   command: |
-    # clear old dns configuration
+    # Remove any previous Kubekey-managed DNS entries
     sed -i ':a;$!{N;ba};s@# kubekey hosts BEGIN.*# kubekey hosts END@@' {{ .item }}
     sed -i '/^$/N;/\n$/N;//D' {{ .item }}
-    # defined new dns configuration
+    # Add updated Kubekey DNS configuration
     cat >> {{ .item }} <<EOF
     # kubekey hosts BEGIN
-    # kubernetes hosts
+    # Kubernetes cluster hosts
     {{- range .groups.k8s_cluster | default list }}
     {{- $hostname := index $.hostvars . "hostname" -}}
     {{- $clusterName := $.kubernetes.cluster_name | default "kubekey" -}}
@@ -19,7 +19,7 @@
     {{ index $.hostvars . "internal_ipv6" }} {{ $hostname }} {{ printf "%s.%s" $hostname $clusterName }} {{ printf "%s.%s.%s" $hostname $clusterName $dnsDomain }}
     {{- end }}
     {{- end }}
-    # etcd hosts
+    # etcd cluster hosts
     {{- range .groups.etcd | default list }}
     {{- if (index $.hostvars . "internal_ipv4") | empty | not }}
     {{ index $.hostvars . "internal_ipv4" }} {{ index $.hostvars . "hostname" }}
@@ -28,7 +28,7 @@
     {{ index $.hostvars . "internal_ipv6" }} {{ index $.hostvars . "hostname" }}
     {{- end }}
     {{- end }}
-    # image registry hosts
+    # Image registry hosts
     {{- range .groups.image_registry | default list }}
     {{- if (index $.hostvars . "internal_ipv4") | empty | not }}
     {{ index $.hostvars . "internal_ipv4" }} {{ index $.hostvars . "hostname" }}
@@ -47,16 +47,16 @@
     {{- if (index .hostvars (.groups.image_registry | first) "internal_ipv6") | empty | not }}
     {{ index .hostvars (.groups.image_registry | first) "internal_ipv6" }} {{ .image_registry.auth.registry }}
     {{- end }}
-    {{ .image_registry.auth.registry }}
+    {{ .image_registry.auth.registry }}
     {{- end }}
     {{- end }}
-    # nfs hosts
+    # NFS server hosts
     {{- range .groups.nfs | default list }}
     {{- if (index $.hostvars . "internal_ipv4") | empty | not }}
     {{ index $.hostvars . "internal_ipv4" }} {{ index $.hostvars . "hostname" }}
     {{- end }}
     {{- if (index $.hostvars . "internal_ipv6") | empty | not }}
-    {{ index $.hostvars . "internal_ipv4" }} {{ index $.hostvars . "hostname" }}
+    {{ index $.hostvars . "internal_ipv6" }} {{ index $.hostvars . "hostname" }}
     {{- end }}
     {{- end }}
     # kubekey hosts END
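The task above regenerates everything between the `# kubekey hosts BEGIN` and `# kubekey hosts END` markers on each run. A small sketch of that marker-block technique against a scratch file:

    hosts=/tmp/hosts.demo
    printf '127.0.0.1 localhost\n# kubekey hosts BEGIN\n1.2.3.4 old-node\n# kubekey hosts END\n' > "$hosts"
    # Slurp the whole file into the pattern space (:a;$!{N;ba}) so the
    # substitution can match across lines, then drop the old marker block.
    sed -i ':a;$!{N;ba};s@# kubekey hosts BEGIN.*# kubekey hosts END@@' "$hosts"
    cat >> "$hosts" <<'EOF'
    # kubekey hosts BEGIN
    5.6.7.8 new-node
    # kubekey hosts END
    EOF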
@@ -1,17 +1,21 @@
 ---
-- name: Configure ntp server
+- name: NTP | Configure NTP server
   command: |
+    # Determine the correct chrony configuration file based on OS type
     chronyConfigFile={{ if or (.os.release.ID | eq "ubuntu") (.os.release.ID_LIKE | eq "debian") }}"/etc/chrony/chrony.conf"{{ else }}"/etc/chrony.conf"{{ end }}
-    # clear old server
+
+    # Remove any existing server, allow, or local entries to ensure a clean configuration
     sed -i '/^server/d' $chronyConfigFile
     sed -i 's/^pool /#pool /g' $chronyConfigFile
     sed -i '/^allow/d' $chronyConfigFile
     sed -i '/^local/d' $chronyConfigFile
-    # add base config
+
+    # Add base configuration to allow all clients and set local stratum
     echo "allow 0.0.0.0/0" >> $chronyConfigFile
     echo "allow ::/0" >> $chronyConfigFile
     echo "local stratum 10" >> $chronyConfigFile
-    # add server config
+
+    # Add NTP server entries
     {{- range $server := .ntp.servers }}
     {{- $internalIPv4 := "" }}
     {{- $internalIPv6 := "" }}
@@ -21,7 +25,7 @@
     {{- $internalIPv6 = .internal_ipv6 | default "" }}
     {{- end }}
     {{- end }}
-    # add ntp server: {{ $server }}
+    # Configuring NTP server: {{ $server }}
     {{- if $internalIPv4 | empty | not }}
     grep -q '^server {{ $internalIPv4 }} iburst' $chronyConfigFile || sed '1a server {{ $internalIPv4 }} iburst' -i $chronyConfigFile
     {{- end }}
@@ -30,19 +34,19 @@
     {{- end }}
     {{- if and ($internalIPv4 | empty) ($internalIPv6 | empty) }}
     grep -q '^server {{ $server }} iburst' $chronyConfigFile || sed '1a server {{ $server }} iburst' -i $chronyConfigFile
     {{- end }}
     {{- end }}
   when:
     - .ntp.enabled
     - .ntp.servers | empty | not

-- name: Set timezone
+- name: Timezone | Set system timezone and NTP synchronization
   command: |
     timedatectl set-timezone {{ .timezone }}
     timedatectl set-ntp {{ and .ntp.enabled (.ntp.servers | empty | not) }}
   when: or (and .ntp.enabled (.ntp.servers | empty | not)) (.timezone | empty | not)

-- name: Restart ntp server
+- name: NTP | Restart NTP service
   command: |
     {{- if or (.os.release.ID | eq "ubuntu") (.os.release.ID_LIKE | eq "debian") }}
     systemctl restart chrony.service
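The `grep -q ... || sed '1a ...'` idiom used above keeps the configuration idempotent: a server line is inserted (after line 1) only if it is not already present. A standalone sketch against a scratch file:

    conf=/tmp/chrony.conf.demo
    printf 'driftfile /var/lib/chrony/drift\n' > "$conf"
    # Insert "server <addr> iburst" after line 1 unless it already exists.
    grep -q '^server 10.0.0.5 iburst' "$conf" || sed '1a server 10.0.0.5 iburst' -i "$conf"
    # Running the same line again is a no-op thanks to the grep guard.
    grep -q '^server 10.0.0.5 iburst' "$conf" || sed '1a server 10.0.0.5 iburst' -i "$conf"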
@@ -1,28 +1,28 @@
 ---
-- name: Sync repository
+- name: Kubekey Repository | Synchronize local repository ISO image
   block:
-    - name: Sync repository file
+    - name: Kubekey Repository | Copy local repository ISO file
       ignore_errors: true
       copy:
         src: >-
           {{ .binary_dir }}/repository/{{ .os.release.ID_LIKE }}-{{ .os.release.VERSION_ID }}-{{ .binary_type }}.iso
         dest: >-
           {{ .tmp_dir }}/repository.iso
-    - name: Mount iso file
+    - name: Kubekey Repository | Mount repository ISO to temporary directory
       command: |
         if [ -f "{{ .tmp_dir }}/repository.iso" ]; then
           mount -t iso9660 -o loop {{ .tmp_dir }}/repository.iso {{ .tmp_dir }}/iso
         fi
   rescue:
-    - name: Unmount iso file
+    - name: Kubekey Repository | Unmount repository ISO from temporary directory
       command: |
         if [ -f "{{ .tmp_dir }}/repository.iso" ]; then
           umount {{ .tmp_dir }}/iso
         fi

-- name: Init repository
+- name: Kubekey Repository | Initialize package repositories and install system dependencies
   block:
-    - name: Init debian repository
+    - name: Kubekey Repository | Initialize Debian-based repository and install required system packages
       command: |
         now=$(date +"%Y-%m-%d %H:%M:%S")
         PKGS="socat conntrack ipset ebtables chrony ipvsadm{{ if .groups.nfs | default list | has .inventory_hostname }} nfs-kernel-server{{ end }}"
@@ -32,32 +32,33 @@
           dpkg -s $pkg >/dev/null 2>&1 || PKGS_TO_INSTALL="$PKGS_TO_INSTALL $pkg"
         fi
       done
-      if [ -f "{{ .tmp_dir }}/repository.iso" ];then
-        # backup
+      if [ -f "{{ .tmp_dir }}/repository.iso" ]; then
+        # Backup current APT sources
         mv /etc/apt/sources.list /etc/apt/sources.list.kubekey-$now.bak
         mv /etc/apt/sources.list.d /etc/apt/sources.list.d.kubekey-$now.bak
         mkdir -p /etc/apt/sources.list.d
-        # add repository
+        # Configure local repository
         rm -rf /etc/apt/sources.list.d/*
-        echo 'deb [trusted=yes] file://{{ .tmp_dir }}/iso /' > /etc/apt/sources.list.d/kubekey.list
-        # update repository
+        echo 'deb [trusted=yes] file://{{ .tmp_dir }}/iso /' > /etc/apt/sources.list.d/kubekey.list
+        # Update package index
         apt-get update
-        # install
+        # Install missing packages
         if [ -n "$PKGS_TO_INSTALL" ]; then
           apt install -y $PKGS_TO_INSTALL
         fi
-        # reset repository
+        # Restore original APT sources
         rm -rf /etc/apt/sources.list.d
         mv /etc/apt/sources.list.kubekey.bak-$now /etc/apt/sources.list
        mv /etc/apt/sources.list.d.kubekey.bak-$now /etc/apt/sources.list.d
       else
+        # No local ISO found, using default repositories
         apt-get update
         if [ -n "$PKGS_TO_INSTALL" ]; then
           apt install -y $PKGS_TO_INSTALL
         fi
       fi
     when: .os.release.ID_LIKE | eq "debian"
-    - name: Init rhel repository
+    - name: Kubekey Repository | Initialize RHEL-based repository and install required system packages
      command: |
        now=$(date +"%Y-%m-%d %H:%M:%S")
        PKGS="socat conntrack ipset ebtables chrony ipvsadm{{ if .groups.nfs | default list | has .inventory_hostname }} nfs-kernel-server{{ end }}"
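The Debian branch temporarily points APT at the mounted ISO as a flat, unsigned repository; `[trusted=yes]` is what lets unsigned local packages install. In isolation, with a placeholder mount point standing in for `{{ .tmp_dir }}/iso`:

    # Register the mounted ISO as the only APT source.
    echo 'deb [trusted=yes] file:///mnt/kubekey-iso /' > /etc/apt/sources.list.d/kubekey.list
    apt-get update            # package index now comes from the ISO
    apt install -y socat      # resolves from the local repository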
@@ -66,34 +67,31 @@
         if [ -n "$pkg" ]; then
           rpm -q $pkg >/dev/null 2>&1 || PKGS_TO_INSTALL="$PKGS_TO_INSTALL $pkg"
         fi
-        if [ -f "{{ .tmp_dir }}/repository.iso" ];then
-          # backup
+      done
+      if [ -f "{{ .tmp_dir }}/repository.iso" ]; then
+        # Backup current YUM repositories
        mv /etc/yum.repos.d /etc/yum.repos.d.kubekey.bak-$now
        mkdir -p /etc/yum.repos.d
-        # add repository
+        # Configure local repository
        rm -rf /etc/yum.repos.d/*
        cat <<EOF > /etc/yum.repos.d/CentOS-local.repo
        [base-local]
-        name=rpms-local
-
+        name=Local RPM Repository
        baseurl=file://{{ .tmp_dir }}/repository.iso
-
-        enabled=1
-
+        enabled=1
        gpgcheck=0
-
        EOF
-        # update repository
+        # Refresh repository cache
        yum clean all && yum makecache
-        # install
+        # Install missing packages
        if [ -n "$PKGS_TO_INSTALL" ]; then
          yum install -y $PKGS_TO_INSTALL
        fi
-        # reset repository
+        # Restore original YUM repositories
        rm -rf /etc/yum.repos.d
        mv /etc/yum.repos.d.kubekey.bak-$now /etc/yum.repos.d
      else
-        # install
+        # No local ISO found, using default repositories
        if [ -n "$PKGS_TO_INSTALL" ]; then
          yum install -y $PKGS_TO_INSTALL
        fi
@@ -1,15 +1,15 @@
 ---
-- name: Init for new kubernetes nodes
-  when: .kubernetes_install_service.stdout | eq "inactive"
+- name: OS | Initialize new Kubernetes nodes
+  when: .kubernetes_install_LoadState.stdout | eq "not-found"
   block:
     - include_tasks: init_repository.yaml
-    - name: Reset tmp dir
+    - name: OS | Reset temporary directory
       command: |
         if [ -d {{ .tmp_dir }} ]; then
           rm -rf {{ .tmp_dir }}
         fi
         mkdir -m 777 -p {{ .tmp_dir }}
-    - name: Set hostname
+    - name: OS | Set system hostname
       command: |
         hostnamectl set-hostname {{ .inventory_hostname }} \
           && sed -i '/^127.0.1.1/s/.*/127.0.1.1 {{ .inventory_hostname }}/g' {{ .item }}
@@ -17,16 +17,16 @@
         - .set_hostname
         - .inventory_hostname | ne "localhost"
       loop: "{{ .localDNS | toJson }}"
-    - name: Sync init os to remote
+    - name: OS | Synchronize initialization script to remote node
       template:
         src: init-os.sh
         dest: /etc/kubekey/scripts/init-os.sh
         mode: 0755
-    - name: Execute init os script
+    - name: OS | Execute initialization script on remote node
       command: |
         /etc/kubekey/scripts/init-os.sh

-- name: Init for all nodes always
+- name: OS | Always perform initialization steps for all nodes
   block:
     - include_tasks: init_ntpserver.yaml
     - include_tasks: init_localdns.yaml
@@ -1,32 +1,33 @@
 ---
-- name: Check if calicoctl is installed
+- name: Calico | calico-check-calicoctl-installed
   ignore_errors: true
   command: calicoctl version
   register: calicoctl_install_version
   register_type: yaml
-- name: Install calicoctl
-  when: .calicoctl_install_version.stderr | empty | not
+
+- name: Calico | calico-install-calicoctl-if-missing
+  when: .calicoctl_install_version.error | empty | not
   block:
-    - name: Sync calicoctl to remote
+    - name: Calico | calico-sync-calicoctl-to-remote
       copy:
         src: >-
           {{ .binary_dir }}/cni/calico/{{ .calico_version }}/{{ .binary_type }}/calicoctl
         dest: /usr/local/bin/calicoctl
         mode: 0755

-- name: Sync calico package to remote
+- name: Calico | calico-sync-calico-package-to-remote
   copy:
     src: >-
       {{ .binary_dir }}/cni/calico/tigera-operator-{{ .calico_version }}.tgz
     dest: >-
       /etc/kubernetes/cni/tigera-operator-{{ .calico_version }}.tgz

-- name: Generate calico custom value file
+- name: Calico | calico-generate-custom-values-file
   copy:
     content: |
       {{ .cni.calico.values }}
     dest: /etc/kubernetes/cni/calico-values.yaml

-- name: Apply calico
+- name: Calico | calico-apply-helm-chart
   command: |
-    helm install --create-namespace --namespace tigera-operator calico /etc/kubernetes/cni/tigera-operator-{{ .calico_version }}.tgz -f /etc/kubernetes/cni/calico-values.yaml
+    helm upgrade --install --create-namespace --namespace tigera-operator calico /etc/kubernetes/cni/tigera-operator-{{ .calico_version }}.tgz -f /etc/kubernetes/cni/calico-values.yaml
@@ -1,25 +1,25 @@
 ---
-- name: Sync cilium cli package
+- name: Cilium | Ensure cilium CLI package is present
   when: .ciliumcli_version | empty | not
   copy:
     src: >-
       {{ .binary_dir }}/cni/cilium/ciliumcli-{{ .ciliumcli_version }}/{{ .item }}
     dest: /usr/local/bin/cilium

-- name: Sync cilium helm chart package
+- name: Cilium | Ensure cilium Helm chart package is present
   copy:
     src: >-
       {{ .binary_dir }}/cni/cilium/cilium-{{ .cilium_version }}.tgz
     dest: >-
       /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz

-- name: Sync cilium helm chart custom value file
+- name: Cilium | Generate cilium Helm custom values file
   copy:
     content: |
       {{ .cni.cilium.values }}
     dest: /etc/kubernetes/cni/cilium-values.yaml

-# https://docs.cilium.io/en/stable/installation/k8s-install-helm/
-- name: Install cilium
+# Reference: https://docs.cilium.io/en/stable/installation/k8s-install-helm/
+- name: Cilium | Install cilium using Helm
   command: |
-    helm install --namespace kube-system cilium /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz -f /etc/kubernetes/cni/cilium-values.yaml
+    helm upgrade --install --namespace kube-system cilium /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz -f /etc/kubernetes/cni/cilium-values.yaml

@ -1,17 +1,18 @@
---
# https://github.com/flannel-io/flannel/blob/master/Documentation/kubernetes.md
- name: Sync flannel package to remote
# For more information, see: https://github.com/flannel-io/flannel/blob/master/Documentation/kubernetes.md

- name: Flannel | Sync flannel package to remote node
copy:
src: >-
{{ .binary_dir }}/cni/flannel/flannel.tgz
dest: /etc/kubernetes/cni/flannel.tgz

- name: Generate flannel custom value file
- name: Flannel | Generate flannel custom values file
copy:
content: |
{{ .cni.flannel.values }}
dest: /etc/kubernetes/cni/flannel-values.yaml

- name: Apply flannel
- name: Flannel | Install flannel using Helm
command: |
helm install --create-namespace --namespace kube-flannel flannel /etc/kubernetes/cni/flannel.tgz -f /etc/kubernetes/cni/flannel-values.yaml
helm upgrade --install --create-namespace --namespace kube-flannel flannel /etc/kubernetes/cni/flannel.tgz -f /etc/kubernetes/cni/flannel-values.yaml

@ -1,18 +1,18 @@
---
- name: Sync hybridnet helm chart to remote
- name: Hybridnet | Synchronize Hybridnet Helm chart package to remote node
copy:
src: >-
{{ .binary_dir }}/cni/hybridnet-{{ .hybridnet_version }}.tgz
dest: >-
/etc/kubernetes/cni/hybridnet-{{ .hybridnet_version }}.tgz

- name: Generate hybridnet custom value file
- name: Hybridnet | Generate Hybridnet custom values file
copy:
content: |
{{ .cni.hybridnet.values }}
dest: /etc/kubernetes/cni/hybridnet-values.yaml

# https://artifacthub.io/packages/helm/hybridnet/hybridnet
- name: Install hybridnet
# Reference: https://artifacthub.io/packages/helm/hybridnet/hybridnet
- name: Hybridnet | Install Hybridnet using Helm
command: |
helm install --namespace kube-system hybridnet /etc/kubernetes/cni/hybridnet-{{ .hybridnet_version }}.tgz -f /etc/kubernetes/cni/hybridnet-values.yaml
helm upgrade --install --namespace kube-system hybridnet /etc/kubernetes/cni/hybridnet-{{ .hybridnet_version }}.tgz -f /etc/kubernetes/cni/hybridnet-values.yaml

@ -1,27 +1,27 @@
---
- name: Sync kubeovn package to remote
- name: Kubeovn | Synchronize Kube-OVN Helm chart package to remote node
copy:
src: >-
{{ .binary_dir }}/cni/kubeovn/kubeovn-{{ .kubeovn_version }}.tgz
dest: >-
/etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz

- name: Generate kubeovn custom value file
- name: Kubeovn | Generate Kube-OVN custom values file
copy:
content: |
{{ .cni.kubeovn.values }}
dest: /etc/kubernetes/cni/kubeovn-values.yaml

- name: Add kubeovn label to node
- name: Kubeovn | Add Kube-OVN labels to nodes
command: |
kubectl label node -lbeta.kubernetes.io/os=linux kubernetes.io/os=linux --overwrite
kubectl label node -lnode-role.kubernetes.io/control-plane kube-ovn/role=master --overwrite

- name: Apply kubeovn
- name: Kubeovn | Install Kube-OVN using Helm with custom values
command: |
helm install --namespace kubeovn-system kubeovn /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz -f /etc/kubernetes/cni/kubeovn-values.yaml
helm upgrade --install --namespace kubeovn-system kubeovn /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz -f /etc/kubernetes/cni/kubeovn-values.yaml

# https://kubeovn.github.io/docs/stable/start/one-step-install/#helm-chart
- name: Install kubeovn
# Reference: https://kubeovn.github.io/docs/stable/start/one-step-install/#helm-chart
- name: Kubeovn | Install Kube-OVN using Helm
command: |
helm install --namespace kubeovn-system kubeovn /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz
helm upgrade --install --namespace kubeovn-system kubeovn /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz

@ -1,9 +1,9 @@
---
- name: Generate multus yaml
- name: Multus | Generate Multus configuration YAML file
template:
src: multus/multus.yaml
desc: /etc/kubernetes/cni/multus.yaml
dest: /etc/kubernetes/cni/multus.yaml

- name: Apply multus
- name: Multus | Apply Multus configuration to the cluster
command: |
kubectl apply -f /etc/kubernetes/cni/multus.yaml

@ -1,60 +1,62 @@
---
- name: Check if runc is installed
- name: Containerd | Verify if runc is installed on the system
ignore_errors: true
command: runc --version
register: runc_install_version
- name: Sync runc binary to remote
when: or (.runc_install_version.stderr | empty | not) (.runc_install_version.stdout | contains (printf "runc version %s\n" (.runc_version | default "" | trimPrefix "v" )) | not)

- name: Containerd | Ensure the runc binary is present on the remote node
when: or (.runc_install_version.error | empty | not) (.runc_install_version.stdout | contains (printf "runc version %s\n" (.runc_version | default "" | trimPrefix "v" )) | not)
copy:
src: >-
{{ .binary_dir }}/runc/{{ .runc_version }}/{{ .binary_type }}/runc.{{ .binary_type }}
dest: /usr/local/bin/runc
mode: 0755

- name: Check if containerd is installed
- name: Containerd | Check if containerd is installed on the system
ignore_errors: true
command: containerd --version
register: containerd_install_version
- name: Install containerd
when: or (.containerd_install_version.stderr | empty | not) (.containerd_install_version.stdout | contains (printf " %s " .containerd_version) | not)

- name: Containerd | Install and configure containerd if not present or version mismatch
when: or (.containerd_install_version.error | empty | not) (.containerd_install_version.stdout | contains (printf " %s " .containerd_version) | not)
block:
- name: Sync containerd binary to remote
- name: Containerd | Copy containerd binary archive to the remote node
copy:
src: >-
{{ .binary_dir }}/containerd/{{ .containerd_version }}/{{ .binary_type }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type }}.tar.gz
dest: >-
{{ .tmp_dir }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type }}.tar.gz
- name: Unpackage containerd binary
- name: Containerd | Extract containerd binaries to /usr/local/bin
command: |
tar -xvf {{ .tmp_dir }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type }}.tar.gz --strip-components=1 -C /usr/local/bin/
- name: Generate containerd config file
- name: Containerd | Generate the containerd configuration file
template:
src: config.toml
dest: /etc/containerd/config.toml
- name: Generate containerd Service file
- name: Containerd | Deploy the containerd systemd service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
- name: Start containerd
- name: Containerd | Start and enable the containerd service
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service

- name: Sync image registry tls to remote
- name: Containerd | Synchronize image registry TLS certificates to the remote node
when: .groups.image_registry | default list | empty | not
block:
- name: Sync image registry cert file to remote
- name: Containerd | Copy image registry CA certificate to the remote node
copy:
src: >-
{{ .binary_dir }}/pki/root.crt
dest: >-
/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/ca.crt
- name: Sync image registry cert file to remote
- name: Containerd | Copy image registry server certificate to the remote node
copy:
src: >-
{{ .binary_dir }}/pki/image_registry.crt
dest: >-
/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.crt
- name: Sync image registry key file to remote
- name: Containerd | Copy image registry server key to the remote node
copy:
src: >-
{{ .binary_dir }}/pki/image_registry.key
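
The runc and containerd tasks follow one pattern: probe the installed version with `ignore_errors`, register the result, and only copy binaries when the probe errored (tool missing, now surfaced through the task result's `error` field instead of stderr) or the reported version differs. A rough shell rendering of that gate (the expected version string is an assumed example):

  #!/usr/bin/env bash
  # Install containerd only when it is absent or reports another version.
  EXPECTED="1.7.13"   # assumed target version

  if ! current="$(containerd --version 2>/dev/null)"; then
    echo "containerd missing, installing..."
  elif ! grep -q " v${EXPECTED} " <<<"$current"; then
    echo "version mismatch (${current}), reinstalling..."
  else
    echo "containerd v${EXPECTED} already installed, skipping."
  fi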

@ -1,22 +1,22 @@
---
- name: Check if crictl is installed
- name: Crictl | Verify if crictl is installed on the system
ignore_errors: true
command: crictl --version
register: crictl_install_version

- name: Install crictl
when: or (.crictl_install_version.stderr | empty | not) (.crictl_install_version.stdout | ne (printf "crictl version %s" .crictl_version))
- name: Crictl | Install and configure crictl if not present or version mismatch
when: or (.crictl_install_version.error | empty | not) (.crictl_install_version.stdout | ne (printf "crictl version %s" .crictl_version))
block:
- name: Sync crictl binary to remote
- name: Crictl | Copy crictl binary archive to the remote node
copy:
src: >-
{{ .binary_dir }}/crictl/{{ .crictl_version }}/{{ .binary_type }}/crictl-{{ .crictl_version }}-linux-{{ .binary_type }}.tar.gz
dest: >-
{{ .tmp_dir }}/crictl-{{ .crictl_version }}-linux-{{ .binary_type }}.tar.gz
- name: Unpackage crictl binary
- name: Crictl | Extract crictl binary to /usr/local/bin
command: |
tar -xvf {{ .tmp_dir }}/crictl-{{ .crictl_version }}-linux-{{ .binary_type }}.tar.gz -C /usr/local/bin/
- name: Generate crictl config file
- name: Crictl | Generate crictl configuration file
template:
src: crictl.yaml
dest: /etc/crictl.yaml

@ -1,25 +1,25 @@
---
- name: Check if cri-dockerd is installed
- name: Cridockerd | Check if cri-dockerd is installed on the system
ignore_errors: true
command: cri-dockerd --version
register: cridockerd_install_version

- name: Install cri-dockerd
when: or (.cridockerd_install_version.stderr | empty | not) (.cridockerd_install_version.stdout | hasPrefix (printf "cri-dockerd %s " .cridockerd_version) | not)
- name: Cridockerd | Install and configure cri-dockerd if not present or version mismatch
when: or (.cridockerd_install_version.error | empty | not) (.cridockerd_install_version.stdout | hasPrefix (printf "cri-dockerd %s " .cridockerd_version) | not)
block:
- name: Sync cri-dockerd Binary to remote
- name: Cridockerd | Copy cri-dockerd binary archive to the remote node
copy:
src: >-
{{ .binary_dir }}/cri-dockerd/{{ .cridockerd_version }}/{{ .binary_type }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.{{ .binary_type }}.tgz
dest: >-
{{ .tmp_dir }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.{{ .binary_type }}.tgz
- name: Unpackage cri-dockerd binary
- name: Cridockerd | Extract cri-dockerd binary to /usr/local/bin
command: |
tar -xvf {{ .tmp_dir }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.{{ .binary_type }}.tgz --strip-components=1 -C /usr/local/bin/
- name: Generate cri-dockerd Service file
- name: Cridockerd | Generate cri-dockerd systemd service file
template:
src: cri-dockerd.service
dest: /etc/systemd/system/cri-dockerd.service
- name: Start cri-dockerd service
- name: Cridockerd | Start and enable the cri-dockerd service
command: |
systemctl daemon-reload && systemctl start cri-dockerd.service && systemctl enable cri-dockerd.service

@ -1,59 +1,59 @@
---
# install cridockerd
# Docker | Install cri-dockerd if required for Kubernetes >= v1.24.0
- include_tasks: cridockerd.yaml
when:
- .kube_version | semverCompare ">=v1.24.0"
- .kube_version | semverCompare ">=v1.24.0"

- name: Check if docker is installed
- name: Docker | Check if Docker is installed on the system
ignore_errors: true
command: docker --version
register: docker_install_version

- name: Install docker
when: or (.docker_install_version.stderr | empty | not) (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
- name: Docker | Install and configure Docker if not present or version mismatch
when: or (.docker_install_version.error | empty | not) (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
block:
- name: Sync docker binary to remote
- name: Docker | Copy Docker binary archive to the remote node
copy:
src: >-
{{ .binary_dir }}/docker/{{ .docker_version }}/{{ .binary_type }}/docker-{{ .docker_version }}.tgz
dest: >-
{{ .tmp_dir }}/docker-{{ .docker_version }}.tgz
- name: Unpackage docker binary
- name: Docker | Extract Docker binaries to /usr/local/bin
command: |
tar -C /usr/local/bin/ --strip-components=1 -xvf {{ .tmp_dir }}/docker-{{ .docker_version }}.tgz --wildcards docker/*
- name: Generate docker config file
- name: Docker | Generate Docker configuration file
template:
src: daemon.json
dest: /etc/docker/daemon.json
- name: Generate docker service file
- name: Docker | Deploy the Docker systemd service file
copy:
src: docker.service
dest: /etc/systemd/system/docker.service
- name: Generate containerd service file
- name: Docker | Deploy the containerd systemd service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
- name: Start docker service
- name: Docker | Start and enable Docker and containerd services
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service

- name: Sync image registry tls to remote
- name: Docker | Synchronize image registry TLS certificates to the remote node
when: .groups.image_registry | default list | empty | not
block:
- name: Sync image registry cert file to remote
- name: Docker | Copy image registry CA certificate to the remote node
copy:
src: >-
{{ .binary_dir }}/pki/root.crt
dest: >-
/etc/docker/certs.d/{{ .image_registry.auth.registry }}/ca.crt
- name: Sync image registry cert file to remote
- name: Docker | Copy image registry client certificate to the remote node
copy:
src: >-
{{ .binary_dir }}/pki/image_registry.crt
dest: >-
/etc/docker/certs.d/{{ .image_registry.auth.registry }}/client.cert
- name: Sync image registry key file to remote
- name: Docker | Copy image registry client key to the remote node
copy:
src: >-
{{ .binary_dir }}/pki/image_registry.key

@ -1,21 +1,21 @@
---
- name: Sync custom backup etcd script
- name: Backup | Synchronize custom etcd backup script
template:
src: >-
{{ .etcd.backup.etcd_backup_script }}
dest: /usr/local/bin/kube-scripts/backup_etcd.sh
mode: 777

- name: Generate backup etcd service
- name: Backup | Deploy systemd service for etcd backup
copy:
src: backup.service
dest: /etc/systemd/system/backup-etcd.service

- name: Generate backup etcd timer
- name: Backup | Deploy systemd timer for scheduled etcd backup
template:
src: backup.timer
dest: /etc/systemd/system/backup-etcd.timer

- name: Enable etcd timer
- name: Backup | Reload systemd and enable etcd backup timer
command: |
systemctl daemon-reload && systemctl enable --now backup-etcd.timer

@ -0,0 +1,37 @@
- name: Expansion | Expand cluster on existing etcd nodes
when: .etcd_install_LoadState.stdout | eq "loaded"
block:
- name: Expansion | Update /etc/etcd.env configuration file
template:
src: etcd.env
dest: /etc/etcd.env
- name: Expansion | Restart etcd service
command: |
systemctl restart etcd.service
- name: Expansion | Verify etcd service becomes healthy within 1 minute
command: |
for ((i=1; i<=12; i++)); do
if ETCDCTL_API=3 etcdctl \
--endpoints=https://localhost:2379 \
--cacert=/etc/ssl/etcd/ssl/ca.crt \
--cert=/etc/ssl/etcd/ssl/server.crt \
--key=/etc/ssl/etcd/ssl/server.key \
endpoint health >/dev/null 2>&1; then
echo "✅ etcd is healthy"
exit 0
fi
sleep 5
done
echo "❌ etcd is not healthy within 1 minute"
exit 1

- name: Expansion | Add new etcd member from non-installed node
when: .etcd_install_LoadState.stdout | eq "not-found"
delegate_to: "{{ .installed_etcd }}"
command: |
ETCDCTL_API=3 etcdctl \
--endpoints=https://localhost:2379 \
--cacert=/etc/ssl/etcd/ssl/ca.crt \
--cert=/etc/ssl/etcd/ssl/server.crt \
--key=/etc/ssl/etcd/ssl/server.key \
member add {{ .inventory_hostname }}
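
`etcdctl member add` only registers the incoming member with the cluster; the new node must then start etcd in "existing" cluster state (presumably what the regenerated etcd.env provides) before it actually joins. A hedged sketch of the add-then-start handshake, with an example member name and peer URL:

  #!/usr/bin/env bash
  set -euo pipefail

  # Run on a healthy member: register the incoming node (example name/URL).
  ETCDCTL_API=3 etcdctl \
    --endpoints=https://localhost:2379 \
    --cacert=/etc/ssl/etcd/ssl/ca.crt \
    --cert=/etc/ssl/etcd/ssl/server.crt \
    --key=/etc/ssl/etcd/ssl/server.key \
    member add node4 --peer-urls=https://192.168.0.14:2380

  # Then, on node4, etcd must come up joining the existing cluster,
  # e.g. via etcd.env: ETCD_INITIAL_CLUSTER_STATE=existing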

@ -0,0 +1,41 @@
---
- name: Install | Initialize etcd environment
block:
- name: Install | Create etcd system user
command: |
id etcd &>/dev/null || useradd -M -c 'Etcd user' -s /sbin/nologin -r etcd || :
- name: Install | Create etcd data directory and set ownership
command: |
if [ ! -d "{{ .item }}" ]; then
mkdir -p {{ .item }} && chown -R etcd {{ .item }}
fi
loop:
- "{{ .etcd.env.data_dir }}"

- name: Install | Generate etcd environment configuration file
template:
src: etcd.env
dest: /etc/etcd.env

- name: Install | Deploy etcd systemd service file
copy:
src: etcd.service
dest: /etc/systemd/system/etcd.service

# refer: https://etcd.io/docs/v3.5/tuning/
- name: Install | Set CPU governor to performance mode
command: |
echo performance | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
when: .etcd.performance

- name: Install | Configure network traffic priority for etcd
command: |
tc qdisc add dev eth0 root handle 1: prio bands 3
tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip sport 2380 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip dport 2380 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip sport 2379 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip dport 2379 0xffff flowid 1:1
when: .etcd.traffic_priority

- name: Install | Start and enable etcd systemd service
command: systemctl daemon-reload && systemctl start etcd && systemctl enable etcd
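
The traffic-priority task mirrors the etcd tuning guide: a `prio` qdisc with three bands on the NIC, with u32 filters steering peer traffic (port 2380) into the highest-priority band and client traffic (port 2379) just below it. A sketch for inspecting and rolling back that configuration (the hard-coded `eth0` device name is carried over from the task):

  #!/usr/bin/env bash
  # Inspect the prio qdisc and filters created for etcd traffic shaping.
  tc qdisc show dev eth0
  tc filter show dev eth0 parent 1:

  # Roll the shaping back by deleting the root qdisc (filters go with it).
  tc qdisc del dev eth0 root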

@ -1,58 +0,0 @@
---
- name: Sync etcd binary to node
copy:
src: >-
{{ .binary_dir }}/etcd/{{ .etcd_version }}/{{ .binary_type }}/etcd-{{ .etcd_version }}-linux-{{ .binary_type }}.tar.gz
dest: >-
{{ .tmp_dir }}/etcd-{{ .etcd_version }}-linux-{{ .binary_type }}.tar.gz

- name: Extract etcd binary
command: |
tar --strip-components=1 -C /usr/local/bin/ -xvf {{ .tmp_dir }}/etcd-{{ .etcd_version }}-linux-{{ .binary_type }}.tar.gz \
--wildcards etcd-{{ .etcd_version }}-linux-{{ .binary_type }}/etcd*

- name: Sync ca file to remote
copy:
src: >-
{{ .binary_dir }}/pki/root.crt
dest: /etc/ssl/etcd/ssl/ca.crt

- name: Sync etcd cert file to remote
copy:
src: >-
{{ .binary_dir }}/pki/etcd.crt
dest: /etc/ssl/etcd/ssl/server.crt

- name: Sync etcd key file to remote
copy:
src: >-
{{ .binary_dir }}/pki/etcd.key
dest: /etc/ssl/etcd/ssl/server.key

- name: Generate etcd env file
template:
src: etcd.env
dest: /etc/etcd.env

- name: Generate etcd systemd service file
copy:
src: etcd.service
dest: /etc/systemd/system/etcd.service

# refer: https://etcd.io/docs/v3.5/tuning/
- name: Set cpu to performance
command: |
echo performance | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
when: .etcd.performance

- name: Set Traffic Priority
command: |
tc qdisc add dev eth0 root handle 1: prio bands 3
tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip sport 2380 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip dport 2380 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip sport 2379 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip dport 2379 0xffff flowid 1:1
when: .etcd.traffic_priority

- name: Start etcd service
command: systemctl daemon-reload && systemctl start etcd && systemctl enable etcd

@ -1,18 +1,20 @@
---
- name: Install etcd
when: .etcd_install_version.stderr | empty | not
- include_tasks: prepare.yaml

- name: ETCD | Upgrade etcd if a newer version is available
when:
- .etcd_install_LoadState.stdout | eq "loaded"
- .etcd_version | semverCompare (printf ">v%s" (index .etcd_install_version "stdout" "etcd Version"))
include_tasks: upgrade.yaml

- name: ETCD | Expand the etcd cluster by adding new nodes if required
when:
- .installed_etcd | empty | not
- .need_installed_etcd | fromJson | empty | not
include_tasks: expansion.yaml

- name: ETCD | Install etcd and set up the backup service if not already present
when: .etcd_install_LoadState.stdout | eq "not-found"
block:
- name: Init etcd
block:
- name: Add etcd user
command: |
id etcd &>/dev/null || useradd -M -c 'Etcd user' -s /sbin/nologin -r etcd
- name: Create etcd directories
command: |
if [ ! -d "{{ .item }}" ]; then
mkdir -p {{ .item }} && chown -R etcd {{ .item }}
fi
loop:
- "{{ .etcd.env.data_dir }}"
- include_tasks: install_etcd.yaml
- include_tasks: backup_etcd.yaml
- include_tasks: install.yaml
- include_tasks: backup_service.yaml

@ -0,0 +1,99 @@

- name: Prepare | Check etcd.service status in systemd
block:
- name: Prepare | Get etcd.service LoadState and save to variable
command: systemctl show etcd.service -p LoadState --value
register: etcd_install_LoadState
- name: Prepare | Get etcd.service ActiveState and save to variable
command: systemctl show etcd.service -p ActiveState --value
register: etcd_install_ActiveState
- name: Prepare | Ensure installed etcd is running and healthy
when: .etcd_install_LoadState.stdout | eq "loaded"
assert:
that: .etcd_install_ActiveState.stdout | eq "active"
fail_msg: >-
etcd service is installed but not running

- name: Prepare | Set etcd node parameters
block:
- name: Prepare | Set etcd state to existing if already installed
when: .etcd_install_LoadState.stdout | eq "loaded"
set_fact:
etcd:
state: existing
- name: Prepare | Identify nodes with installed or missing etcd
run_once: true
add_hostvars:
hosts: etcd
vars:
installed_etcd: >-
{{- $needInstalled := list -}}
{{- range .groups.etcd -}}
{{- if (index $.hostvars . "etcd_install_LoadState" "stdout") | eq "loaded" -}}
{{- $needInstalled = append $needInstalled . -}}
{{- end -}}
{{- end -}}
{{ $needInstalled | first | default "" }}
need_installed_etcd: >-
{{- $needInstalled := list -}}
{{- range .groups.etcd -}}
{{- if (index $.hostvars . "etcd_install_LoadState" "stdout") | eq "not-found" -}}
{{- $needInstalled = append $needInstalled . -}}
{{- end -}}
{{- end -}}
{{ $needInstalled | toJson }}

- name: Prepare | Check installed etcd version
when: .etcd_install_LoadState.stdout | eq "loaded"
block:
- name: Prepare | Get installed etcd version
command: etcd --version
register: etcd_install_version
register_type: yaml
- name: Prepare | Ensure target etcd version is not lower than installed version
when: .etcd_install_LoadState.stdout | eq "loaded"
assert:
that: .etcd_version | semverCompare (printf ">=v%s" (index .etcd_install_version "stdout" "etcd Version"))
fail_msg: >-
Installed etcd version: {{ index .etcd_install_version "stdout" "etcd Version" }} is lower than target etcd version: {{ .etcd_version }}

- name: Prepare | Synchronize etcd package to node if new install or upgrade
when:
- .etcd_install_version.error | empty
- or (eq .etcd_install_version.stdout "skip") (eq .etcd_version (printf ">=v%s" (index .etcd_install_version "stdout" "etcd Version")))
block:
- name: Prepare | Copy etcd binary package to remote node
copy:
src: >-
{{ .binary_dir }}/etcd/{{ .etcd_version }}/{{ .binary_type }}/etcd-{{ .etcd_version }}-linux-{{ .binary_type }}.tar.gz
dest: >-
{{ .tmp_dir }}/etcd-{{ .etcd_version }}-linux-{{ .binary_type }}.tar.gz
- name: Prepare | Extract etcd binary package to /usr/local/bin/
command: |
tar --strip-components=1 -C /usr/local/bin/ -xvf {{ .tmp_dir }}/etcd-{{ .etcd_version }}-linux-{{ .binary_type }}.tar.gz \
--wildcards etcd-{{ .etcd_version }}-linux-{{ .binary_type }}/etcd*

- name: Prepare | Synchronize certificates to node for new install or expansion
when: >-
or
(eq .etcd_install_version.stdout "skip")
(and
(.installed_etcd | empty | not)
(.need_installed_etcd | fromJson | empty | not)
)
block:
- name: Prepare | Copy CA certificate to etcd node
copy:
src: >-
{{ .binary_dir }}/pki/root.crt
dest: /etc/ssl/etcd/ssl/ca.crt
- name: Prepare | Copy server certificate to etcd node
copy:
src: >-
{{ .binary_dir }}/pki/etcd.crt
dest: /etc/ssl/etcd/ssl/server.crt
- name: Prepare | Copy server key to etcd node
copy:
src: >-
{{ .binary_dir }}/pki/etcd.key
dest: /etc/ssl/etcd/ssl/server.key
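
The detection here hinges on `systemctl show`, which prints a stable key/value view of a unit even when no unit file exists (`LoadState=not-found`), making it a safer probe than parsing `systemctl status` output. The two probes the prepare step registers:

  #!/usr/bin/env bash
  # LoadState: "loaded" when the unit file exists, "not-found" otherwise.
  systemctl show etcd.service -p LoadState --value

  # ActiveState: "active" when the service is currently running.
  systemctl show etcd.service -p ActiveState --value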

@ -0,0 +1,23 @@
- name: Upgrade | Backup etcd data before upgrade
command: BACKUP_DIR="{{ .etcd.backup.backup_dir }}/install/etcd-v{{ index .etcd_install_version "stdout" "etcd Version" }}-$(date +%Y-%m-%d-%H-%M-%S)" /usr/local/bin/kube-scripts/backup_etcd.sh

- name: Upgrade | Restart etcd service after upgrade
command: |
systemctl restart etcd.service

- name: Upgrade | Ensure etcd service becomes healthy within 1 minute
command: |
for ((i=1; i<=12; i++)); do
if ETCDCTL_API=3 etcdctl \
--endpoints=https://localhost:2379 \
--cacert=/etc/ssl/etcd/ssl/ca.crt \
--cert=/etc/ssl/etcd/ssl/server.crt \
--key=/etc/ssl/etcd/ssl/server.key \
endpoint health >/dev/null 2>&1; then
echo "✅ etcd is healthy"
exit 0
fi
sleep 5
done
echo "❌ etcd is not healthy within 1 minute"
exit 1

@ -11,7 +11,7 @@ ENDPOINTS='https://{{ .internal_ipv4 }}:2379'
ENDPOINTS='https://{{ .internal_ipv6 }}:2379'
{{- end }}
ETCD_DATA_DIR="{{ .etcd.env.data_dir }}"
BACKUP_DIR="{{ .etcd.backup.backup_dir }}/etcd-$(date +%Y-%m-%d-%H-%M-%S)"
BACKUP_DIR="${BACKUP_DIR:-{{ .etcd.backup.backup_dir }}/timer/etcd-$(date +%Y-%m-%d-%H-%M-%S)}"
KEEPBACKUPNUMBER='{{ .etcd.backup.keep_backup_number }}'
((KEEPBACKUPNUMBER++))
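
The backup script now defaults `BACKUP_DIR` with the `${VAR:-default}` expansion, so the systemd timer writes under a `timer/` subdirectory while callers such as the upgrade task can pre-seed `BACKUP_DIR` to redirect a one-off backup. A small demonstration of the expansion (paths are illustrative):

  #!/usr/bin/env bash
  # ${BACKUP_DIR:-default}: keep the caller's value, else fall back.
  unset BACKUP_DIR
  echo "${BACKUP_DIR:-/var/backups/etcd/timer}"   # -> /var/backups/etcd/timer

  BACKUP_DIR=/var/backups/etcd/install/pre-upgrade
  echo "${BACKUP_DIR:-/var/backups/etcd/timer}"   # -> /var/backups/etcd/install/pre-upgrade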

@ -21,7 +21,7 @@ ETCDCTL_CA_FILE="/etc/ssl/etcd/ssl/ca.crt"

[ ! -d $BACKUP_DIR ] && mkdir -p $BACKUP_DIR

export ETCDCTL_API=2;$ETCDCTL_PATH backup --data-dir $ETCD_DATA_DIR --backup-dir $BACKUP_DIR
export ETCDCTL_API=3;$ETCDCTL_PATH backup --data-dir $ETCD_DATA_DIR --backup-dir $BACKUP_DIR

sleep 3
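
Worth noting when reading this hunk: the `backup` subcommand comes from the etcd v2 tooling, while under `ETCDCTL_API=3` the supported way to take a backup is `snapshot save` against a live endpoint. A hedged sketch of the v3-native equivalent (endpoint and output path are example values):

  #!/usr/bin/env bash
  set -euo pipefail

  # v3-native backup: stream a snapshot from a running member.
  ETCDCTL_API=3 etcdctl \
    --endpoints=https://localhost:2379 \
    --cacert=/etc/ssl/etcd/ssl/ca.crt \
    --cert=/etc/ssl/etcd/ssl/server.crt \
    --key=/etc/ssl/etcd/ssl/server.key \
    snapshot save /var/backups/etcd/snapshot.db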

@ -1,34 +1,34 @@
---
- name: Check if docker is installed
- name: Docker | Check if Docker is installed on the system
ignore_errors: true
command: docker --version
register: docker_install_version

- name: Install docker
when: or (.docker_install_version.stderr | empty | not) (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
- name: Docker | Install and configure Docker if not present or version mismatch
when: or (.docker_install_version.error | empty | not) (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
block:
- name: Sync docker binary to remote
- name: Docker | Copy Docker binary archive to the remote node
copy:
src: >-
{{ .binary_dir }}/docker/{{ .docker_version }}/{{ .binary_type }}/docker-{{ .docker_version }}.tgz
dest: >-
{{ .tmp_dir }}/docker-{{ .docker_version }}.tgz
- name: Generate docker config file
- name: Docker | Generate Docker configuration file
template:
src: daemon.json
dest: /etc/docker/daemon.json
- name: Unpackage docker binary
- name: Docker | Extract Docker binaries to /usr/local/bin
command: |
tar -C /usr/local/bin/ --strip-components=1 -xvf {{ .tmp_dir }}/docker-{{ .docker_version }}.tgz --wildcards docker/*
- name: Generate docker service file
- name: Docker | Deploy the Docker systemd service file
copy:
src: docker.service
dest: /etc/systemd/system/docker.service
- name: Generate containerd service file
- name: Docker | Deploy the containerd systemd service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
- name: Start docker service
- name: Docker | Start and enable Docker and containerd services
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service

@ -1,13 +0,0 @@
---
- name: Check if docker-compose is installed
ignore_errors: true
command: docker-compose --version
register: dockercompose_install_version

- name: Sync docker-compose to remote
when: or (.dockercompose_install_version.stderr | empty | not) (.dockercompose_install_version.stdout | ne (printf "Docker Compose version %s" .dockercompose_version))
copy:
src: >-
{{ .binary_dir }}/image-registry/docker-compose/{{ .dockercompose_version }}/{{ .binary_type }}/docker-compose
dest: /usr/local/bin/docker-compose
mode: 0755

@ -1,4 +1,15 @@
---
- include_tasks: docker.yaml

- include_tasks: docker_compose.yaml
- name: DockerCompose | Verify if Docker Compose is installed on the system
ignore_errors: true
command: docker-compose --version
register: dockercompose_install_version

- name: DockerCompose | Install or update Docker Compose if not present or version mismatch
when: or (.dockercompose_install_version.error | empty | not) (.dockercompose_install_version.stdout | ne (printf "Docker Compose version %s" .dockercompose_version))
copy:
src: >-
{{ .binary_dir }}/image-registry/docker-compose/{{ .dockercompose_version }}/{{ .binary_type }}/docker-compose
dest: /usr/local/bin/docker-compose
mode: 0755

@ -1,12 +1,12 @@
---
- name: Sync registry image to remote
- name: DockerRegistry | Synchronize registry image archive to remote host
copy:
src: >-
{{ .binary_dir }}/image-registry/docker-registry/{{ .docker_registry_version }}/{{ .binary_type }}/docker-registry-{{ .docker_registry_version }}-linux-{{ .binary_type }}.tgz
dest: >-
/opt/docker-registry/{{ .docker_registry_version }}/docker-registry-{{ .docker_registry_version }}-linux-{{ .binary_type }}.tgz

- name: Mount NFS dir
- name: DockerRegistry | Ensure NFS directory is mounted for registry storage
command: |
{{- if .os.release.ID_LIKE | eq "debian" }}
yum update && yum install -y nfs-utils
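
The hunk cuts off inside the OS branch, but on Debian-family hosts the NFS client is normally installed from apt as `nfs-common`, with yum and `nfs-utils` being the RHEL-family pairing, so the visible `yum` line under the `debian` branch reads as inverted. A hedged sketch of the usual split (the mount source and target below are example values):

  #!/usr/bin/env bash
  set -euo pipefail

  # Install the NFS client per OS family, then mount the export.
  if [ -f /etc/debian_version ]; then
    apt-get update && apt-get install -y nfs-common
  else
    yum install -y nfs-utils
  fi
  mkdir -p /mnt/registry
  mount -t nfs nfs-server:/registry /mnt/registry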

@ -26,47 +26,47 @@
- .image_registry.docker_registry.storage.filesystem.nfs_mount | empty | not
- .groups.nfs | default list | len | eq 1

- name: Load registry image
- name: DockerRegistry | Load registry image into Docker
command: |
docker load -i /opt/docker-registry/{{ .docker_registry_version }}/docker-registry-{{ .docker_registry_version }}-linux-{{ .binary_type }}.tgz

- name: Sync image registry cert file to remote
- name: DockerRegistry | Synchronize registry certificate to remote host
copy:
src: >-
{{ .binary_dir }}/pki/image_registry.crt
dest: >-
/opt/docker-registry/{{ .docker_registry_version }}/ssl/server.crt

- name: Sync image registry key file to remote
- name: DockerRegistry | Synchronize registry private key to remote host
copy:
src: >-
{{ .binary_dir }}/pki/image_registry.key
dest: >-
/opt/docker-registry/{{ .docker_registry_version }}/ssl/server.key

- name: Generate registry docker compose
- name: DockerRegistry | Generate Docker Compose file for registry
template:
src: docker-compose.yaml
dest: >-
/opt/docker-registry/{{ .docker_registry_version }}/docker-compose.yml

- name: Generate registry config
- name: DockerRegistry | Generate registry configuration file
template:
src: config.yaml
dest: >-
/opt/docker-registry/{{ .docker_registry_version }}/config.yml

- name: Register registry service
- name: DockerRegistry | Register registry as a systemd service
template:
src: docker-registry.service
dest: /etc/systemd/system/docker-registry.service

- name: Start registry service
- name: DockerRegistry | Start and enable registry systemd service
command: systemctl daemon-reload && systemctl start docker-registry.service && systemctl enable docker-registry.service

- name: wait registry service ready
- name: DockerRegistry | Wait for registry service to become available
command: |
if ! timeout 300 bash -c 'while ! nc -zv localhost 443; do sleep 2; done'; then
echo "ERROR: Harbor did not start within 5 minutes!"
echo "ERROR: Docker Registry did not start within 5 minutes!"
exit 1
fi

@ -1,53 +1,53 @@
---
- name: Sync harbor package to remote
- name: Harbor | Synchronize Harbor offline package to remote host
copy:
src: >-
{{ .binary_dir }}/image-registry/harbor/{{ .harbor_version }}/{{ .binary_type }}/harbor-offline-installer-{{ .harbor_version }}.tgz
dest: >-
/opt/harbor/{{ .harbor_version }}/harbor-offline-installer-{{ .harbor_version }}.tgz

- name: Untar harbor package
- name: Harbor | Extract Harbor offline package
command: |
cd /opt/harbor/{{ .harbor_version }}/ && tar -zxvf harbor-offline-installer-{{ .harbor_version }}.tgz

- name: Sync image registry cert file to remote
- name: Harbor | Synchronize image registry certificate to remote host
copy:
src: >-
{{ .binary_dir }}/pki/image_registry.crt
dest: >-
/opt/harbor/{{ .harbor_version }}/ssl/server.crt

- name: Sync image registry key file to remote
- name: Harbor | Synchronize image registry private key to remote host
copy:
src: >-
{{ .binary_dir }}/pki/image_registry.key
dest: >-
/opt/harbor/{{ .harbor_version }}/ssl/server.key

- name: Generate harbor config
- name: Harbor | Generate Harbor configuration file
template:
src: harbor.yml
dest: >-
/opt/harbor/{{ .harbor_version }}/harbor/harbor.yml

- name: Install harbor
- name: Harbor | Install Harbor registry
command: |
cd /opt/harbor/{{ .harbor_version }}/harbor && /bin/bash install.sh

- name: Register harbor service
- name: Harbor | Register Harbor as a systemd service
template:
src: harbor.service
dest: /etc/systemd/system/harbor.service

- name: Start harbor service
- name: Harbor | Start and enable Harbor service
command: systemctl daemon-reload && systemctl start harbor.service && systemctl enable harbor.service

- name: HA harbor sync images
- name: Harbor | Configure HA and synchronize Harbor images
when:
- .image_registry.ha_vip | empty | not
- .groups.image_registry | len | lt 1
block:
- name: add keepalived service to docker-compose
- name: Harbor | Add keepalived service to Harbor docker-compose
command: |
KEEPALIVED_SERVICE='# keepalived is generated by kubekey.
keepalived:

@ -86,17 +86,16 @@
{ print }
' "$TARGET_FILE" > "$TMP_FILE" && mv "$TMP_FILE" "$TARGET_FILE"
systemctl restart harbor.service
- name: wait harbor service ready
- name: Harbor | Wait for Harbor service to become ready
command: |
if ! timeout 300 bash -c 'while ! nc -zv localhost 443; do sleep 2; done'; then
echo "ERROR: Harbor did not start within 5 minutes!"
exit 1
fi
- name: sync harbor-replications scripts to remote
- name: Harbor | Synchronize harbor-replications script to remote host
template:
src: harbor-replications.sh
dest: /opt/harbor/scripts/harbor-replications.sh
mode: 0755
- name: execute harbor-replications.sh
command: bash /opt/harbor/scripts/harbor-replications.sh

- name: Harbor | Execute harbor-replications script
command: bash /opt/harbor/scripts/harbor-replications.sh

@ -1,7 +1,7 @@
---
- name: Get interface from ha_vip
- name: Keepalived | Discover network interface for HA VIP
block:
- name: Get all interface with cidr
- name: Keepalived | Gather all network interfaces with CIDR addresses
command: |
ip -o addr show | awk '
BEGIN {

@ -24,40 +24,40 @@
'
register: interface
register_type: json
- name: filter interface by ha_vip
- name: Keepalived | Select interface matching HA VIP
set_fact:
ha_vip_interface: >-
{{- $interface := "" }}
{{- range .interface.stdout | default list -}}
{{- if .cidr | ipInCIDR | has $.image_registry.ha_vip -}}
{{- $interface = .interface -}}
{{- end -}}
ha_vip_interface: >-
{{- $interface := "" }}
{{- range .interface.stdout | default list -}}
{{- if .cidr | ipInCIDR | has $.image_registry.ha_vip -}}
{{- $interface = .interface -}}
{{- end -}}
{{ $interface }}
- name: Check if network is exist
{{- end -}}
{{ $interface }}
- name: Keepalived | Ensure matching network interface exists
assert:
that: .kube_vip_interface | empty
fail_msg: "cannot find network interface to match ha_vip"
fail_msg: "Cannot find a network interface that matches the HA VIP."

- name: Sync keepalived image to remote
- name: Keepalived | Synchronize keepalived image to remote host
copy:
src: >-
{{ .binary_dir }}/image-registry/keepalived/{{ .keepalived_version }}/{{ .binary_type }}/keepalived-{{ .keepalived_version }}-linux-{{ .binary_type }}.tgz
dest: >-
/opt/keepalived/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-{{ .binary_type }}.tgz

- name: Load keeplived image
- name: Keepalived | Load keepalived image into Docker
command: |
docker load -i /opt/keepalived/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-{{ .binary_type }}.tgz

- name: Sync keeplived config to remote
- name: Keepalived | Synchronize keepalived configuration file to remote host
template:
src: keepalived.conf
dest: >-
/opt/keepalived/{{ .keepalived_version }}/keepalived.conf
mode: 0664

- name: Sync healthcheck shell to remote
- name: Keepalived | Synchronize healthcheck script to remote host
copy:
src: healthcheck.sh
dest: >-
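
The interface-discovery step walks every CIDR reported by `ip -o addr` and keeps the interface whose subnet contains the HA VIP. A compact shell illustration of the same question, leaning on the kernel's routing lookup instead of template logic (the VIP value is an example):

  #!/usr/bin/env bash
  # Which local interface owns the subnet containing the VIP?
  VIP=192.168.0.100   # assumed example VIP

  # `ip route get` resolves the egress device for the address; for a VIP on
  # a directly connected subnet this is the interface keepalived should bind.
  ip -o route get "$VIP" | sed -n 's/.* dev \([^ ]*\).*/\1/p'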

@ -1,5 +1,5 @@
---
- name: Sync images to remote
- name: ImageRegistry | Synchronize images to remote host
tags: ["only_image"]
copy:
src: >-

@ -7,24 +7,24 @@
dest: >-
{{ .image_registry.images_dir }}

- name: Create harbor project for each image
- name: ImageRegistry | Ensure Harbor project exists for each image
tags: ["only_image"]
command: |
# Iterate through first-level subdirectories in images_dir (skip blobs)
# Traverse first-level subdirectories in images_dir, skipping 'blobs'
for registry_dir in {{ .image_registry.images_dir }}*; do
if [ ! -d "$registry_dir" ] || [ "$(basename "$registry_dir")" = "blobs" ]; then
continue
fi

# Iterate through second-level subdirectories in registry_dir
# Traverse second-level subdirectories in each registry_dir
for project_dir in "$registry_dir"/*; do
if [ ! -d "$project_dir" ]; then
continue
fi

project=$(basename "$project_dir")

# Check if project exists, create if not
# Check if the Harbor project exists; create it if it does not
resp=$(curl -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" -k -X GET "https://{{ .image_registry.auth.registry }}/api/v2.0/projects/${project}")
if echo "$resp" | grep -q '"code":"NOT_FOUND"'; then
curl -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" -k -X POST \

@ -36,7 +36,7 @@
done
when: .image_registry.type | eq "harbor"

- name: Sync images package to image_registry
- name: ImageRegistry | Push images package to image registry
tags: ["only_image"]
image:
push:
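
The project-creation loop above probes Harbor's v2 REST API: `GET /api/v2.0/projects/{name}` returns a NOT_FOUND payload for absent projects, and `POST /api/v2.0/projects` creates one. A condensed sketch of the same exchange (registry host, credentials, and project name are placeholders):

  #!/usr/bin/env bash
  set -euo pipefail

  REGISTRY=harbor.example.com   # placeholder registry host
  AUTH=admin:Harbor12345        # placeholder credentials
  PROJECT=kubesphere            # placeholder project name

  # Probe, then create the project only when Harbor reports it missing.
  resp=$(curl -sk -u "$AUTH" "https://${REGISTRY}/api/v2.0/projects/${PROJECT}")
  if grep -q '"code":"NOT_FOUND"' <<<"$resp"; then
    curl -sk -u "$AUTH" -X POST "https://${REGISTRY}/api/v2.0/projects" \
      -H "Content-Type: application/json" \
      -d "{\"project_name\": \"${PROJECT}\", \"public\": true}"
  fi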

@ -1,10 +1,10 @@
---
- name: Generate nfs config
- name: NFS | Generate NFS server export configuration
template:
src: exports
dest: /etc/exports

- name: Create share directory
- name: NFS | Ensure NFS share directories exist with correct permissions
loop: "{{ .nfs.share_dir | toJson }}"
command: |
if [ ! -d {{ .item }} ]; then

@ -13,11 +13,11 @@
chown nobody:nobody {{ .item }}
fi

- name: Export share directory and start nfs server
- name: NFS | Export share directories and start NFS server service
command: |
exportfs -a
{{- if .os.release.ID_LIKE | eq "debian" }}
systemctl enable nfs-kernel-server && systemctl restart nfs-kernel-server
{{- else if .os.release.ID_LIKE | eq "rhel fedora"}}
systemctl enable nfs-server.service && systemctl restart nfs-server.service
systemctl enable nfs-server.service && systemctl restart nfs-server.service
{{- end }}

@ -1,5 +1,5 @@
---
- name: security enhancement for etcd
- name: Security | Enhance etcd node security permissions
command: |
chmod 700 /etc/ssl/etcd/ssl && chown root:root /etc/ssl/etcd/ssl
chmod 600 /etc/ssl/etcd/ssl/* && chown root:root /etc/ssl/etcd/ssl/*

@ -7,7 +7,7 @@
chmod 550 /usr/local/bin/etcd* && chown root:root /usr/local/bin/etcd*
when: .groups.etcd | default list | has .inventory_hostname

- name: security enhancement for control plane
- name: Security | Apply security best practices for control plane nodes
command: |
chmod 644 /etc/kubernetes && chown root:root /etc/kubernetes
chmod 600 -R /etc/kubernetes && chown root:root -R /etc/kubernetes/*

@ -23,7 +23,7 @@
chmod 640 /etc/systemd/system/k8s-certs-renew* && chown root:root /etc/systemd/system/k8s-certs-renew*
when: .groups.kube_control_plane | default list | has .inventory_hostname

- name: security enhancement for worker
- name: Security | Apply security best practices for worker nodes
command: |
chmod 644 /etc/kubernetes && chown root:root /etc/kubernetes
chmod 600 -R /etc/kubernetes && chown root:root -R /etc/kubernetes/*

@ -36,4 +36,4 @@
chmod 550 -R /opt/cni/bin && chown root:root -R /opt/cni/bin
chmod 640 /var/lib/kubelet/config.yaml && chown root:root /var/lib/kubelet/config.yaml
chmod 640 -R /etc/systemd/system/kubelet.service* && chown root:root -R /etc/systemd/system/kubelet.service*
when: .groups.kube_worker | default list | has .inventory_hostname
when: .groups.kube_worker | default list | has .inventory_hostname

@ -1,9 +1,9 @@
---
- name: Generate local manifest
- name: Local | Generate the local storage manifest
template:
src: local-volume.yaml
dest: /etc/kubernetes/addons/local-volume.yaml

- name: deploy local
- name: Local | Deploy the local storage manifest
command: |
kubectl apply -f /etc/kubernetes/addons/local-volume.yaml

@ -1,12 +1,12 @@
---
- name: Sync nfs provisioner helm to remote
- name: NFS | Synchronize NFS provisioner Helm chart to remote host
copy:
src: >-
{{ .work_dir }}/kubekey/sc/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz
dest: >-
/etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz

- name: Deploy nfs provisioner
- name: NFS | Deploy the NFS provisioner using Helm
command: |
helm upgrade --install nfs-subdir-external-provisioner /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz --namespace kube-system \
--set nfs.server={{ .sc.nfs.server }} --set nfs.path={{ .sc.nfs.path }} \

@ -1,20 +1,19 @@
---
- name: Generate renew script
- name: Certs | Generate Kubernetes certificate renewal script
template:
src: renew_script.sh
dest: /usr/local/bin/kube-scripts/renew_script.sh
mode: 0755

- name: Sync renew service
- name: Certs | Deploy certificate renewal systemd service
copy:
src: k8s-certs-renew.service
dest: /etc/systemd/system/k8s-certs-renew.service

- name: Sync renew timer
- name: Certs | Deploy certificate renewal systemd timer
copy:
src: k8s-certs-renew.timer
dest: /etc/systemd/system/k8s-certs-renew.timer

- name: Enable renew service
command:
systemctl daemon-reload && systemctl enable --now k8s-certs-renew.timer
- name: Certs | Enable and start certificate renewal timer
command: systemctl daemon-reload && systemctl enable --now k8s-certs-renew.timer

@ -1,21 +1,20 @@
---
- name: Generate coredns config
- name: DNS | Generate CoreDNS configuration file
template:
src: dns/coredns.yaml
dest: /etc/kubernetes/coredns.yaml

# change clusterIP for service
# change configmap for coredns
- name: Apply coredns config
# Update the clusterIP for the service and modify the CoreDNS ConfigMap as needed
- name: DNS | Apply CoreDNS configuration and restart deployment
command: |
kubectl delete svc kube-dns -n kube-system
kubectl apply -f /etc/kubernetes/coredns.yaml && kubectl rollout restart deployment -n kube-system coredns

- name: Generate nodelocaldns daemonset
- name: DNS | Generate NodeLocalDNS DaemonSet manifest
template:
src: dns/nodelocaldns.yaml
dest: /etc/kubernetes/nodelocaldns.yaml

- name: Apply nodelocaldns daemonset
- name: DNS | Deploy NodeLocalDNS DaemonSet
command: |
kubectl apply -f /etc/kubernetes/nodelocaldns.yaml

@ -1,29 +1,5 @@
---
- name: Sync external etcd config
when:
- and (.kubernetes.etcd.deployment_type | eq "external") (.groups.etcd | default list | empty | not)
- .groups.kube_control_plane | default list | has .inventory_hostname
block:
- name: Sync etcd ca file to remote
copy:
src: >-
{{ .work_dir }}/kubekey/pki/root.crt
dest: /etc/kubernetes/pki/etcd/ca.crt
mode: 0755
- name: Sync etcd cert files to remote
copy:
src: >-
{{ .work_dir }}/kubekey/pki/etcd.crt
dest: /etc/kubernetes/pki/etcd/client.crt
mode: 0755
- name: Sync etcd key files to remote
copy:
src: >-
{{ .work_dir }}/kubekey/pki/etcd.key
dest: /etc/kubernetes/pki/etcd/client.key
mode: 0755

- name: Generate kubeadm init config
- name: Init | Generate kubeadm initialization configuration
template:
src: >-
{{- if .kube_version | semverCompare ">=v1.24.0" -}}

@ -33,19 +9,19 @@
{{- end -}}
dest: /etc/kubernetes/kubeadm-config.yaml

- name: Init kubernetes cluster
- name: Init | Initialize Kubernetes cluster
block:
- name: pre init
- name: Init | Pre-initialization for kube-vip
when:
- .kube_version | semverCompare ">=v1.29.0"
- eq .kubernetes.control_plane_endpoint.type "kube_vip"
command: |
sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \
/etc/kubernetes/manifests/kube-vip.yaml
- name: init
- name: Init | Run kubeadm init
command: |
/usr/local/bin/kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull {{ if not .kubernetes.kube_proxy.enabled }}--skip-phases=addon/kube-proxy{{ end }}
- name: post init
- name: Init | Post-initialization for kube-vip
when:
- .kube_version | semverCompare ">=v1.29.0"
- eq .kubernetes.control_plane_endpoint.type "kube_vip"

@ -53,9 +29,10 @@
sed -i 's#path: /etc/kubernetes/super-admin.conf#path: /etc/kubernetes/admin.conf#' \
/etc/kubernetes/manifests/kube-vip.yaml

# reset localDNS 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}.
# if not the control_plane_endpoint will valid after kube_vip pod running. the task which will execute kubectl apply in current node may be failed.
- name: reset control_plane_endpoint localDNS
# Reset local DNS for control_plane_endpoint to 127.0.0.1 and ::1.
# This ensures the control_plane_endpoint resolves locally before kube-vip is running,
# preventing failures for tasks that execute kubectl apply on the current node.
- name: Init | Reset local DNS for control_plane_endpoint
command: |
sed -i ':a;$!{N;ba};s@# kubekey control_plane_endpoint BEGIN.*# kubekey control_plane_endpoint END@@' {{ .item }}
cat >> {{ .item }} <<EOF

@ -66,27 +43,27 @@
EOF
loop: "{{ .localDNS | toJson }}"

- name: Copy kubeconfig to default dir
- name: Init | Copy kubeconfig to default directory
command: |
if [ ! -d /root/.kube ]; then
mkdir -p /root/.kube
fi
cp -f /etc/kubernetes/admin.conf /root/.kube/config
when: .kubernetes_install_service.stdout | eq "inactive"
when: .kubernetes_install_LoadState.stdout | eq "not-found"

- name: Set to worker node
- name: Init | Configure node as worker
when: .groups.kube_worker | default list | has .inventory_hostname
block:
- name: Remote master taint
- name: Init | Remove master/control-plane taints from node
ignore_errors: true
command: |
/usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/master=:NoSchedule-
/usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/control-plane=:NoSchedule-
- name: Add work label
- name: Init | Add worker label to node
command: |
/usr/local/bin/kubectl label --overwrite node {{ .hostname }} node-role.kubernetes.io/worker=

- name: Add annotations
- name: Init | Add custom annotations to node
when: .annotations | empty | not
command: |
kubectl annotate {{ .hostname }} {{- range $k,$v := .annotations }}{{ printf "%s=%s" $k $v}} {{- end }}
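
The worker-node steps lean on two kubectl idioms: appending `-` to a taint removes it, and `--overwrite` makes relabeling idempotent. A compact illustration (the node name is a placeholder):

  #!/usr/bin/env bash
  NODE=node1   # placeholder node name

  # Trailing "-" removes the taint; an absent taint just returns an error,
  # which the playbook tolerates via ignore_errors.
  kubectl taint nodes "$NODE" node-role.kubernetes.io/control-plane=:NoSchedule-

  # --overwrite makes the label assignment safe to repeat.
  kubectl label --overwrite node "$NODE" node-role.kubernetes.io/worker=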

@ -1,5 +1,5 @@
---
- name: select init node
- name: InitKubernetes | Select the initialization node for the cluster
run_once: true
add_hostvars:
hosts: k8s_cluster

@ -8,9 +8,9 @@
{{- $initNodes := list -}}
{{- $notInitNodes := list -}}
{{- range .groups.kube_control_plane -}}
{{- if index $.hostvars . "kubernetes_install_service" "stdout" | eq "active" -}}
{{- if index $.hostvars . "kubernetes_install_LoadState" "stdout" | eq "loaded" -}}
{{- $initNodes = append $initNodes . -}}
{{- else if index $.hostvars . "kubernetes_install_service" "stdout" | eq "inactive" -}}
{{- else if index $.hostvars . "kubernetes_install_LoadState" "stdout" | eq "not-found" -}}
{{- $notInitNodes = append $notInitNodes . -}}
{{- end -}}
{{- end -}}

@ -24,7 +24,7 @@
{{ index $notInitNodes (randInt 0 ((sub ($notInitNodes | len) 1) | int)) }}
{{- end -}}

- name: Set control_plane_endpoint hosts to localDNS file
- name: InitKubernetes | Configure control_plane_endpoint in local DNS files
when:
- or (.kubernetes.control_plane_endpoint.type | eq "local") (.kubernetes.control_plane_endpoint.type | eq "haproxy")
- .inventory_hostname | eq .init_kubernetes_node | not

@ -42,34 +42,34 @@
EOF
loop: "{{ .localDNS | toJson }}"

- name: Init kubernetes
- name: InitKubernetes | Initialize the Kubernetes cluster and distribute credentials
when: eq .inventory_hostname .init_kubernetes_node
block:
- include_tasks: init_kubernetes.yaml
when: .kubernetes_install_service.stdout | eq "inactive"
when: .kubernetes_install_LoadState.stdout | eq "not-found"
- include_tasks: deploy_cluster_dns.yaml
- name: Fetch kubeconfig to local
- name: InitKubernetes | Fetch kubeconfig to local workspace
fetch:
src: /etc/kubernetes/admin.conf
dest: >-
{{ .work_dir }}/kubekey/kubeconfig
- name: Generate certificate key by kubeadm
- name: InitKubernetes | Generate certificate key using kubeadm
command: |
/usr/local/bin/kubeadm init phase upload-certs --upload-certs --config=/etc/kubernetes/kubeadm-config.yaml 2>&1 \
| awk '/Using certificate key:/{getline; print}'
register: kubeadm_cert_result
- name: add certificate key to all hosts
- name: InitKubernetes | Distribute certificate key to all cluster hosts
add_hostvars:
hosts: k8s_cluster
vars:
kubeadm_cert: >-
{{ .kubeadm_cert_result.stdout }}
- name: Generate kubeadm token
- name: InitKubernetes | Generate and distribute kubeadm token
block:
- name: Generate token by kubeadm
- name: InitKubernetes | Generate kubeadm join token
command: /usr/local/bin/kubeadm token create
register: kubeadm_token_result
- name: add token to all hosts
- name: InitKubernetes | Share kubeadm token with all cluster hosts
add_hostvars:
hosts: k8s_cluster
vars:
@ -1,5 +1,5 @@
---
- name: Generate kubeadm join config
- name: Join | Generate kubeadm join configuration file
template:
src: >-
{{- if .kube_version | semverCompare ">=v1.24.0" -}}

@ -9,38 +9,39 @@
{{- end -}}
dest: /etc/kubernetes/kubeadm-config.yaml

- name: Join kubernetes cluster
- name: Join | Execute kubeadm join to add node to the Kubernetes cluster
command: |
/usr/local/bin/kubeadm join --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull

- name: Sync kubeconfig to remote
- name: Join | Synchronize kubeconfig to remote node
copy:
src: >-
{{ .work_dir }}/kubekey/kubeconfig
dest: /root/.kube/config

- name: Set to worker node
- name: Join | Configure node as worker
when: .groups.kube_worker | default list | has .inventory_hostname
block:
- name: Remote master taint
- name: Join | Remove master and control-plane taints from node
ignore_errors: true
command: |
/usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/master=:NoSchedule-
/usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/control-plane=:NoSchedule-
- name: Add work label
- name: Join | Add worker label to node
command: |
/usr/local/bin/kubectl label --overwrite node {{ .hostname }} node-role.kubernetes.io/worker=

- name: Add annotations
- name: Join | Add custom annotations to node
when: .annotations | empty | not
command: |
kubectl annotate {{ .hostname }} {{- range $k,$v := .annotations }}{{ printf "%s=%s" $k $v}} {{- end }}

# reset localDNS 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}.
# if not the control_plane_endpoint will valid after kube_vip pod running. the task which will execute kubectl apply in current node may be failed.
- name: reset control_plane_endpoint localDNS
# Reset local DNS for control_plane_endpoint to 127.0.0.1 and ::1.
# This ensures the control_plane_endpoint resolves locally before kube-vip is running,
# preventing failures for tasks that execute kubectl apply on the current node.
- name: Join | Reset local DNS for control_plane_endpoint
block:
- name: reset control_plane localDNS
- name: Join | Reset local DNS on control plane nodes
when:
- .groups.kube_control_plane | default list | has .inventory_hostname
command: |

@ -52,7 +53,7 @@
# kubekey control_plane_endpoint END
EOF
loop: "{{ .localDNS | toJson }}"
- name: reset worker localDNS
- name: Join | Reset local DNS on worker nodes (for haproxy endpoint)
when:
- .groups.kube_worker | default list | has .inventory_hostname
- .kubernetes.control_plane_endpoint.type | eq "haproxy"

@ -1,4 +1,4 @@
- include_tasks: join_kubernetes.yaml
when:
- ne .inventory_hostname .init_kubernetes_node
- .kubernetes_install_service.stdout | eq "inactive"
- .kubernetes_install_LoadState.stdout | eq "not-found"

@ -1,15 +1,15 @@
---
- name: Generate haproxy config
- name: Haproxy | Generate HAProxy configuration file
template:
src: haproxy/haproxy.cfg
dest: /etc/kubekey/haproxy/haproxy.cfg

- name: Get md5 for haproxy config
- name: Haproxy | Calculate MD5 checksum of HAProxy configuration
command: |
md5sum /etc/kubekey/haproxy/haproxy.cfg | cut -d" " -f1
register: haproxy_cfg_md5

- name: Genrate haproxy manifest
- name: Haproxy | Generate HAProxy manifest for Kubernetes
template:
src: haproxy/haproxy.yaml
dest: /etc/kubernetes/manifests/haproxy.yaml

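Registering the config checksum before templating the manifest suggests the digest is embedded into the static pod manifest so kubelet recreates HAProxy whenever the configuration changes; that wiring lives in the template itself, which this hunk does not show, so treat it as an assumption. The checksum step on its own, with an illustrative digest:

```bash
# md5sum prints "<digest>  <filename>"; cut keeps only the digest field.
md5sum /etc/kubekey/haproxy/haproxy.cfg | cut -d" " -f1
# e.g. 3b6a4f0f6f3e0b9e2f9c8a7d6e5c4b3a
```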
@ -1,7 +1,7 @@
---
- name: Get interface from kube_vip
- name: Kubevip | Discover network interface for kube-vip
block:
- name: Get all interface with cidr
- name: Kubevip | Gather all network interfaces with CIDR addresses
command: |
ip -o addr show | awk '
BEGIN {

@ -24,22 +24,22 @@
'
register: interface
register_type: json
- name: filter interface by ha_vip
- name: Kubevip | Select network interface matching kube-vip address
set_fact:
kube_vip_interface: >-
{{- $interface := "" }}
{{- range .interface.stdout | default list -}}
{{- if .cidr | ipInCIDR | has $.kubernetes.control_plane_endpoint.kube_vip.address -}}
{{- $interface = .interface -}}
{{- end -}}
kube_vip_interface: >-
{{- $interface := "" }}
{{- range .interface.stdout | default list -}}
{{- if .cidr | ipInCIDR | has $.kubernetes.control_plane_endpoint.kube_vip.address -}}
{{- $interface = .interface -}}
{{- end -}}
{{ $interface }}
- name: Check if network is exist
{{- end -}}
{{ $interface }}
- name: Kubevip | Ensure matching network interface exists
assert:
that: .kube_vip_interface | empty
fail_msg: "cannot find network interface to match kube_vip"
fail_msg: "Kubevip: Unable to find a network interface matching the kube-vip address."

- name: Generate kube_vip manifest
- name: Kubevip | Generate kube-vip manifest file
template:
src: >-
kubevip/kubevip.{{ .kubernetes.control_plane_endpoint.kube_vip.mode }}

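The awk program elided by this hunk emits interface/CIDR records that the template then filters with `ipInCIDR`. Outside the playbook, the same lookup can be approximated in plain shell; the VIP value is a placeholder, and python3 is assumed to be available for the CIDR membership test:

```bash
# Find the interface whose assigned CIDR contains the kube-vip address.
vip=192.168.0.100
ip -o addr show | while read -r _ ifname _ cidr _; do
  # Exit 0 only when $vip falls inside the interface's network.
  if python3 -c "import ipaddress,sys; net=ipaddress.ip_network('$cidr', strict=False); sys.exit(0 if ipaddress.ip_address('$vip') in net else 1)" 2>/dev/null; then
    echo "$ifname"
  fi
done
```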
@ -1,46 +1,48 @@
# set localDNS for each .kubernetes.control_plane_endpoint.type
# local:
# before init cluster
# - control_plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - worker: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# after init cluster
# - control_plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - worker: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# before join cluster
# - control_plane: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
# - worker: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
# after join cluster
# - control_plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - worker: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
# HighAvailability: Configure localDNS for each .kubernetes.control_plane_endpoint.type
#
# For 'local' endpoint type:
# Before cluster initialization:
# - Control plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - Worker: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# After cluster initialization:
# - Control plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - Worker: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# Before joining cluster:
# - Control plane: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
# - Worker: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
# After joining cluster:
# - Control plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - Worker: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
#
# kube_vip:
# before init cluster
# - control_plane: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# - worker: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# after init cluster
# - control_plane: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# - worker: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# before join cluster
# - control_plane: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# - worker: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# after join cluster
# - control_plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - worker: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# For 'kube_vip' endpoint type:
# Before cluster initialization:
# - Control plane: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# - Worker: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# After cluster initialization:
# - Control plane: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# - Worker: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# Before joining cluster:
# - Control plane: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# - Worker: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
# After joining cluster:
# - Control plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - Worker: {{ .kubernetes.control_plane_endpoint.kube_vip.address }} {{ .kubernetes.control_plane_endpoint.host }}
#
# haproxy:
# before init cluster
# - control_plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - worker: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# after init cluster
# - control_plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - worker: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# before join cluster
# - control_plane: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
# - worker: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
# after join cluster
# - control_plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - worker: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
- name: Set Control Plane to localDNS file
# For 'haproxy' endpoint type:
# Before cluster initialization:
# - Control plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - Worker: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# After cluster initialization:
# - Control plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - Worker: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# Before joining cluster:
# - Control plane: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
# - Worker: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
# After joining cluster:
# - Control plane: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - Worker: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}

- name: HighAvailability | Configure localDNS for control plane endpoint
command: |
sed -i ':a;$!{N;ba};s@# kubekey control_plane_endpoint BEGIN.*# kubekey control_plane_endpoint END@@' {{ .item }}
cat >> {{ .item }} <<EOF

@ -55,14 +57,16 @@
EOF
loop: "{{ .localDNS | toJson }}"

# install with static pod: https://kube-vip.io/docs/installation/static/
- include_tasks: high-availability/kube_vip.yaml
# HighAvailability: Install kube-vip as a static pod. See: https://kube-vip.io/docs/installation/static/
- name: HighAvailability | Include kube-vip static pod tasks
include_tasks: high-availability/kube_vip.yaml
when:
- .kubernetes.control_plane_endpoint.type | eq "kube_vip"
- .groups.kube_control_plane | default list | has .inventory_hostname

# only deploy haproxy in worker node. control_plane node use local static pod: kube-apiserver.
- include_tasks: high-availability/haproxy.yaml
# HighAvailability: Deploy HAProxy only on worker nodes. Control plane nodes use the local static pod for kube-apiserver.
- name: HighAvailability | Include HAProxy deployment tasks for worker nodes
include_tasks: high-availability/haproxy.yaml
when:
- .kubernetes.control_plane_endpoint.type | eq "haproxy"
- .groups.kube_worker | default list | has .inventory_hostname

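A concrete reading of the table above for the 'haproxy' endpoint type, with placeholder values: a worker about to join resolves the endpoint to the init node, and after joining it is flipped back to loopback once the local HAProxy static pod can front the API server:

```bash
# Before join (worker node), /etc/hosts contains:
#   # kubekey control_plane_endpoint BEGIN
#   192.168.0.11 lb.kubesphere.local     # the init node's address
#   # kubekey control_plane_endpoint END

# After join (worker node), the block is rewritten to:
#   # kubekey control_plane_endpoint BEGIN
#   127.0.0.1 lb.kubesphere.local        # served by the local HAProxy static pod
#   # kubekey control_plane_endpoint END
```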
@ -1,80 +1,84 @@
---
- name: Check if helm is installed
- name: Binary | Verify if Helm is already installed
ignore_errors: true
command: helm version
command: helm version --template "{{ .Version }}"
register: helm_install_version
- name: Install helm
when: or (.helm_install_version.stderr | empty | not) (.helm_install_version.stdout | contains (printf "Version:\"%s\"" .helm_version) | not)

- name: Binary | Install Helm if not present or version mismatch
when: or (.helm_install_version.error | empty | not) (.helm_install_version.stdout | ne .helm_version)
block:
- name: Sync helm to remote
- name: Binary | Copy Helm archive to remote host
copy:
src: >-
{{ .binary_dir }}/helm/{{ .helm_version }}/{{ .binary_type }}/helm-{{ .helm_version }}-linux-{{ .binary_type }}.tar.gz
dest: >-
{{ .tmp_dir }}/helm-{{ .helm_version }}-linux-{{ .binary_type }}.tar.gz
- name: Install helm
- name: Binary | Extract and install Helm binary
command: |
tar --strip-components=1 -zxvf {{ .tmp_dir }}/helm-{{ .helm_version }}-linux-{{ .binary_type }}.tar.gz -C /usr/local/bin linux-{{ .binary_type }}/helm

- name: Check if kubeadm is installed
- name: Binary | Check if kubeadm is installed
ignore_errors: true
command: kubeadm version -o short
register: kubeadm_install_version
- name: Install kubeadm
when: or (.kubeadm_install_version.stderr | empty | not) (.kubeadm_install_version.stdout | ne .kube_version)

- name: Binary | Install kubeadm if not present or version mismatch
when: or (.kubeadm_install_version.error | empty | not) (.kubeadm_install_version.stdout | ne .kube_version)
copy:
src: >-
{{ .binary_dir }}/kube/{{ .kube_version }}/{{ .binary_type }}/kubeadm
dest: /usr/local/bin/kubeadm
mode: 0755

- name: Check if kubectl is installed
- name: Binary | Check if kubectl is installed
ignore_errors: true
command: kubectl version --short
register: kubectl_install_version
register_type: yaml
- name: Sync kubectl to remote

- name: Binary | Install kubectl if not present or version mismatch
when: |
or (.kubectl_install_version.stderr | empty | not) ((get .kubectl_install_version.stdout "Server Version") | ne .kube_version)
or (.kubectl_install_version.error | empty | not) ((get .kubectl_install_version.stdout "Server Version") | ne .kube_version)
copy:
src: >-
{{ .binary_dir }}/kube/{{ .kube_version }}/{{ .binary_type }}/kubectl
dest: /usr/local/bin/kubectl
mode: 0755

- name: Check if kubelet is installed
- name: Binary | Check if kubelet is installed
ignore_errors: true
command: kubelet --version
register: kubelet_install_version
- name: Install kubelet
when: or (.kubelet_install_version.stderr | empty | not) (.kubelet_install_version.stdout | ne (printf "Kubernetes %s" .kube_version))

- name: Binary | Install kubelet if not present or version mismatch
when: or (.kubelet_install_version.error | empty | not) (.kubelet_install_version.stdout | ne (printf "Kubernetes %s" .kube_version))
block:
- name: Sync kubelet to remote
- name: Binary | Copy kubelet binary to remote host
copy:
src: >-
{{ .binary_dir }}/kube/{{ .kube_version }}/{{ .binary_type }}/kubelet
dest: /usr/local/bin/kubelet
mode: 0755
- name: Sync kubelet env to remote
- name: Binary | Deploy kubelet environment configuration
template:
src: kubelet/kubelet.env
dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
- name: Sync kubelet service to remote
- name: Binary | Copy kubelet systemd service file
copy:
src: kubelet.service
dest: /etc/systemd/system/kubelet.service
- name: Register kubelet service
- name: Binary | Reload systemd and enable kubelet service
command: systemctl daemon-reload && systemctl enable kubelet.service

- name: Install cni plugins
- name: Binary | Install CNI plugins if version specified
when: .cni_plugins_version | empty | not
block:
- name: Sync cni-plugin to remote
- name: Binary | Copy CNI plugins archive to remote host
copy:
src: >-
{{ .binary_dir }}/cni/plugins/{{ .cni_plugins_version }}/{{ .binary_type }}/cni-plugins-linux-{{ .binary_type }}-{{ .cni_plugins_version }}.tgz
dest: >-
{{ .tmp_dir }}/cni-plugins-linux-{{ .binary_type }}-{{ .cni_plugins_version }}.tgz
- name: Install cni-plugin
- name: Binary | Extract and install CNI plugins
command: |
tar -zxvf {{ .tmp_dir }}/cni-plugins-linux-{{ .binary_type }}-{{ .cni_plugins_version }}.tgz -C /opt/cni/bin/

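The switch from plain `helm version` to `helm version --template "{{ .Version }}"` makes the registered stdout directly comparable to `.helm_version` with `ne`, instead of substring-matching the verbose build info. For example (the version shown is illustrative):

```bash
$ helm version
version.BuildInfo{Version:"v3.14.2", GitCommit:"...", GoVersion:"go1.21.7"}
$ helm version --template "{{ .Version }}"
v3.14.2
```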
@ -2,11 +2,11 @@

- include_tasks: high-availability/main.yaml

- name: Add kube user
- name: PreKubernetes | Ensure the 'kube' system user exists
command: |
id kube &>/dev/null || useradd -M -c 'Kubernetes user' -s /sbin/nologin -r kube

- name: Create kube directories
- name: PreKubernetes | Create and set ownership for required Kubernetes directories
command: |
if [ ! -d "{{ .item.path }}" ]; then
mkdir -p {{ .item.path }} && chown kube -R {{ .item.chown }}

@ -22,41 +22,62 @@
- {path: "/opt/cni/bin", chown: "/opt/cni"}
- {path: "/var/lib/calico", chown: "/var/lib/calico"}

- name: Sync audit policy file to remote
- name: PreKubernetes | Synchronize audit policy file to remote node
copy:
src: audit
dest: /etc/kubernetes/audit/
when: .kubernetes.audit

- name: Sync ca file to kube_control_plane
- name: PreKubernetes | Synchronize cluster CA files to control plane nodes
when:
- .kubernetes.certs.ca_cert | empty | not
- .kubernetes.certs.ca_key | empty | not
- .groups.kube_control_plane | has .inventory_hostname
block:
- name: Sync ca cert to kube_control_plane
- name: PreKubernetes | Copy CA certificate to control plane node
copy:
src: >-
{{ .kubernetes.certs.ca_cert }}
dest: /etc/kubernetes/pki/ca.crt
- name: Sync ca key to kube_control_plane
- name: PreKubernetes | Copy CA private key to control plane node
copy:
src: >-
{{ .kubernetes.certs.ca_key }}
dest: /etc/kubernetes/pki/ca.key

- name: Sync front-proxy ca file to kube_control_plane
- name: PreKubernetes | Ensure external etcd certificates are present on control plane nodes
when:
- .kubernetes.etcd.deployment_type | eq "external"
- .groups.kube_control_plane | default list | has .inventory_hostname
block:
- name: PreKubernetes | Copy etcd CA certificate to control plane node
copy:
src: >-
{{ .work_dir }}/kubekey/pki/root.crt
dest: /etc/kubernetes/pki/etcd/ca.crt
- name: PreKubernetes | Copy etcd client certificate to control plane node
copy:
src: >-
{{ .work_dir }}/kubekey/pki/etcd.crt
dest: /etc/kubernetes/pki/etcd/client.crt
- name: PreKubernetes | Copy etcd client key to control plane node
copy:
src: >-
{{ .work_dir }}/kubekey/pki/etcd.key
dest: /etc/kubernetes/pki/etcd/client.key

- name: PreKubernetes | Synchronize front-proxy CA files to control plane nodes
when:
- .kubernetes.certs.front_proxy_cert | empty | not
- .kubernetes.certs.front_proxy_key | empty | not
- .groups.kube_control_plane | has .inventory_hostname
block:
- name: Sync front-proxy cert to kube_control_plane
- name: PreKubernetes | Copy front-proxy CA certificate to control plane node
copy:
src: >-
{{ .kubernetes.certs.front_proxy_cert }}
dest: /etc/kubernetes/pki/front-proxy-ca.crt
- name: Sync front-proxy key to kube_control_plane
- name: PreKubernetes | Copy front-proxy CA private key to control plane node
copy:
src: >-
{{ .kubernetes.certs.front_proxy_key }}

@ -1,20 +1,24 @@
---
- name: Check artifact is exits
command:
- name: Artifact | Ensure artifact file exists
command: |
if [ ! -f "{{ .artifact.artifact_file }}" ]; then
echo "Error: Artifact file '{{ .artifact.artifact_file }}' does not exist."
exit 1
fi

- name: Check artifact file type
command:
- name: Artifact | Validate artifact file extension
command: |
if [[ "{{ .artifact.artifact_file }}" != *{{ .item }} ]]; then
echo "Error: Artifact file '{{ .artifact.artifact_file }}' does not have the required extension '{{ .item }}'."
exit 1
fi
loop: ['.tgz','.tar.gz']

- name: Check md5 of artifact
command:
if [[ "$(md5sum {{ .artifact.artifact_file }})" != "{{ .artifact.artifact_md5 }}" ]]; then
- name: Artifact | Verify artifact MD5 checksum
command: |
actual_md5=$(md5sum {{ .artifact.artifact_file }} | awk '{print $1}')
if [[ "$actual_md5" != "{{ .artifact.artifact_md5 }}" ]]; then
echo "Error: MD5 checksum mismatch for '{{ .artifact.artifact_file }}'. Expected '{{ .artifact.artifact_md5 }}', got '$actual_md5'."
exit 1
fi
when:

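The rewritten checksum test extracts only the digest field because `md5sum` prints the file name after the hash, so comparing the full output against a bare digest could never match. For example (digest shown is the md5 of an empty file):

```bash
$ md5sum artifact.tar.gz
d41d8cd98f00b204e9800998ecf8427e  artifact.tar.gz
$ md5sum artifact.tar.gz | awk '{print $1}'
d41d8cd98f00b204e9800998ecf8427e
```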
@ -1,17 +1,17 @@
---
- name: Stop if container manager is not docker or containerd
- name: CRI | Fail if container manager is not docker or containerd
assert:
that: .cluster_require.require_container_manager | has .cri.container_manager
fail_msg: >-
the container manager:{{ .cri.container_manager }}, must in "{{ .cluster_require.require_container_manager | toJson }}"
The specified container manager "{{ .cri.container_manager }}" is not supported. Please use one of the following: {{ .cluster_require.require_container_manager | toJson }}.
run_once: true
when: .cri.container_manager | empty | not

- name: Ensure minimum containerd version
- name: CRI | Validate minimum required containerd version
assert:
that: .containerd_version | semverCompare (printf ">=%s" .cluster_require.containerd_min_version_required)
fail_msg: >-
containerd_version is too low. Minimum version {{ .cluster_require.containerd_min_version_required }}
The detected containerd version ({{ .containerd_version }}) is below the minimum required version: {{ .cluster_require.containerd_min_version_required }}.
run_once: true
when:
- .containerd_version | empty | not

@ -1,68 +1,50 @@
---
- name: Stop if etcd deployment type is not internal or external
- name: ETCD | Fail if etcd deployment type is not 'internal' or 'external'
assert:
that: .cluster_require.require_etcd_deployment_type | has .kubernetes.etcd.deployment_type
fail_msg: >-
the etcd deployment type, should be internal or external but got {{ .kubernetes.etcd.deployment_type }}
Invalid etcd deployment type: '{{ .kubernetes.etcd.deployment_type }}'. Expected 'internal' or 'external'.
run_once: true
when: .kubernetes.etcd.deployment_type | empty | not

- name: Stop if etcd group is empty in external etcd mode
- name: ETCD | Fail if etcd group is empty in external etcd mode
assert:
that: .groups.etcd | empty | not
fail_msg: "group \"etcd\" cannot be empty in external etcd mode"
fail_msg: >-
The "etcd" group must not be empty when using external etcd mode.
run_once: true
when: .kubernetes.etcd.deployment_type | eq "external"

- name: Stop if even number of etcd hosts
- name: ETCD | Fail if the number of etcd hosts is even
assert:
that: (mod (.groups.etcd | len) 2) | eq 1
fail_msg: "etcd number should be odd number"
fail_msg: >-
The number of etcd nodes must be odd to ensure quorum. Current count: {{ .groups.etcd | len }}.
run_once: true
when: .kubernetes.etcd.deployment_type | eq "external"

## https://cwiki.yunify.com/pages/viewpage.action?pageId=145920824
- name: Check dev io for etcd
- name: ETCD | Validate disk I/O performance for etcd
when:
- .groups.etcd | default list | has .inventory_hostname
block:
- name: Check fio is exist
- name: ETCD | Check if fio is installed
ignore_errors: true
command: fio --version
register: fio_install_version
- name: Test dev io by fio
when: .fio_install_version.stderr | empty
- name: ETCD | Run fio disk I/O test
when: .fio_install_version.error | empty
block:
- name: Get fio result
- name: ETCD | Execute fio and collect results
command: |
mkdir -p {{ .tmp_dir }}/etcd/test-data
fio --rw=write --ioengine=sync --fdatasync=1 --directory={{ .tmp_dir }}/etcd/test-data --size=22m --bs=2300 --name=mytest --output-format=json
register: fio_result
- name: Check fio result
- name: ETCD | Assert disk fsync latency meets requirements
assert:
that: (index (.fio_result.stdout.jobs | first) "sync" "lat_ns" "percentile" "90.000000") | le .cluster_require.etcd_disk_wal_fysnc_duration_seconds
fail_msg: >-
etcd_disk_wal_fysnc_duration_seconds: {{ index (.fio_result.stdout.jobs | first) "sync" "lat_ns" "percentile" "90.000000" }}ns is more than {{ .cluster_require.etcd_disk_wal_fysnc_duration_seconds }}ns
The 90th percentile fsync latency is {{ index (.fio_result.stdout.jobs | first) "sync" "lat_ns" "percentile" "90.000000" }}ns, which exceeds the maximum allowed: {{ .cluster_require.etcd_disk_wal_fysnc_duration_seconds }}ns.
always:
- name: Clean test data dir
- name: ETCD | Clean up fio test data directory
command: rm -rf {{ .tmp_dir }}/etcd/test-data

- name: Check if etcd has installed
when: .groups.etcd | default list | has .inventory_hostname
block:
- name: Get etcd service
ignore_errors: true
command: systemctl is-active etcd.service
register: etcd_install_service
- name: Get etcd version
ignore_errors: true
command: etcd --version
register: etcd_install_version
register_type: yaml
- name: Check if etcd has match the version
when: .etcd_install_service.stdout | eq "active"
assert:
that: |
eq (get .etcd_install_version.stdout "etcd Version") (.etcd_version | default "" | trimPrefix "v")
fail_msg: >-
expected install etcd with version: {{ .etcd_version }} but has installed with: {{ get .etcd_install_version.stdout "etcd Version" }}.

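The fio assertion reads the 90th-percentile fdatasync latency out of fio's JSON output. Run by hand, the same figure can be pulled with jq; this assumes a recent fio that emits `sync.lat_ns.percentile` in its JSON, and that jq is installed:

```bash
mkdir -p /tmp/etcd-test-data
fio --rw=write --ioengine=sync --fdatasync=1 --directory=/tmp/etcd-test-data \
    --size=22m --bs=2300 --name=mytest --output-format=json \
  | jq '.jobs[0].sync.lat_ns.percentile."90.000000"'
rm -rf /tmp/etcd-test-data
```

The printed value (in nanoseconds) is what gets compared against `etcd_disk_wal_fysnc_duration_seconds`.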
@ -1,29 +1,29 @@
- name: Ensure image registry authentication is successful
- name: ImageRegistry | Verify successful authentication to image registry
when: .image_registry.auth | empty | not
run_once: true
command: |
HTTP_CODE=$(curl -skLI -w "%{http_code}" -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" "https://{{ .image_registry.auth.registry }}/v2/" -o /dev/null)
if [[ "$HTTP_CODE" == "200" ]]; then
echo "Authentication to image registry succeeded."
echo "Successfully authenticated to the image registry."
else
echo "Authentication to image registry {{ .image_registry.auth.registry }} failed." >&2
echo "Failed to authenticate to the image registry at {{ .image_registry.auth.registry }}." >&2
fi

# image_registry is installed by docker_compose
- name: Ensure docker and docker-compose versions are set for image registry
# The image_registry is deployed using docker_compose
- name: ImageRegistry | Ensure docker and docker-compose versions are specified
when: .groups.image_registry | empty | not
assert:
that:
- .docker_version | empty | not
- .dockercompose_version | empty | not
- .docker_version | empty | not
- .dockercompose_version | empty | not
msg: >-
Both "docker_version" and "dockercompose_version" must be specified for the image registry.
Both "docker_version" and "dockercompose_version" must be provided for the image registry deployment.

- name: Ensure keepalived_version is set for high availability image registry
- name: ImageRegistry | Ensure keepalived_version is specified for high availability
when:
- .image_registry.ha_vip | empty | not
- .groups.image_registry | len | lt 1
assert:
that: .keepalived_version | empty | not
msg: >-
"keepalived_version" must be specified when the image registry is configured for high availability.
"keepalived_version" must be specified when configuring the image registry for high availability.

@ -1,10 +1,10 @@
- name: Should defined internal_ipv4 or internal_ipv6
- name: Kubernetes | Ensure either internal_ipv4 or internal_ipv6 is defined
assert:
that: or (.internal_ipv4 | empty | not) (.internal_ipv6 | empty | not)
fail_msg: >-
"internal_ipv4" and "internal_ipv6" cannot both be empty
Either "internal_ipv4" or "internal_ipv6" must be specified. Both cannot be empty.

- name: Check kubevip if valid
- name: Kubernetes | Validate kube-vip address
run_once: true
assert:
that:

@ -19,31 +19,41 @@
{{- end }}
{{ not $existIP }}
fail_msg: >-
"kubernetes.control_plane_endpoint.kube_vip.address" should be a un-used ip address.
The value of "kubernetes.control_plane_endpoint.kube_vip.address" must be an IP address that is not currently assigned to any node.

when: .kubernetes.control_plane_endpoint.type | eq "kube_vip"

- name: Stop if unsupported version of Kubernetes
- name: Kubernetes | Fail if unsupported Kubernetes version
run_once: true
assert:
that: .kube_version | semverCompare (printf ">=%s" .cluster_require.kube_version_min_required)
fail_msg: >-
the current release of KubeKey only support newer version of Kubernetes than {{ .cluster_require.kube_version_min_required }} - You are trying to apply {{ .kube_version }}
This version of KubeKey only supports Kubernetes versions greater than or equal to {{ .cluster_require.kube_version_min_required }}. You are attempting to use version {{ .kube_version }}.
when: .kube_version | empty | not

- name: Check if kubernetes installed
- name: Kubernetes | Check if Kubernetes is installed
when: .groups.k8s_cluster | default list | has .inventory_hostname
block:
- name: Get kubernetes service
ignore_errors: true
command: systemctl is-active kubelet.service
register: kubernetes_install_service
- name: Get kubernetes version
- name: Kubernetes | Retrieve kubelet.service LoadState
command: systemctl show kubelet.service -p LoadState --value
register: kubernetes_install_LoadState
- name: Retrieve kubelet.service ActiveState
command: systemctl show kubelet.service -p ActiveState --value
register: kubernetes_install_ActiveState
- name: Retrieve installed Kubernetes version
ignore_errors: true
command: kubelet --version
register: kubernetes_install_version
- name: Check kubernetes service and version
when: .kubernetes_install_service.stdout | eq "active"
assert:
that: .kubernetes_install_version.stdout | default "" | trimPrefix "Kubernetes " | eq .kube_version
fail_msg: >-
kubernetes has installed with version:{{ .kubernetes_install_version.stdout | default "" | trimPrefix "Kubernetes " }}. but not match kube_version: {{ .kube_version }}
- name: Validate Kubernetes service status and version
when: .kubernetes_install_LoadState.stdout | eq "loaded"
block:
- name: Ensure kubelet service is active
assert:
that: .kubernetes_install_ActiveState.stdout | eq "active"
fail_msg: >-
The kubelet service must be running and active when it is loaded.
- name: Ensure installed Kubernetes version matches expected version
assert:
that: .kubernetes_install_version.stdout | default "" | trimPrefix "Kubernetes " | eq .kube_version
fail_msg: >-
The installed Kubernetes version ({{ .kubernetes_install_version.stdout | default "" | trimPrefix "Kubernetes " }}) does not match the expected version ({{ .kube_version }}).

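The precheck now distinguishes "kubelet unit file absent" from "unit present but not running" via LoadState and ActiveState, which `systemctl is-active` alone cannot separate. For example, on a host where kubelet was never installed:

```bash
$ systemctl show kubelet.service -p LoadState --value
not-found
$ systemctl show kubelet.service -p ActiveState --value
inactive
```

On a provisioned node, LoadState reports `loaded`, and the assertions above then require ActiveState to be `active` and the kubelet version to match `kube_version`.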
@ -1,86 +1,89 @@
---
- name: Ensure required network interfaces are present
command: |
- name: Network | Ensure required network interfaces are present
command: |
{{- if .internal_ipv4 | empty | not }}
if ! ip -o addr show | grep -q {{ .internal_ipv4 }}; then
echo 'IPv4 network interface not found' >&2
echo 'The specified IPv4 address is not assigned to any network interface.' >&2
fi
{{- end }}
{{- if .internal_ipv6 | empty | not }}
if ! ip -o addr show | grep -q {{ .internal_ipv6 }}; then
echo 'IPv6 network interface not found' >&2
echo 'The specified IPv6 address is not assigned to any network interface.' >&2
fi
{{- end }}

# https://kubernetes.io/docs/concepts/services-networking/dual-stack/
- name: Validate CIDR configuration
- name: Network | Validate dual-stack CIDR configuration
run_once: true
block:
- name: Validate pod CIDR format
- name: Network | Check pod CIDR includes both IPv4 and IPv6
when: .kubernetes.networking.pod_cidr | empty | not
assert:
that: .kubernetes.networking.pod_cidr | splitList "," | len | ge 2
fail_msg: >-
kubernetes.networking.pod_cidr must be specified as ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr
- name: Validate service CIDR format
"kubernetes.networking.pod_cidr" must specify both IPv4 and IPv6 ranges, using either the format ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr.
- name: Network | Check service CIDR includes both IPv4 and IPv6
when: .kubernetes.networking.service_cidr | empty | not
assert:
that: .kubernetes.networking.service_cidr | splitList "," | len | ge 2
fail_msg: >-
kubernetes.networking.service_cidr must be specified as ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr
- name: Ensure pod networking supports dual-stack
"kubernetes.networking.service_cidr" must specify both IPv4 and IPv6 ranges, using either the format ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr.
- name: Network | Ensure pod networking is properly configured for dual-stack
when:
- .kubernetes.networking.pod_cidr | empty | not
- .kubernetes.networking.pod_cidr | splitList "," | len | eq 2
assert:
that:
that:
- .kube_version | semverCompare ">=v1.20.0"
- .kubernetes.networking.pod_cidr | splitList "," | first | ipFamily | eq "IPv4"
- .kubernetes.networking.pod_cidr | splitList "," | last | ipFamily | eq "IPv6"
fail_msg: >-
Dual-stack pod networking is supported in Kubernetes version v1.20.0 and above.
- name: Ensure service networking supports dual-stack
Dual-stack pod networking is only supported in Kubernetes v1.20.0 or newer.
- name: Network | Ensure service networking is properly configured for dual-stack
when:
- .kubernetes.networking.service_cidr | empty | not
- .kubernetes.networking.service_cidr | splitList "," | len | eq 2
assert:
that:
that:
- .kube_version | semverCompare ">=v1.20.0"
- .kubernetes.networking.service_cidr | splitList "," | first | ipFamily | eq "IPv4"
- .kubernetes.networking.service_cidr | splitList "," | last | ipFamily | eq "IPv6"
fail_msg: >-
Dual-stack service networking is supported in Kubernetes version v1.20.0 and above.
Dual-stack service networking is only supported in Kubernetes v1.20.0 or newer.

- name: Fail if network plugin is unsupported
- name: Network | Fail if the selected network plugin is not supported
run_once: true
assert:
that: .cluster_require.require_network_plugin | has .kubernetes.kube_network_plugin
fail_msg: >-
The specified kube_network_plugin "{{ .kubernetes.kube_network_plugin }}" is not supported.
The network plugin "{{ .kubernetes.kube_network_plugin }}" is not supported. Please select a supported network plugin.
when: .kubernetes.kube_network_plugin | empty | not

# Note: This assertion errs on the side of caution. It is technically possible to schedule more pods on a node than the available CIDR range allows, especially if some pods use the host network namespace. Since the number of such pods cannot be determined at provisioning time, this check provides a conservative guarantee.
# Note: This check intentionally ignores the IPv6-only case.
- name: Ensure sufficient network address space for all pods
# Note: This check is intentionally conservative. While it is technically possible to schedule more pods than the available addresses in the CIDR range (for example, if some pods use the host network), this cannot be reliably determined at provisioning time. This check ensures there is enough address space for the configured maximum pods per node.
# Note: IPv6-only scenarios are not checked here.
- name: Network | Ensure sufficient address space for all pods
run_once: true
when: .groups.k8s_cluster | default list | has .inventory_hostname
block:
- name: Ensure sufficient IPv4 address space for pods
- name: Network | Ensure enough IPv4 addresses are available for pods
when: .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | first | ipFamily | eq "IPv4"
assert:
that: le (.kubernetes.kubelet.max_pods | default 110) (sub (pow 2 (float64 (sub 32 (.kubernetes.networking.ipv4_mask_size | default 24)))) 2)
fail_msg: Do not schedule more pods on a node than there are available IPv4 addresses in the pod CIDR range.
- name: Ensure sufficient IPv6 address space for pods
fail_msg: >-
The configured maximum number of pods per node exceeds the number of available IPv4 addresses in the pod CIDR range.
- name: Network | Ensure enough IPv6 addresses are available for pods
when: .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | last | ipFamily | eq "IPv6"
assert:
that: le (.kubernetes.kubelet.max_pods | default 110) (sub (pow 2 (float64 (sub 128 (.kubernetes.networking.ipv4_mask_size | default 64)))) 2)
fail_msg: Do not schedule more pods on a node than there are available IPv6 addresses in the pod CIDR range.

fail_msg: >-
The configured maximum number of pods per node exceeds the number of available IPv6 addresses in the pod CIDR range.

# https://github.com/alibaba/hybridnet/wiki/Getting-Started#install
- name: Fail if Kubernetes version is too low for hybridnet
- name: Network | Fail if Kubernetes version is too old for hybridnet
run_once: true
assert:
that: .kube_version | semverCompare ">=v1.16.0"
fail_msg: Hybridnet requires Kubernetes version 1.16 or higher.
fail_msg: >-
Hybridnet requires Kubernetes version 1.16.0 or newer.
when:
- .kubernetes.kube_network_plugin | eq "hybridnet"

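The address-space assertion is plain arithmetic: a /24 per-node mask leaves 2^(32-24) - 2 = 254 usable addresses, comfortably above the default `max_pods` of 110. A sketch of the same check in shell, using the defaults named in the task:

```bash
mask_size=24   # .kubernetes.networking.ipv4_mask_size default
max_pods=110   # .kubernetes.kubelet.max_pods default
available=$(( (1 << (32 - mask_size)) - 2 ))
echo "available=$available"                       # prints: available=254
[ "$max_pods" -le "$available" ] && echo "check passes"
```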
@ -1,6 +1,6 @@
---
- name: Stop if nfs server is not be one
- name: NFS | Fail if more than one NFS server is defined
assert:
that: .groups.nfs | default list | len | eq 1
fail_msg: "only one nfs server is supported"
fail_msg: "Exactly one NFS server must be specified. Multiple NFS servers are not supported."
when: .groups.nfs

@ -1,31 +1,35 @@
---
- name: Stop if bad hostname
- name: OS | Fail if hostname is invalid
assert:
that: .hostname | regexMatch "^[a-z0-9]([a-z0-9-]*[a-z0-9])?(\\.[a-z0-9]([a-z0-9-]*[a-z0-9])?)*$"
fail_msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character"
fail_msg: >-
The hostname "{{ .hostname }}" is invalid. Hostnames must use only lowercase alphanumeric characters, '.', or '-', and must start and end with an alphanumeric character.

- name: Stop if the os does not support
- name: OS | Fail if operating system is not supported
assert:
that: or (.cluster_require.allow_unsupported_distribution_setup) (.cluster_require.supported_os_distributions | has .os.release.ID)
fail_msg: "{{ .os.release.ID }} is not a known OS"
fail_msg: >-
The operating system "{{ .os.release.ID }}" is not recognized or supported.

- name: Stop if arch supported
- name: OS | Fail if architecture is not supported
assert:
that: .cluster_require.supported_architectures | has .os.architecture
fail_msg: "{{ .os.architecture }} is not a known arch"
fail_msg: >-
The system architecture "{{ .os.architecture }}" is not supported.

- name: Stop if memory is too small for masters
- name: OS | Fail if master node memory is insufficient
assert:
that: .process.memInfo.MemTotal | trimSuffix " kB" | atoi | le .cluster_require.minimal_master_memory_mb
when: .groups.kube_control_plane | default list | has .inventory_hostname

- name: Stop if memory is too small for nodes
- name: OS | Fail if worker node memory is insufficient
assert:
that: .process.memInfo.MemTotal | trimSuffix " kB" | atoi | le .cluster_require.minimal_node_memory_mb
when:
- .groups.kube_worker | default list | has .inventory_hostname

- name: Stop if kernel version is too low
- name: OS | Fail if kernel version is too old
assert:
that: .os.kernel_version | splitList "-" | first | semverCompare (printf ">=%s" .cluster_require.min_kernel_version)
fail_msg: "kernel version: {{ .os.kernel_version }} is too low, required at least: {{ .cluster_require.min_kernel_version }} "
fail_msg: >-
The kernel version "{{ .os.kernel_version }}" is too old. Minimum required version: {{ .cluster_require.min_kernel_version }}.

@ -1,5 +1,5 @@
---
- name: Uninstall containerd service
- name: Containerd | Uninstall the containerd system service
ignore_errors: true
command: |
systemctl stop containerd.service

@ -8,7 +8,7 @@
systemctl daemon-reload
systemctl reset-failed containerd.service

- name: Delete containerd residue files
- name: Containerd | Remove all containerd-related files and binaries
command: |
rm -rf {{ .cri.containerd.data_root }}
rm -rf /etc/containerd

@ -1,3 +1,3 @@
- name: Delete cri residue files
- name: Crictl | Remove crictl binary and clean up any residual files
command: |
rm -f /usr/local/bin/crictl

@ -1,5 +1,5 @@
---
- name: Stop cri-dockerd service
- name: Cridockerd | Stop and disable the cri-dockerd system service
ignore_errors: true
command: |
systemctl stop cri-dockerd.service

@ -8,7 +8,7 @@
systemctl daemon-reload
systemctl reset-failed cri-dockerd.service

- name: Delete cri-dockerd residue files
- name: Cridockerd | Remove all cri-dockerd related files and binaries
command: |
rm -rf /etc/cri-dockerd
rm -f /usr/local/bin/cri-dockerd

@ -1,16 +1,16 @@
---
- name: Stop docker service
- name: Docker | Gracefully stop and disable the Docker service
ignore_errors: true
command: |
systemctl stop docker.service
systemctl disable docker.service
rm -rf /etc/systemd/system/docker.service*
systemctl daemon-reload
systemctl daemon-reload
systemctl reset-failed docker.service

- name: Uninstall containerd
- name: Docker | Completely uninstall containerd and remove all related files
block:
- name: Uninstall containerd service
- name: Docker | Stop and disable the containerd service
ignore_errors: true
command: |
systemctl stop containerd.service

@ -18,8 +18,8 @@
rm -rf /etc/systemd/system/containerd.service*
systemctl daemon-reload
systemctl reset-failed containerd.service

- name: Delete containerd residue files

- name: Docker | Remove all containerd-related files and binaries
command: |
rm -rf {{ .cri.containerd.data_root }}
rm -rf /etc/containerd

@ -27,12 +27,12 @@
rm -f /usr/local/bin/runc
rm -f /usr/local/bin/ctr

- name: Delete docker residue files
- name: Docker | Remove all Docker residual files and binaries
command: |
rm -rf {{ .cri.docker.data_root }}
rm -rf /etc/docker
rm -rf /usr/local/bin/docker*

- name: Uninstall docker interface
- name: Docker | Remove the docker0 network interface
ignore_errors: true
command: ip link delete docker0

@ -1,7 +1,7 @@
---
- name: Uninstall etcd service
- name: ETCD | Completely uninstall the etcd service and remove all related files
block:
- name: Stop etcd service
- name: ETCD | Stop and disable the etcd systemd service
ignore_errors: true
command: |
systemctl stop etcd.service

@ -9,19 +9,20 @@
rm -rf /etc/systemd/system/etcd.service*
systemctl daemon-reload
systemctl reset-failed etcd.service
- name: Unset Traffic Priority
- name: ETCD | Remove traffic priority rules for etcd ports
command: |
tc filter del dev eth0 parent 1: protocol ip prio 1 u32 match ip sport 2379 0xffff
tc filter del dev eth0 parent 1: protocol ip prio 1 u32 match ip sport 2380 0xffff
when: .etcd.traffic_priority
- name: Delete residue files
- name: ETCD | Delete all etcd data, configuration, and binaries
command: |
rm -rf {{ .etcd.env.data_dir }}
rm -rf /etc/ssl/etcd/
rm -rf /etc/etcd.env
rm -rf /usr/local/bin/etcd*

- name: Uninstall backup-etcd service
- name: ETCD | Uninstall backup-etcd timer and service, and remove backup scripts
ignore_errors: true
command: |
systemctl disable --now backup-etcd.timer
rm /etc/systemd/system/backup-etcd.timer

@ -1,16 +1,16 @@
---
- name: Stop docker service
- name: DockerCompose | Gracefully stop and disable the Docker service
ignore_errors: true
command: |
systemctl stop docker.service
systemctl disable docker.service
rm -rf /etc/systemd/system/docker.service*
systemctl daemon-reload
systemctl daemon-reload
systemctl reset-failed docker.service

- name: Uninstall containerd
- name: DockerCompose | Completely uninstall containerd and remove all related files
block:
- name: Uninstall containerd service
- name: DockerCompose | Stop and disable the containerd service
ignore_errors: true
command: |
systemctl stop containerd.service

@ -18,21 +18,21 @@
rm -rf /etc/systemd/system/containerd.service*
systemctl daemon-reload
systemctl reset-failed containerd.service

- name: Delete containerd residue files

- name: DockerCompose | Remove all containerd-related files and binaries
command: |
rm -rf {{ .cri.containerd.data_root }}
rm -rf /etc/containerd
rm -rf /usr/local/bin/containerd*
rm -f /usr/local/bin/runc
rm -f /usr/local/bin/ctr
rm -f /usr/local/bin/ctr

- name: Delete docker residue files
- name: DockerCompose | Remove all Docker residual files and binaries
command: |
rm -rf {{ .cri.docker.data_root }}
rm -rf /etc/docker
rm -rf /usr/local/bin/docker*

- name: Uninstall docker interface
- name: DockerCompose | Remove the docker0 network interface
ignore_errors: true
command: ip link delete docker0

@ -1,4 +1,4 @@
- name: Stop registry service
- name: DockerRegistry | Gracefully stop and disable the Docker Registry service
ignore_errors: true
command: |
systemctl stop docker-registry.service

@ -7,13 +7,13 @@
systemctl daemon-reload
systemctl reset-failed docker-registry.service

- name: unmount nfs
- name: DockerRegistry | Unmount NFS storage for Docker Registry if configured
when:
- .image_registry.docker_registry.storage.filesystem.nfs_mount | empty | not
- .groups.nfs | default list | len | eq 1
command: |
umount {{ .image_registry.docker_registry.storage.filesystem.rootdir }}

- name: Delete residue registry files
- name: DockerRegistry | Remove all residual Docker Registry files and directories
command: |
rm -rf /opt/docker-registry/

@ -1,4 +1,4 @@
- name: Stop harbor service
- name: Harbor | Gracefully stop and disable the Harbor service
ignore_errors: true
command: |
systemctl stop harbor.service

@ -7,6 +7,6 @@
systemctl daemon-reload
systemctl reset-failed harbor.service

- name: Delete residue harbor files
- name: Harbor | Remove all residual Harbor files and directories
command: |
rm -rf /opt/harbor/

@ -1,8 +1,8 @@
- name: Delete arp by kube-vip
- name: Keepalived | Remove ARP and IP address entries associated with kube-vip
command: |
ip neigh show | grep {{ .image_registry.ha_vip }} | awk '{print $1 " dev " $3}' | xargs -r -L1 ip neigh delete
ip -o addr show | grep {{ .image_registry.ha_vip }} | awk '{system("ip addr del "$4" dev "$2)}'

- name: Delete residue keepalived files
- name: Keepalived | Remove all residual Keepalived files and directories
command: |
rm -rf /opt/keepalived/

@ -1,10 +1,10 @@
---
- name: Delete Node
- name: Kubernetes | Completely reset the node using kubeadm
ignore_errors: true
command: |
kubeadm reset -f

- name: Stop kubelet service
- name: Kubernetes | Gracefully stop and disable the kubelet service
ignore_errors: true
command: |
systemctl stop kubelet.service

@ -13,9 +13,11 @@
systemctl daemon-reload
systemctl reset-failed kubelet.service

- name: Delete residue files
- name: Kubernetes | Remove all residual Kubernetes files and directories
command: |
rm -rf /usr/local/bin/kubeadm && rm -rf /usr/local/bin/kubelet && rm -rf /usr/local/bin/kubectl
rm -rf /usr/local/bin/kubeadm
rm -rf /usr/local/bin/kubelet
rm -rf /usr/local/bin/kubectl
rm -rf /var/lib/kubelet/
# If /var/log/pods/ is not cleaned up, static pods may accumulate unexpected restarts due to lingering log files interfering with their lifecycle.
rm -rf /var/log/pods/

@ -3,6 +3,6 @@

- include_tasks: network.yaml

- name: Delete residue files
- name: Kubernetes | Remove all residual Kubekey files and directories
command: |
rm -rf /etc/kubekey/

@ -1,5 +1,6 @@
---
- name: Clean iptables
- name: Network | Thoroughly clean up iptables and network interfaces
ignore_errors: true
command: |
iptables -F
iptables -X

@ -24,7 +25,7 @@
{{- end }}
ip netns show 2>/dev/null | grep cni- | awk '{print $1}' | xargs -r -t -n 1 ip netns del

- name: Delete net.d
- name: Network | Remove all CNI network configuration and state
command: |
rm -rf /etc/cni/net.d/
rm -rf /var/lib/cni/

@ -32,13 +33,13 @@
rm -rf /usr/local/bin/calicoctl
{{- end }}

- name: Delete arp by kube-vip
- name: Network | Remove ARP and IP address entries for kube-vip
when: eq .kubernetes.control_plane_endpoint.type "kube_vip"
command: |
ip neigh show | grep {{ .kubernetes.control_plane_endpoint.kube_vip.address }} | awk '{print $1 " dev " $3}' | xargs -r -L1 ip neigh delete
ip -o addr show | grep {{ .kubernetes.control_plane_endpoint.kube_vip.address }} | awk '{system("ip addr del "$4" dev "$2)}'

- name: Rebuild cri iptables
- name: Network | Restart container runtime to rebuild iptables rules
ignore_errors: true
when: or (.deleteCRI | not) (.groups.image_registry | default list | has .inventory_hostname)
command: |

@@ -26,7 +26,6 @@ prometheus:
| query | PromQL query statement | string | yes (unless the info parameter is used) | - |
| format | Result format option: raw, value, or table | string | no | raw |
| time | Point in time to evaluate the query (RFC3339 format or Unix timestamp) | string | no | current time |
| info | Fetch Prometheus server information instead of executing a query | boolean | no | false |

## Output
@@ -67,9 +66,9 @@ prometheus:
4. Fetch Prometheus server information:
   ```yaml
   - name: Fetch Prometheus server information
     prometheus:
       info: true
     register: prometheus_info
     fetch:
       src: api/v1/status/buildinfo
       dest: info.json
   ```

5. Format query results as a table:
@@ -84,7 +83,6 @@ prometheus:
## Notes

1. The `query` parameter is required when executing a query
2. When `info: true` is specified, all query-related parameters are ignored
3. The time parameter must be in RFC3339 format (e.g. 2023-01-01T12:00:00Z) or a Unix timestamp
4. Table formatting only applies to vector-type results; other result types return an error
5. For security, connecting to the Prometheus server over HTTPS is recommended
2. The time parameter must be in RFC3339 format (e.g. 2023-01-01T12:00:00Z) or a Unix timestamp
3. Table formatting only applies to vector-type results; other result types return an error
4. For security, connecting to the Prometheus server over HTTPS is recommended
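Based on the parameters documented above, a minimal playbook sketch of a table-formatted query might look like the following; the task name, query expression, and register name are illustrative rather than taken from this diff:

```yaml
- name: Query target liveness as a table
  prometheus:
    # "up" is a sample PromQL expression; table formatting only applies to vector-type results (see the notes above)
    query: up
    format: table
    # optional evaluation time in RFC3339 format; defaults to the current time
    time: 2023-01-01T12:00:00Z
  register: up_table
```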
@@ -195,7 +195,47 @@ func (pc *PrometheusConnector) PutFile(ctx context.Context, src []byte, dst stri

// FetchFile is not supported for Prometheus connector
func (pc *PrometheusConnector) FetchFile(ctx context.Context, src string, dst io.Writer) error {
	return errors.New("fetchFile operation is not supported for Prometheus connector")
	// Build query URL for server info
	infoURL, err := url.Parse(pc.url + src)
	if err != nil {
		return errors.Wrap(err, "failed to parse URL for server info")
	}

	// Create request
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, infoURL.String(), http.NoBody)
	if err != nil {
		return errors.Wrap(err, "failed to create request for server info")
	}

	// Add authentication headers
	pc.addAuthHeaders(req)

	// Execute request
	resp, err := pc.client.Do(req)
	if err != nil {
		return errors.Wrap(err, "failed to get prometheus server info")
	}
	defer resp.Body.Close()

	// Read response
	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return errors.Wrap(err, "failed to read server info response body")
	}

	// Check if response is successful (err is nil on this path, so log without it)
	if resp.StatusCode != http.StatusOK {
		klog.ErrorS(nil, "Prometheus server info request failed",
			"statusCode", resp.StatusCode,
			"response", string(bodyBytes))
		return errors.Errorf("prometheus server info request failed with status %d", resp.StatusCode)
	}

	// Write the response to the destination; resp.Body was already drained by io.ReadAll above, so copy the buffered bytes rather than the (now empty) body
	if _, err := dst.Write(bodyBytes); err != nil {
		return errors.Wrap(err, "failed to copy response")
	}
	return nil
}

// ExecuteCommand executes a PromQL query and returns both stdout and stderr
@@ -586,61 +626,3 @@ func addValueAndTimestamp(builder *strings.Builder, sample map[string]any, metri

	return nil
}

// GetServerInfo returns information about the Prometheus server
// This is useful for checking server version, uptime, and other details
func (pc *PrometheusConnector) GetServerInfo(ctx context.Context) (map[string]any, error) {
	if !pc.connected {
		return nil, errors.New("prometheus connector is not initialized, call Init() first")
	}

	klog.V(4).InfoS("Getting Prometheus server information")

	// Build query URL for server info
	infoURL, err := url.Parse(pc.url + "api/v1/status/buildinfo")
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse URL for server info")
	}

	// Create request
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, infoURL.String(), http.NoBody)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create request for server info")
	}

	// Add authentication headers
	pc.addAuthHeaders(req)

	// Execute request
	resp, err := pc.client.Do(req)
	if err != nil {
		klog.ErrorS(err, "Failed to get Prometheus server info")
		return nil, errors.Wrap(err, "failed to get prometheus server info")
	}
	defer resp.Body.Close()

	// Read response
	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		klog.ErrorS(err, "Failed to read server info response body")
		return nil, errors.Wrap(err, "failed to read server info response body")
	}

	// Check if response is successful
	if resp.StatusCode != http.StatusOK {
		klog.ErrorS(err, "Prometheus server info request failed",
			"statusCode", resp.StatusCode,
			"response", string(bodyBytes))
		return nil, errors.Errorf("prometheus server info request failed with status %d", resp.StatusCode)
	}

	// Parse response
	var result map[string]any
	if err := json.Unmarshal(bodyBytes, &result); err != nil {
		klog.ErrorS(err, "Failed to parse server info response")
		return nil, errors.Wrap(err, "failed to parse server info response")
	}

	klog.V(4).InfoS("Successfully retrieved Prometheus server information")
	return result, nil
}
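With FetchFile now performing a real HTTP GET against the connector's base URL, any Prometheus status endpoint can be pulled through the fetch module rather than the removed GetServerInfo helper. A hedged sketch follows; api/v1/status/runtimeinfo is a standard Prometheus endpoint assumed here rather than shown in this diff, and the destination path is illustrative:

```yaml
- name: Fetch Prometheus runtime information
  fetch:
    # resolved relative to the connector's configured server URL, like buildinfo in the docs above
    src: api/v1/status/runtimeinfo
    dest: runtimeinfo.json
```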
@@ -14,6 +14,7 @@ import (
	"gopkg.in/yaml.v3"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
	"k8s.io/utils/ptr"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	_const "github.com/kubesphere/kubekey/v4/pkg/const"
@@ -60,7 +61,7 @@ func (e *taskExecutor) Exec(ctx context.Context) error {
		if e.task.IsFailed() {
			failedMsg := "\n"
			for _, result := range e.task.Status.HostResults {
				if result.StdErr != "" {
				if result.Error != "" {
					failedMsg += fmt.Sprintf("[%s]: %s\n", result.Host, result.StdErr)
				}
			}
@@ -116,7 +117,7 @@ func (e *taskExecutor) execTask(ctx context.Context) {
	// host result for task
	e.task.Status.Phase = kkcorev1alpha1.TaskPhaseSuccess
	for _, data := range e.task.Status.HostResults {
		if data.StdErr != "" {
		if data.Error != "" {
			if e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError {
				e.task.Status.Phase = kkcorev1alpha1.TaskPhaseIgnored
			} else {
@@ -133,37 +134,41 @@ func (e *taskExecutor) execTask(ctx context.Context) {
func (e *taskExecutor) execTaskHost(i int, h string) func(ctx context.Context) {
	return func(ctx context.Context) {
		// task result
		var stdout, stderr string
		var resErr error
		var stdout, stderr, errMsg string
		// task log
		deferFunc := e.execTaskHostLogs(ctx, h, &stdout, &stderr, &errMsg)
		defer deferFunc()
		defer func() {
			if err := e.dealRegister(stdout, stderr, h); err != nil {
				stderr = err.Error()
			}
			if stderr != "" && e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError {
				klog.V(5).ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(e.task))
			} else if stderr != "" {
				klog.ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(e.task))
			if resErr != nil {
				errMsg = resErr.Error()
				if e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError {
					klog.V(5).ErrorS(resErr, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "error", errMsg, "task", ctrlclient.ObjectKeyFromObject(e.task))
				} else {
					klog.ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "error", errMsg, "task", ctrlclient.ObjectKeyFromObject(e.task))
				}
			}
			resErr = errors.Join(resErr, e.dealRegister(h, stdout, stderr, errMsg))

			// fill result
			e.task.Status.HostResults[i] = kkcorev1alpha1.TaskHostResult{
				Host:   h,
				Stdout: stdout,
				StdErr: stderr,
				Error:  errMsg,
			}
		}()
		// task log
		deferFunc := e.execTaskHostLogs(ctx, h, &stdout, &stderr)
		defer deferFunc()
		// task execute
		ha, err := e.variable.Get(variable.GetAllVariable(h))
		if err != nil {
			stderr = fmt.Sprintf("failed to get host %s variable: %v", h, err)
			resErr = err
			return
		}
		// convert hostVariable to map
		had, ok := ha.(map[string]any)
		if !ok {
			stderr = fmt.Sprintf("host: %s variable is not a map", h)
			resErr = errors.Errorf("host: %s variable is not a map", h)
			return
		}
		// check when condition
		if skip := e.dealWhen(had, &stdout, &stderr); skip {
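At the playbook level, the practical effect of this hunk is that per-host failures now travel through the result's error field while stdout and stderr remain whatever the module produced, and ignore_errors downgrades the failure to a verbosity-5 log. A hedged sketch of a tolerated probe (the task name, command, and register name are illustrative):

```yaml
- name: Probe containerd service state
  # with ignore_errors, a non-zero exit is recorded in the result's error field
  # and the task is marked ignored instead of failing the play
  ignore_errors: true
  command: systemctl is-active containerd
  register: containerd_state
```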
@@ -172,14 +177,14 @@ func (e *taskExecutor) execTaskHost(i int, h string) func(ctx context.Context) {
		// execute module in loop with loop item.
		// if loop is empty, execute once and the item is null
		for _, item := range e.dealLoop(had) {
			e.executeModule(ctx, e.task, item, h, &stdout, &stderr)
			resErr = e.executeModule(ctx, e.task, item, h, &stdout, &stderr)
		}
	}
}

// execTaskHostLogs sets up and manages progress bar logging for task execution on a host.
// It returns a cleanup function to be called when execution completes.
func (e *taskExecutor) execTaskHostLogs(ctx context.Context, h string, stdout, stderr *string) func() {
func (e *taskExecutor) execTaskHostLogs(ctx context.Context, h string, stdout, _, errMsg *string) func() {
	// placeholder format task log
	var placeholder string
	if hostnameMaxLen, err := e.variable.Get(variable.GetHostMaxLength()); err == nil {
@@ -206,7 +211,7 @@ func (e *taskExecutor) execTaskHostLogs(ctx context.Context, h string, stdout, s
	bar := progressbar.NewOptions(-1, options...)
	// run progress
	go func() {
		err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(context.Context) (bool, error) {
		if err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(context.Context) (bool, error) {
			if bar.IsFinished() {
				return true, nil
			}
@@ -215,15 +220,14 @@ func (e *taskExecutor) execTaskHostLogs(ctx context.Context, h string, stdout, s
			}

			return false, nil
		})
		if err != nil {
		}); err != nil {
			klog.ErrorS(err, "failed to wait for task run to finish", "host", h)
		}
	}()

	return func() {
		switch {
		case *stderr != "":
		case ptr.Deref(errMsg, "") != "":
			if e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError { // ignore
				bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34mignore \033[0m", h, placeholder))
				if e.logOutput != os.Stdout {
@@ -253,20 +257,18 @@ func (e *taskExecutor) execTaskHostLogs(ctx context.Context, h string, stdout, s
}

// executeModule executes a single module task on a specific host.
func (e *taskExecutor) executeModule(ctx context.Context, task *kkcorev1alpha1.Task, item any, host string, stdout, stderr *string) {
func (e *taskExecutor) executeModule(ctx context.Context, task *kkcorev1alpha1.Task, item any, host string, stdout, stderr *string) (resErr error) {
	// Set loop item variable if one was provided
	if item != nil {
		// Convert item to runtime variable
		node, err := converter.ConvertMap2Node(map[string]any{_const.VariableItem: item})
		if err != nil {
			*stderr = fmt.Sprintf("convert loop item error: %v", err)
			return
			return errors.Wrap(err, "failed to convert loop item")
		}

		// Merge item into host's runtime variables
		if err := e.variable.Merge(variable.MergeRuntimeVariable(node, host)); err != nil {
			*stderr = fmt.Sprintf("set loop item to variable error: %v", err)
			return
			return errors.Wrap(err, "failed to set loop item to variable")
		}

		// Clean up loop item variable after execution
@@ -277,11 +279,11 @@ func (e *taskExecutor) executeModule(ctx context.Context, task *kkcorev1alpha1.T
			// Reset item to null
			resetNode, err := converter.ConvertMap2Node(map[string]any{_const.VariableItem: nil})
			if err != nil {
				*stderr = fmt.Sprintf("convert loop item error: %v", err)
				resErr = errors.Wrap(err, "failed to convert loop item")
				return
			}
			if err := e.variable.Merge(variable.MergeRuntimeVariable(resetNode, host)); err != nil {
				*stderr = fmt.Sprintf("clean loop item to variable error: %v", err)
				resErr = errors.Wrap(err, "failed to clean loop item to variable")
				return
			}
		}()
@@ -290,19 +292,17 @@ func (e *taskExecutor) executeModule(ctx context.Context, task *kkcorev1alpha1.T
	// Get all variables for this host, including any loop item
	ha, err := e.variable.Get(variable.GetAllVariable(host))
	if err != nil {
		*stderr = fmt.Sprintf("failed to get host %s variable: %v", host, err)
		return
		return errors.Wrapf(err, "failed to get host %s variable", host)
	}

	// Convert host variables to map type
	had, ok := ha.(map[string]any)
	if !ok {
		*stderr = fmt.Sprintf("host: %s variable is not a map", host)
		return
		// err is nil on this path, so wrapping it would return nil; construct a new error instead
		return errors.Errorf("host %s variable is not a map", host)
	}

	// Execute the actual module with the prepared context
	*stdout, *stderr = modules.FindModule(task.Spec.Module.Name)(ctx, modules.ExecOptions{
	*stdout, *stderr, resErr = modules.FindModule(task.Spec.Module.Name)(ctx, modules.ExecOptions{
		Args:     e.task.Spec.Module.Args,
		Host:     host,
		Variable: e.variable,
@@ -310,8 +310,7 @@ func (e *taskExecutor) executeModule(ctx context.Context, task *kkcorev1alpha1.T
		Playbook:  *e.playbook,
		LogOutput: e.logOutput,
	})

	e.dealFailedWhen(had, stderr)
	return e.dealFailedWhen(had, resErr)
}

// dealLoop parses the loop specification into a slice of items to iterate over.
@@ -353,38 +352,49 @@ func (e *taskExecutor) dealWhen(had map[string]any, stdout, stderr *string) bool

// dealFailedWhen evaluates the "failed_when" conditions for a task to determine if it should fail.
// Returns true if the task should be marked as failed, false if it should proceed.
func (e *taskExecutor) dealFailedWhen(had map[string]any, stderr *string) {
func (e *taskExecutor) dealFailedWhen(had map[string]any, err error) error {
	if err != nil {
		return err
	}
	if len(e.task.Spec.FailedWhen) > 0 {
		ok, err := tmpl.ParseBool(had, e.task.Spec.FailedWhen...)
		if err != nil {
			klog.V(5).ErrorS(err, "validate failed_when condition error", "task", ctrlclient.ObjectKeyFromObject(e.task))
			*stderr = fmt.Sprintf("parse failed_when condition error: %v", err)
			return errors.Wrap(err, "failed to parse failed_when condition")
		}
		if ok {
			*stderr = "reach failed_when, failed"
			return errors.New("reach failed_when, failed")
		}
	}
	return nil
}

// dealRegister handles storing task output in a registered variable if specified.
// The output can be stored as raw string, JSON, or YAML based on the register type.
func (e *taskExecutor) dealRegister(stdout, stderr, host string) error {
func (e *taskExecutor) dealRegister(host string, stdout, stderr, errMsg string) error {
	if e.task.Spec.Register != "" {
		var stdoutResult any = stdout
		var stderrResult any = stderr
		switch e.task.Spec.RegisterType {
		switch e.task.Spec.RegisterType { // if the task failed, stdout may not be valid json or yaml
		case "json":
			_ = json.Unmarshal([]byte(stdout), &stdoutResult)
			if err := json.Unmarshal([]byte(stdout), &stdoutResult); err != nil {
				klog.V(5).ErrorS(err, "failed to register json value")
			}
		case "yaml", "yml":
			_ = yaml.Unmarshal([]byte(stdout), &stdoutResult)
			if err := yaml.Unmarshal([]byte(stdout), &stdoutResult); err != nil {
				klog.V(5).ErrorS(err, "failed to register yaml value")
			}
		default:
			// store by string
			if s, ok := stdoutResult.(string); ok {
				stdoutResult = strings.TrimRight(s, "\n")
			}
		}
		// set variable to parent location
		node, err := converter.ConvertMap2Node(map[string]any{
			e.task.Spec.Register: map[string]any{
				"stdout": stdoutResult,
				"stderr": stderrResult,
				"error":  errMsg,
			},
		})
		if err != nil {
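After this change a registered variable exposes three keys: stdout, stderr, and the new error, and failed_when conditions surface as errors rather than synthesized stderr text. A hedged playbook sketch of both mechanisms (the task names, probed command, and condition strings are illustrative; failed_when is assumed to accept the same template expressions as when, since both go through tmpl.ParseBool):

```yaml
- name: Probe kubelet health endpoint
  command: curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:10248/healthz
  register: kubelet_health
  # evaluated by dealFailedWhen; a true result fails the task with "reach failed_when, failed"
  failed_when:
    - .kubelet_health.stdout | ne "200"

- name: Collect kubelet logs when the probe itself errored
  # the error key is set from the module's returned error, independent of stderr
  when: .kubelet_health.error | empty | not
  command: journalctl -u kubelet --no-pager -n 50
```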
@@ -2,7 +2,6 @@ package modules

import (
	"context"
	"fmt"

	"github.com/cockroachdb/errors"
	"gopkg.in/yaml.v3"
@@ -84,32 +83,32 @@ func newAddHostvarsArgs(_ context.Context, raw runtime.RawExtension, vars map[st

// ModuleAddHostvars handles the "add_hostvars" module, merging variables into the specified hosts.
// Returns empty stdout and stderr on success, or error message in stderr on failure.
func ModuleAddHostvars(ctx context.Context, options ExecOptions) (string, string) {
func ModuleAddHostvars(ctx context.Context, options ExecOptions) (string, string, error) {
	// Get all host variables (for context, not used directly here).
	ha, err := options.getAllVariables()
	if err != nil {
		return "", err.Error()
		return StdoutFailed, StderrGetHostVariable, err
	}
	// Parse module arguments.
	args, err := newAddHostvarsArgs(ctx, options.Args, ha)
	if err != nil {
		return "", err.Error()
		return StdoutFailed, StderrParseArgument, err
	}
	ahn, err := options.Variable.Get(variable.GetHostnames(args.hosts))
	if err != nil {
		return "", err.Error()
		return StdoutFailed, "failed to get hostnames", err
	}
	hosts, ok := ahn.([]string)
	if !ok {
		return "", "failed to get actual hosts from given \"hosts\""
		return StdoutFailed, "failed to get actual hosts from given \"hosts\"", errors.Errorf("failed to get actual hosts from given \"hosts\"")
	}

	// Merge the provided variables into the specified hosts.
	if err := options.Variable.Merge(variable.MergeHostsRuntimeVariable(args.vars, options.Host, hosts...)); err != nil {
		return "", fmt.Sprintf("add_hostvars error: %v", err)
		return StdoutFailed, "failed to add_hostvars", errors.Wrap(err, "failed to add_hostvars")
	}

	return "", ""
	return StdoutSuccess, "", nil
}

func init() {
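For reference, a hedged sketch of invoking add_hostvars from a playbook, based on the hosts and vars arguments exercised by the tests below; the group name and variable values are illustrative:

```yaml
- name: Share the registry address with all worker nodes
  add_hostvars:
    # hosts accepts a string or a string array and is resolved via GetHostnames
    hosts: worker
    vars:
      registry_address: 127.0.0.1:5000
```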
@@ -14,7 +14,6 @@ func TestModuleAddHostvars(t *testing.T) {
		name         string
		opt          ExecOptions
		expectStdout string
		expectStderr string
	}
	cases := []testcase{
		{
@@ -29,8 +28,7 @@ vars:
`),
				},
			},
			expectStdout: "",
			expectStderr: "\"hosts\" should be string or string array",
			expectStdout: StdoutFailed,
		},
		{
			name: "missing vars",
@@ -43,8 +41,7 @@ hosts: node1
`),
				},
			},
			expectStdout: "",
			expectStderr: "\"vars\" should not be empty",
			expectStdout: StdoutFailed,
		},
		{
			name: "invalid hosts type",
@@ -60,8 +57,7 @@ vars:
`),
				},
			},
			expectStdout: "",
			expectStderr: "\"hosts\" should be string or string array",
			expectStdout: StdoutFailed,
		},
		{
			name: "string value",
@@ -76,6 +72,7 @@ vars:
`),
				},
			},
			expectStdout: StdoutSuccess,
		},
		{
			name: "string var value",
@@ -90,6 +87,7 @@ vars:
`),
				},
			},
			expectStdout: StdoutSuccess,
		},
		{
			name: "map value",
@@ -105,6 +103,7 @@ vars:
`),
				},
			},
			expectStdout: StdoutSuccess,
		},
	}

@@ -112,9 +111,8 @@ vars:
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
			defer cancel()
			stdout, stderr := ModuleAddHostvars(ctx, tc.opt)
			stdout, _, _ := ModuleAddHostvars(ctx, tc.opt)
			require.Equal(t, tc.expectStdout, stdout, "stdout mismatch")
			require.Equal(t, tc.expectStderr, stderr, "stderr mismatch")
		})
	}
}
@@ -18,7 +18,6 @@ package modules

import (
	"context"
	"fmt"

	"github.com/cockroachdb/errors"
	kkprojectv1 "github.com/kubesphere/kubekey/api/project/v1"
@@ -105,51 +104,51 @@ func newAssertArgs(_ context.Context, raw runtime.RawExtension, vars map[string]
	}
	aa.successMsg, _ = variable.StringVar(vars, args, "success_msg")
	if aa.successMsg == "" {
		aa.successMsg = StdoutTrue
		aa.successMsg = StdoutSuccess
	}
	aa.failMsg, _ = variable.StringVar(vars, args, "fail_msg")
	if aa.failMsg == "" {
		aa.failMsg = StdoutFalse
		aa.failMsg = StdoutFailed
	}
	aa.msg, _ = variable.StringVar(vars, args, "msg")
	if aa.msg == "" {
		aa.msg = StdoutFalse
		aa.msg = StdoutFailed
	}

	return aa, nil
}

// ModuleAssert handles the "assert" module, evaluating boolean conditions and returning appropriate messages
func ModuleAssert(ctx context.Context, options ExecOptions) (string, string) {
func ModuleAssert(ctx context.Context, options ExecOptions) (string, string, error) {
	// get host variable
	ha, err := options.getAllVariables()
	if err != nil {
		return "", err.Error()
		return StdoutFailed, StderrGetHostVariable, err
	}

	aa, err := newAssertArgs(ctx, options.Args, ha)
	if err != nil {
		return "", err.Error()
		return StdoutFailed, StderrParseArgument, err
	}

	ok, err := tmpl.ParseBool(ha, aa.that...)
	if err != nil {
		return "", fmt.Sprintf("parse \"that\" error: %v", err)
		return StdoutFailed, "failed to parse argument of that", err
	}
	// condition is true
	if ok {
		return aa.successMsg, ""
		return aa.successMsg, "", nil
	}
	// condition is false and fail_msg is not empty
	if aa.failMsg != "" {
		return "", aa.failMsg
		return StdoutFailed, aa.failMsg, errors.New(aa.failMsg)
	}
	// condition is false and msg is not empty
	if aa.msg != "" {
		return "", aa.msg
		return StdoutFailed, aa.msg, errors.New(aa.msg)
	}

	return StdoutFalse, "False"
	return StdoutFailed, StdoutFailed, errors.New(StdoutFailed)
}

func init() {
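A hedged sketch of the assert module as reworked above, using the documented arguments that, success_msg, and fail_msg; the asserted variable path mirrors the network cleanup tasks earlier in this diff, and both messages are illustrative:

```yaml
- name: Assert the control plane endpoint type is configured
  assert:
    # "that" accepts a string or string array of template conditions
    that:
      - .kubernetes.control_plane_endpoint.type | empty | not
    success_msg: control plane endpoint type is set
    fail_msg: kubernetes.control_plane_endpoint.type must be set
```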
@@ -30,7 +30,6 @@ func TestAssert(t *testing.T) {
		name         string
		opt          ExecOptions
		exceptStdout string
		exceptStderr string
	}{
		{
			name: "non-that",
@@ -39,7 +38,7 @@ func TestAssert(t *testing.T) {
				Variable: newTestVariable(nil, nil),
				Args:     runtime.RawExtension{},
			},
			exceptStderr: "\"that\" should be []string or string",
			exceptStdout: StdoutFailed,
		},
		{
			name: "success with non-msg",
@@ -52,7 +51,7 @@ func TestAssert(t *testing.T) {
					"testvalue": "a",
				}),
			},
			exceptStdout: StdoutTrue,
			exceptStdout: StdoutSuccess,
		},
		{
			name: "success with success_msg",
@@ -80,7 +79,7 @@ func TestAssert(t *testing.T) {
					"k2": "v2",
				}),
			},
			exceptStderr: "False",
			exceptStdout: StdoutFailed,
		},
		{
			name: "failed with failed_msg",
@@ -94,7 +93,7 @@ func TestAssert(t *testing.T) {
					"k2": "v2",
				}),
			},
			exceptStderr: "failed v2",
			exceptStdout: StdoutFailed,
		},
	}

@@ -103,9 +102,8 @@ func TestAssert(t *testing.T) {
			ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
			defer cancel()

			acStdout, acStderr := ModuleAssert(ctx, tc.opt)
			acStdout, _, _ := ModuleAssert(ctx, tc.opt)
			assert.Equal(t, tc.exceptStdout, acStdout)
			assert.Equal(t, tc.exceptStderr, acStderr)
		})
	}
}
@@ -18,7 +18,6 @@ package modules

import (
	"context"
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"

@@ -62,29 +61,26 @@
Return Values:
*/

// ModuleCommand handles the "command" module, executing shell commands on remote hosts
func ModuleCommand(ctx context.Context, options ExecOptions) (string, string) {
func ModuleCommand(ctx context.Context, options ExecOptions) (string, string, error) {
	// get host variable
	ha, err := options.getAllVariables()
	if err != nil {
		return "", err.Error()
		return StdoutFailed, StderrGetHostVariable, err
	}
	// get connector
	conn, err := options.getConnector(ctx)
	if err != nil {
		return "", fmt.Sprintf("failed to connector for %q error: %v", options.Host, err)
		return StdoutFailed, StderrGetConnector, err
	}
	defer conn.Close(ctx)
	// command string
	command, err := variable.Extension2String(ha, options.Args)
	if err != nil {
		return "", err.Error()
		return StdoutFailed, StderrParseArgument, err
	}
	// execute command
	stdout, stderr, err := conn.ExecuteCommand(ctx, string(command))
	if err != nil {
		return "", err.Error()
	}
	return string(stdout), string(stderr)
	return string(stdout), string(stderr), err
}

func init() {
Some files were not shown because too many files have changed in this diff.