feature: use go template instead of pongo2 template (#2340)

* fix: misspelling

Signed-off-by: joyceliu <joyceliu@yunify.com>

* fix: misspelling

Signed-off-by: joyceliu <joyceliu@yunify.com>

* fix: misspelling

Signed-off-by: joyceliu <joyceliu@yunify.com>

* fix: misspelling

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: use go-template instead of pongo2-template

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: not set incorrect

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: more beautiful progress bar

Signed-off-by: joyceliu <joyceliu@yunify.com>

---------

Signed-off-by: joyceliu <joyceliu@yunify.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
II 2024-08-05 10:06:49 +08:00 committed by GitHub
parent 3b361c9e6d
commit 4919e639c5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
302 changed files with 17395 additions and 10302 deletions

View File

@ -26,7 +26,7 @@ jobs:
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: v1.59
version: v1.59.1
verify:
name: verify

View File

@ -79,8 +79,10 @@ GOTESTSUM_PKG := gotest.tools/gotestsum
HADOLINT_VER := v2.10.0
HADOLINT_FAILURE_THRESHOLD = warning
GOLANGCI_LINT_VER := $(shell cat .github/workflows/golangci-lint.yml | grep [[:space:]]version | sed 's/.*version: //')
GOLANGCI_LINT_BIN := golangci-lint
GOLANGCI_LINT := $(abspath $(OUTPUT_TOOLS_DIR)/$(GOLANGCI_LINT_BIN))
GOLANGCI_LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint
GORELEASER_VERSION := v2.0.1
GORELEASER_BIN := goreleaser
@ -175,7 +177,7 @@ generate-manifests-kubekey: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RB
.PHONY: generate-modules
generate-modules: ## Run go mod tidy to ensure modules are up to date
go mod tidy
@go mod tidy && go mod vendor
.PHONY: generate-goimports
generate-goimports: ## Format all import, `goimports` is required.
@ -587,22 +589,32 @@ $(GOLANGCI_LINT_BIN): $(GOLANGCI_LINT) ## Build a local copy of golangci-lint
.PHONY: $(GORELEASER)
$(GORELEASER_BIN): $(GORELEASER) ## Build a local copy of goreleaser
$(CONTROLLER_GEN): # Build controller-gen from tools folder.
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER)
$(CONTROLLER_GEN): # Build controller-gen into tools folder.
@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(CONTROLLER_GEN_BIN) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER); \
fi
$(GOTESTSUM): # Build gotestsum from tools folder.
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GOTESTSUM_PKG) $(GOTESTSUM_BIN) $(GOTESTSUM_VER)
$(GOTESTSUM): # Build gotestsum into tools folder.
@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(GOTESTSUM_BIN) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GOTESTSUM_PKG) $(GOTESTSUM_BIN) $(GOTESTSUM_VER); \
fi
$(KUSTOMIZE): # Build kustomize from tools folder.
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(KUSTOMIZE_PKG) $(KUSTOMIZE_BIN) $(KUSTOMIZE_VER)
$(KUSTOMIZE): # Build kustomize into tools folder.
@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(KUSTOMIZE_PKG) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(KUSTOMIZE_PKG) $(KUSTOMIZE_BIN) $(KUSTOMIZE_VER); \
fi
$(SETUP_ENVTEST): # Build setup-envtest from tools folder.
GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(SETUP_ENVTEST_PKG) $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER)
$(SETUP_ENVTEST): # Build setup-envtest into tools folder.
if [ ! -f $(OUTPUT_TOOLS_DIR)/$(SETUP_ENVTEST_BIN) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(SETUP_ENVTEST_PKG) $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER); \
fi
$(GOLANGCI_LINT): .github/workflows/golangci-lint.yml # Download golangci-lint using hack script into tools folder.
hack/ensure-golangci-lint.sh \
-b $(OUTPUT_TOOLS_DIR) \
$(shell cat .github/workflows/golangci-lint.yml | grep [[:space:]]version | sed 's/.*version: //')
$(GOLANGCI_LINT): # Build golangci-lint into tools folder.
@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(GOLANGCI_LINT_BIN) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GOLANGCI_LINT_PKG) $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER); \
fi
$(GORELEASER):
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GORELEASER_PKG) $(GORELEASER_BIN) $(GORELEASER_VERSION)
$(GORELEASER): # Build goreleaser into tools folder.
@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(GOLANGCI_LINT_BIN) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GORELEASER_PKG) $(GORELEASER_BIN) $(GORELEASER_VERSION); \
fi

View File

@ -7,40 +7,40 @@ spec:
# kkzone: cn
# work_dir is the directory where the artifact is extracted.
# work_dir: /var/lib/kubekey/
# cni binary
cni_version: v1.2.0
# the version of kubernetes to be installed.
# should be greater than or equal to kube_version_min_required.
kube_version: v1.23.15
# helm binary
helm_version: v3.14.2
# docker-compose binary
dockercompose_version: v2.24.6
# harbor image tag
harbor_version: v2.10.1
# registry image tag
registry_version: 2.8.3
# keepalived image tag
keepalived_version: stable
# runc binary
runc_version: v1.1.11
# cni binary
cni_version: v1.2.0
# calicoctl binary
calico_version: v3.27.2
# etcd binary
etcd_version: v3.5.6
# harbor image tag
# harbor_version: v2.10.1
# docker-compose binary
# dockercompose_version: v2.24.6
# registry image tag
# registry_version: 2.8.3
# keepalived image tag
# keepalived_version: stable
# crictl binary
crictl_version: v1.29.0
# cilium helm
cilium_version: 1.15.4
# kubeovn helm
kubeovn_version: 0.1.0
# hybridnet helm
hybridnet_version: 0.6.8
# containerd binary
containerd_version: v1.7.0
# docker binary
docker_version: 24.0.6
# cilium helm
# cilium_version: 1.15.4
# kubeovn helm
# kubeovn_version: 0.1.0
# hybridnet helm
# hybridnet_version: 0.6.8
# containerd binary
# containerd_version: v1.7.0
# runc binary
# runc_version: v1.1.11
# cridockerd
cridockerd_version: v0.3.10
# the version of kubernetes to be installed.
# should be greater than or equal to kube_version_min_required.
kube_version: v1.23.15
# cridockerd_version: v0.3.10
# nfs provisioner helm version
nfs_provisioner_version: 4.0.18
# nfs_provisioner_version: 4.0.18

View File

@ -4,22 +4,13 @@ metadata:
name: default
spec:
hosts: # you can set all nodes here, or set nodes in specific groups.
# localhost: {} localhost is the default host.
# node1:
# ssh_host: xxxxx
# ssh_port: 22
# ssh_user: user
# ssh_password: password
# node2:
# ssh_host: xxxxx
# ssh_port: 22
# ssh_user: user
# ssh_password: password
# node3:
# ssh_host: xxxxx
# ssh_port: 22
# ssh_user: user
# ssh_password: password
# connector:
# type: ssh
# host: node1
# port: 22
# user: root
# password: 123456
groups:
# all kubernetes nodes.
k8s_cluster:
@ -38,10 +29,10 @@ spec:
etcd:
hosts:
- localhost
image_registry:
hosts:
- localhost
# image_registry:
# hosts:
# - localhost
# nfs nodes for registry storage. and kubernetes nfs storage
nfs:
hosts:
- localhost
# nfs:
# hosts:
# - localhost

View File

@ -5,8 +5,8 @@
tasks:
- name: Package image
image:
pull: "{{ image_manifests }}"
when: image_manifests|length > 0
pull: "{{ .image_manifests }}"
when: .image_manifests | default list | len | lt 0
- name: Export artifact
command: |
cd {{ work_dir }} && tar -czvf kubekey-artifact.tar.gz kubekey/
cd {{ .work_dir }} && tar -czvf kubekey-artifact.tar.gz kubekey/

View File

@ -13,7 +13,7 @@
- vars/certs_renew.yaml
roles:
- role: certs/renew-etcd
when: groups['etcd']|length > 0 && renew_etcd
when: and (.groups.etcd | default list | len | lt 0) .renew_etcd
- hosts:
- image_registry
@ -22,7 +22,7 @@
- vars/certs_renew.yaml
roles:
- role: certs/renew-registry
when: groups['image_registry']|length > 0 && renew_image_registry
when: and (.groups.image_registry | default list | len | lt 0) .renew_image_registry
- hosts:
- kube_control_plane
@ -31,4 +31,4 @@
tags: ["certs"]
roles:
- role: certs/renew-kubernetes
when: groups['kube_control_plane']|length > 0 && renew_kubernetes
when: and (.groups.kube_control_plane | default list | len | lt 0) .renew_kubernetes

View File

@ -37,7 +37,7 @@
- kube_control_plane
roles:
- role: install/certs
when: renew_certs.enabled|default_if_none:false
when: .renew_certs.enabled
- hosts:
- k8s_cluster|random

View File

@ -6,8 +6,11 @@
- name: Copy post install scripts to remote
ignore_errors: yes
copy:
src: "{{ work_dir }}/scripts/post_install_{{ inventory_name }}.sh"
dest: "/etc/kubekey/scripts/post_install_{{ inventory_name }}.sh"
src: |
{{ .work_dir }}/scripts/post_install_{{ .inventory_name }}.sh
dest: |
/etc/kubekey/scripts/post_install_{{ .inventory_name }}.sh
mode: 0755
- name: Execute post install scripts
command: |
for file in /etc/kubekey/scripts/post_install_*.sh; do

View File

@ -6,8 +6,11 @@
- name: Copy pre install scripts to remote
ignore_errors: yes
copy:
src: "{{ work_dir }}/scripts/pre_install_{{ inventory_name }}.sh"
dest: "/etc/kubekey/scripts/pre_install_{{ inventory_name }}.sh"
src: |
{{ .work_dir }}/scripts/pre_install_{{ .inventory_name }}.sh
dest: |
/etc/kubekey/scripts/pre_install_{{ .inventory_name }}.sh
mode: 0755
- name: Execute pre install scripts
command: |
for file in /etc/kubekey/scripts/pre_install_*.sh; do

View File

@ -3,7 +3,7 @@
- localhost
roles:
- role: precheck/artifact_check
when: artifact_file | defined
when: and .artifact.artifact_file (ne .artifact.artifact_file "")
- hosts:
- k8s_cluster

View File

@ -1,7 +1,27 @@
global_registry: ""
dockerio_registry: "{% if (global_registry != '') %}{{ global_registry }}{% else %}docker.io{% endif %}"
quayio_registry: "{% if (global_registry != '') %}{{ global_registry }}{% else %}quay.io{% endif %}"
ghcrio_registry: "{% if (global_registry != '') %}{{ global_registry }}{% else %}ghcr.io{% endif %}"
k8s_registry: "{% if (global_registry != '') %}{{ global_registry }}{% else %}registry.k8s.io{% endif %}"
dockerio_registry: |
{{- if ne .global_registry "" -}}
{{ .global_registry }}
{{- else -}}
docker.io
{{- end -}}
quayio_registry: |
{{- if ne .global_registry "" -}}
{{ .global_registry }}
{{- else -}}
quay.io
{{- end -}}
ghcrio_registry: |
{{- if ne .global_registry "" -}}
{{ .global_registry }}
{{- else -}}
ghcr.io
{{- end -}}
k8s_registry: |
{{- if ne .global_registry "" -}}
{{ .global_registry }}
{{- else -}}
registry.k8s.io
{{- end -}}
security_enhancement: false

View File

@ -1,21 +1,45 @@
cni:
kube_proxy: "{{ kubernetes.kube_proxy.enabled|default_if_none:true }}"
kube_proxy: |
{{- .kubernetes.kube_proxy.enabled | default true -}}
# apiVersion for policy may change for different kubernetes versions. https://kube-api.ninja
api_version_policy: "{%if (kube_version|version:'<v1.21') %}policy/v1beta1{% else %}policy/v1{% endif %}"
kube_network_plugin: "{{ kubernetes.kube_network_plugin | default_if_none:'calico' }}"
api_version_policy: |
{{- if .kube_version | semverCompare "<v1.21" -}}
policy/v1beta1
{{- else -}}
policy/v1
{{- end -}}
kube_network_plugin: |
{{- .kubernetes.kube_network_plugin | default "calico" -}}
# ip cidr config.
# dual stack. support ipv4/ipv6
ipv6_support: "{% if (kubernetes.networking.pod_cidr|split:','|length>1) %}true{% else %}false{% endif %}"
kube_pods_v4_cidr: "{{ kubernetes.networking.pod_cidr|default_if_none:'10.233.64.0/18'|split:','|first }}"
kube_pods_v6_cidr: "{{ kubernetes.networking.pod_cidr|default_if_none:'10.233.64.0/18'|split:','|last }}"
node_cidr_mask_size: "{{ kubernetes.controller_manager.kube_network_node_prefix|default_if_none:24 }}"
kube_svc_cidr: "{{ kubernetes.networking.service_cidr|default_if_none:'10.233.0.0/18' }}"
ipv6_support: |
{{- if gt ( .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | len) 1 -}}
true
{{- else -}}
false
{{- end -}}
kube_pods_v4_cidr: |
{{- .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | first -}}
kube_pods_v6_cidr: |
{{- if .cni.ipv6_support -}}
{{- .kubernetes.networking.pod_cidr | splitList "," | last -}}
{{- end -}}
node_cidr_mask_size: |
{{- .kubernetes.controller_manager.kube_network_node_prefix | default 24 -}}
kube_svc_cidr: |
{{- .kubernetes.networking.service_cidr | default "10.233.0.0/18" -}}
multus:
enabled: false
image: "{{ dockerio_registry }}/kubesphere/multus-cni:v3.8"
image: |
{{ .dockerio_registry }}/kubesphere/multus-cni:v3.8
calico:
# when the cluster has more than 50 nodes, it defaults to true.
typha: "{%if (groups['k8s_cluster']|length > 50) %}true{% else %}false{% endif %}"
typha: |
{{- if gt (.groups.k8s_cluster | default list | len) 50 -}}
true
{{- else -}}
false
{{- end -}}
veth_mtu: 0
ipip_mode: Always
vxlan_mode: Never
@ -24,38 +48,65 @@ cni:
# true is enabled
default_ip_pool: true
# image
cni_image: "{{ dockerio_registry }}/calico/cni:{{ calico_version }}"
node_image: "{{ dockerio_registry }}/calico/node:{{ calico_version }}"
kube_controller_image: "{{ dockerio_registry }}/calico/kube-controllers:{{ calico_version }}"
typha_image: "{{ dockerio_registry }}/calico/typha:{{ calico_version }}"
cni_image: |
{{ .dockerio_registry }}/calico/cni:{{ .calico_version }}
node_image: |
{{ .dockerio_registry }}/calico/node:{{ .calico_version }}
kube_controller_image: |
{{ .dockerio_registry }}/calico/kube-controllers:{{ .calico_version }}
typha_image: |
{{ .dockerio_registry }}/calico/typha:{{ .calico_version }}
replicas: 1
node_selector: {}
flannel:
# https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md
backend: vxlan
cni_plugin_image: "{{ dockerio_registry }}/flannel/flannel-cni-plugin:v1.4.0-flannel1"
flannel_image: "{{ dockerio_registry }}/flannel/flannel:{{ flannel_version }}"
cni_plugin_image: |
{{ .dockerio_registry }}/flannel/flannel-cni-plugin:v1.4.0-flannel1
flannel_image: |
{{ .dockerio_registry }}/flannel/flannel:{{ .flannel_version }}
cilium:
# image repo
cilium_repository: "{{ quayio_registry }}/cilium/cilium"
certgen_repository: "{{ quayio_registry }}/cilium/certgen"
hubble_relay_repository: "{{ quayio_registry }}/cilium/hubble-relay"
hubble_ui_backend_repository: "{{ quayio_registry }}/cilium/hubble-ui-backend"
hubble_ui_repository: "{{ quayio_registry }}/cilium/hubble-ui"
cilium_envoy_repository: "{{ quayio_registry }}/cilium/cilium-envoy"
cilium_etcd_operator_repository: "{{ quayio_registry }}/cilium/cilium-etcd-operator"
operator_repository: "{{ quayio_registry }}/cilium/operator"
startup_script_repository: "{{ quayio_registry }}/cilium/startup-script"
clustermesh_apiserver_repository: "{{ quayio_registry }}/cilium/clustermesh-apiserver"
busybox_repository: "{{ dockerio_registry }}/library/busybox"
spire_agent_repository: "{{ ghcrio_registry }}/spiffe/spire-agent"
spire_server_repository: "{{ ghcrio_registry }}/spiffe/spire-server"
k8s_endpoint: "{% if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ groups['kube_control_plane']|first }}{% endif %}"
k8s_port: "{{ kubernetes.apiserver.port|default_if_none:6443 }}"
cilium_repository: |
{{ .quayio_registry }}/cilium/cilium
certgen_repository: |
{{ .quayio_registry }}/cilium/certgen
hubble_relay_repository: |
{{ .quayio_registry }}/cilium/hubble-relay
hubble_ui_backend_repository: |
{{ .quayio_registry }}/cilium/hubble-ui-backend
hubble_ui_repository: |
{{ .quayio_registry }}/cilium/hubble-ui
cilium_envoy_repository: |
{{ .quayio_registry }}/cilium/cilium-envoy
cilium_etcd_operator_repository: |
{{ .quayio_registry }}/cilium/cilium-etcd-operator
operator_repository: |
{{ .quayio_registry }}/cilium/operator
startup_script_repository: |
{{ .quayio_registry }}/cilium/startup-script
clustermesh_apiserver_repository: |
{{ .quayio_registry }}/cilium/clustermesh-apiserver
busybox_repository: |
{{ .dockerio_registry }}/library/busybox
spire_agent_repository: |
{{ .ghcrio_registry }}/spiffe/spire-agent
spire_server_repository: |
{{ .ghcrio_registry }}/spiffe/spire-server
k8s_endpoint: |
{{- if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") -}}
{{- .kubernetes.control_plane_endpoint -}}
{{- else -}}
{{- .groups.kube_control_plane | default list | first -}}
{{- end -}}
k8s_port: |
{{- .kubernetes.apiserver.port | default 6443 -}}
kubeovn:
replica: 1
registry: "{{ dockerio_registry }}/kubeovn"
registry: |
{{ .dockerio_registry }}/kubeovn
hybridnet:
registry: "{{ dockerio_registry }}"
registry: |
{{- .dockerio_registry -}}
# hybridnet_image: hybridnetdev/hybridnet
# hybridnet_tag: v0.8.8

View File

@ -1,9 +1,11 @@
---
- name: Generate calico manifest
template:
src: "calico/{{ calico_version|split:'.'|slice:':2'|join:'.' }}.yaml"
dest: "/etc/kubernetes/cni/calico-{{ calico_version }}.yaml"
src: |
calico/{{ slice (.calico_version | splitList ".") 0 2 | join "." }}.yaml
dest: |
/etc/kubernetes/cni/calico-{{ .calico_version }}.yaml
- name: Apply calico
command: |
/usr/local/bin/kubectl apply -f /etc/kubernetes/cni/calico-{{ calico_version }}.yaml --force
kubectl apply -f /etc/kubernetes/cni/calico-{{ .calico_version }}.yaml --force

View File

@ -1,33 +1,35 @@
---
- name: Sync cilium helm chart to remote
copy:
src: "{{ work_dir }}/kubekey/cni/cilium-{{ cilium_version }}.tgz"
dest: "/etc/kubernetes/cni/cilium-{{ cilium_version }}.tgz"
src: |
{{ .work_dir }}/kubekey/cni/cilium-{{ .cilium_version }}.tgz
dest: |
/etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz
# https://docs.cilium.io/en/stable/installation/k8s-install-helm/
- name: Install cilium
command: |
helm install cilium /etc/kubernetes/cni/cilium-{{ cilium_version }}.tgz --namespace kube-system \
--set image.repository={{ cilium_repository }} \
--set preflight.image.repository={{ cilium_repository }} \
--set certgen.image.repository={{ certgen_repository }} \
--set hubble.relay.image.repository={{ hubble_relay_repository }} \
--set hubble.ui.backend.image.repository={{ hubble_ui_backend_repository }} \
--set hubble.ui.frontend.image.repository={{ hubble_ui_repository }} \
--set envoy.image.repository={{ cilium_envoy_repository }} \
--set etcd.image.repository={{ cilium_etcd_operator_repository }} \
--set operator.image.repository={{ operator_repository }} \
--set nodeinit.image.repository={{ startup_script_repository }} \
--set clustermesh.apiserver.image.repository={{ clustermesh_apiserver_repository }} \
--set authentication.mutual.spire.install.initImage.image.repository={{ busybox_repository }} \
--set authentication.mutual.spire.install.agent.image.repository={{ spire_agent_repository }} \
--set authentication.mutual.spire.install.server.image.repository={{ spire_server_repository }} \
--set operator.replicas={{ cni.cilium.operator_replicas }} \
--set ipv6.enabled={{ cni.ipv6_support }} \
--set ipv4NativeRoutingCIDR: {{ cni.kube_pods_v4_cidr }} \
{% if (cni.ipv6_support) %}
--set ipv6NativeRoutingCIDR: {{ cni.kube_pods_v6_cidr }} \
{% endif %}
{% if (cni.kube_proxy) %}
--set kubeProxyReplacement=strict --set k8sServiceHost={{ cni.cilium.k8s_endpoint }} --set k8sServicePort={{ cni.cilium.k8s_port }}
{% endif %}
helm install cilium /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz --namespace kube-system \
--set image.repository={{ .cni.cilium.cilium_repository }} \
--set preflight.image.repository={{ .cni.cilium.cilium_repository }} \
--set certgen.image.repository={{ .cni.cilium.certgen_repository }} \
--set hubble.relay.image.repository={{ .cni.cilium.hubble_relay_repository }} \
--set hubble.ui.backend.image.repository={{ .cni.cilium.hubble_ui_backend_repository }} \
--set hubble.ui.frontend.image.repository={{ .cni.cilium.hubble_ui_repository }} \
--set envoy.image.repository={{ .cni.cilium.cilium_envoy_repository }} \
--set etcd.image.repository={{ .cni.cilium.cilium_etcd_operator_repository }} \
--set operator.image.repository={{ .cni.cilium.operator_repository }} \
--set nodeinit.image.repository={{ .cni.cilium.startup_script_repository }} \
--set clustermesh.apiserver.image.repository={{ .cni.cilium.clustermesh_apiserver_repository }} \
--set authentication.mutual.spire.install.initImage.image.repository={{ .cni.cilium.busybox_repository }} \
--set authentication.mutual.spire.install.agent.image.repository={{ .cni.cilium.spire_agent_repository }} \
--set authentication.mutual.spire.install.server.image.repository={{ .cni.cilium.spire_server_repository }} \
--set operator.replicas={{ .cni.cilium.operator_replicas }} \
--set ipv6.enabled={{ .cni.ipv6_support }} \
--set ipv4NativeRoutingCIDR: {{ .cni.kube_pods_v4_cidr }} \
{{- if .cni.ipv6_support -}}
--set ipv6NativeRoutingCIDR: {{ .cni.kube_pods_v6_cidr }} \
{{- end -}}
{{- if .cni.kube_proxy -}}
--set kubeProxyReplacement=strict --set k8sServiceHost={{ .cni.cilium.k8s_endpoint }} --set k8sServicePort={{ .cni.cilium.k8s_port }}
{{- end -}}

View File

@ -2,9 +2,10 @@
# https://github.com/flannel-io/flannel/blob/master/Documentation/kubernetes.md
- name: Generate flannel manifest
template:
src: "flannel/flannel.yaml"
dest: "/etc/kubernetes/cni/flannel-{{ flannel_version }}.yaml"
src: flannel/flannel.yaml
dest: |
/etc/kubernetes/cni/flannel-{{ .flannel_version }}.yaml
- name: Apply calico
command: |
/usr/local/bin/kubectl apply -f /etc/kubernetes/cni/flannel-{{ flannel_version }}.yaml
kubectl apply -f /etc/kubernetes/cni/flannel-{{ .flannel_version }}.yaml

View File

@ -1,17 +1,19 @@
---
- name: Sync hybridnet helm chart to remote
copy:
src: "{{ work_dir }}/kubekey/cni/hybridnet-{{ hybridnet_version }}.tgz"
dest: "/etc/kubernetes/cni/hybridnet-{{ hybridnet_version }}.tgz"
src: |
{{ .work_dir }}/kubekey/cni/hybridnet-{{ .hybridnet_version }}.tgz
dest: |
/etc/kubernetes/cni/hybridnet-{{ .hybridnet_version }}.tgz
# https://artifacthub.io/packages/helm/hybridnet/hybridnet
- name: Install hybridnet
command: |
helm install hybridnet /etc/kubernetes/cni/hybridnet-{{ hybridnet_version }}.tgz --namespace kube-system \
{% if (cni.hybridnet.hybridnet_image|defined && cni.hybridnet.hybridnet_image != '') %}
--set images.hybridnet.image={{ cni.hybridnet.hybridnet_image }} \
{% endif %}
{% if (cni.hybridnet.hybridnet_tag|defined && cni.hybridnet.hybridnet_tag != '') %}
--set images.hybridnet.tag={{ cni.hybridnet.hybridnet_tag }} \
{% endif %}
--set image.registryURL={{ cni.hybridnet.registry }} \
helm install hybridnet /etc/kubernetes/cni/hybridnet-{{ .hybridnet_version }}.tgz --namespace kube-system \
{{- if ne .cni.hybridnet.hybridnet_image "" -}}
--set images.hybridnet.image={{ .cni.hybridnet.hybridnet_image }} \
{{- end -}}
{{- if ne .cni.hybridnet.hybridnet_tag "" -}}
--set images.hybridnet.tag={{ .cni.hybridnet.hybridnet_tag }} \
{{- end -}}
--set image.registryURL={{ .cni.hybridnet.registry }} \

View File

@ -7,18 +7,24 @@
# kubeovn-0.1.0.tgz is helm version not helm appVersion
- name: Sync kubeovn helm chart to remote
copy:
src: "{{ work_dir }}/kubekey/cni/kubeovn-{{ kubeovn_version }}.tgz"
dest: "/etc/kubernetes/cni/kubeovn-{{ kubeovn_version }}.tgz"
src: |
{{ .work_dir }}/kubekey/cni/kubeovn-{{ .kubeovn_version }}.tgz
dest: |
/etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz
# https://kubeovn.github.io/docs/stable/start/one-step-install/#helm-chart
- name: Install kubeovn
command: |
helm install kubeovn /etc/kubernetes/cni/kubeovn-{{ kubeovn_version }}.tgz --set replicaCount={{ cni.kubeovn.replica }} \
--set MASTER_NODES={% for h in groups['kube_control_plane'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %} \
--set global.registry.address={{ cni.kubeovn.registry }} \
--set ipv4.POD_CIDR={{ cni.kubeovn.kube_pods_v4_cidr }} --set ipv4.SVC_CIDR={{ cni.kubeovn.kube_svc_cidr }} \
{% if (cni.ipv6_support) %}
helm install kubeovn /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz --set replicaCount={{ .cni.kubeovn.replica }} \
{{ $ips := list }}
{{- range .groups.kube_control_plane | default list -}}
{{- $ips = append $ips (index $.inventory_hosts . "internal_ipv4") -}}
{{- end -}}
--set MASTER_NODES={{ $ips |join "," }} \
--set global.registry.address={{ .cni.kubeovn.registry }} \
--set ipv4.POD_CIDR={{ .cni.kubeovn.kube_pods_v4_cidr }} --set ipv4.SVC_CIDR={{ .cni.kubeovn.kube_svc_cidr }} \
{{- if .cni.ipv6_support -}}
--set networking.NET_STACK=dual_stack \
--set dual_stack.POD_CIDR={{ cni.kubeovn.kube_pods_v4_cidr }},{{ cni.kubeovn.kube_pods_v6_cidr }} \
--set dual_stack.SVC_CIDR={{ cni.kubeovn.kube_svc_cidr }} \
{% endif %}
--set dual_stack.POD_CIDR={{ .cni.kubeovn.kube_pods_v4_cidr }},{{ .cni.kubeovn.kube_pods_v6_cidr }} \
--set dual_stack.SVC_CIDR={{ .cni.kubeovn.kube_svc_cidr }} \
{{- end -}}

View File

@ -1,18 +1,18 @@
---
- include_tasks: calico.yaml
when: cni.kube_network_plugin == "calico"
when: .cni.kube_network_plugin | eq "calico"
- include_tasks: flannel.yaml
when: cni.kube_network_plugin == "flannel"
when: .cni.kube_network_plugin | eq "flannel"
- include_tasks: cilium.yaml
when: cni.kube_network_plugin == "cilium"
when: .cni.kube_network_plugin | eq "cilium"
- include_tasks: kubeovn.yaml
when: cni.kube_network_plugin == "kubeovn"
when: .cni.kube_network_plugin | eq "kubeovn"
- include_tasks: hybridnet.yaml
when: cni.kube_network_plugin == "hyvbridnet"
when: .cni.kube_network_plugin | eq "hyvbridnet"
- include_tasks: multus.yaml
when: cni.multus.enabled
when: .cni.multus.enabled

View File

@ -2,7 +2,7 @@
# Source: calico/templates/calico-kube-controllers.yaml
# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
apiVersion: {{ cni.api_version_policy }}
apiVersion: {{ .cni.api_version_policy }}
kind: PodDisruptionBudget
metadata:
name: calico-kube-controllers
@ -15,12 +15,12 @@ spec:
matchLabels:
k8s-app: calico-kube-controllers
{% if (cni.calico.typha) %}
{{- if .cni.calico.typha }}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
apiVersion: {{ cni.api_version_policy }}
apiVersion: {{ .cni.api_version_policy }}
kind: PodDisruptionBudget
metadata:
name: calico-typha
@ -32,4 +32,4 @@ spec:
selector:
matchLabels:
k8s-app: calico-typha
{% endif %}
{{- end }}

View File

@ -29,14 +29,14 @@ metadata:
namespace: kube-system
data:
# You must set a non-zero value for Typha replicas below.
typha_service_name: {% if (cni.calico.typha) %}"calico-typha"{% else %}"none"{% endif %}
typha_service_name: "{{ if .cni.calico.typha }}calico-typha{{ else }}none{{ end }}"
# Configure the backend to use.
calico_backend: "bird"
# Configure the MTU to use for workload interfaces and tunnels.
# By default, MTU is auto-detected, and explicitly setting this field should not be required.
# You can override auto-detection by providing a non-zero value.
veth_mtu: "{{ cni.calico.veth_mtu }}"
veth_mtu: "{{ .cni.calico.veth_mtu }}"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
@ -4715,7 +4715,7 @@ subjects:
name: calico-cni-plugin
namespace: kube-system
{% if (cni.calico.typha) %}
{{- if .cni.calico.typha }}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
@ -4736,8 +4736,7 @@ spec:
name: calico-typha
selector:
k8s-app: calico-typha
{% endif %}
{{- end }}
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
@ -4785,7 +4784,7 @@ spec:
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: {{ cni.calico.cni_image }}
image: {{ .cni.calico.cni_image }}
imagePullPolicy: IfNotPresent
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
envFrom:
@ -4813,7 +4812,7 @@ spec:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: {{ cni.calico.cni_image }}
image: {{ .cni.calico.cni_image }}
imagePullPolicy: IfNotPresent
command: ["/opt/cni/bin/install"]
envFrom:
@ -4856,7 +4855,7 @@ spec:
# i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed
# in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode.
- name: "mount-bpffs"
image: {{ cni.calico.node_image }}
image: {{ .cni.calico.node_image }}
imagePullPolicy: IfNotPresent
command: ["calico-node", "-init", "-best-effort"]
volumeMounts:
@ -4882,7 +4881,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: {{ cni.calico.node_image }}
image: {{ .cni.calico.node_image }}
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:
@ -4893,14 +4892,14 @@ spec:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
{% if (cni.calico.typha) %}
{{- if .cni.calico.typha }}
# Typha support: controlled by the ConfigMap.
- name: FELIX_TYPHAK8SSERVICENAME
valueFrom:
configMapKeyRef:
name: calico-config
key: typha_service_name
{% endif %}
{{- end }}
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
@ -4927,36 +4926,36 @@ spec:
value: "can-reach=$(NODEIP)"
- name: IP
value: "autodetect"
{% if (cni.ipv6_support) %}
{{- if .cni.ipv6_support }}
- name: IP6
value: "autodetect"
{% endif %}
{{- end }}
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "{{ cni.calico.ipip_mode }}"
value: "{{ .cni.calico.ipip_mode }}"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "{{ cni.calico.vxlan_mode }}"
{% if (cni.calico.ipv4pool_nat_outgoing) %}
value: "{{ .cni.calico.vxlan_mode }}"
{{- if .cni.calico.ipv4pool_nat_outgoing }}
- name: CALICO_IPV4POOL_NAT_OUTGOING
value: "true"
{% else %}
{{- else }}
- name: CALICO_IPV4POOL_NAT_OUTGOING
value: "false"
{% endif %}
{% if (cni.ipv6_support) %}
{{- end }}
{{- if .cni.ipv6_support }}
# Enable or Disable VXLAN on the default IPv6 IP pool.
- name: CALICO_IPV6POOL_VXLAN
value: "Always"
- name: CALICO_IPV6POOL_NAT_OUTGOING
value: "true"
{% else %}
{{- else }}
# Enable or Disable VXLAN on the default IPv6 IP pool.
- name: CALICO_IPV6POOL_VXLAN
value: "Never"
- name: CALICO_IPV6POOL_NAT_OUTGOING
value: "false"
{% endif %}
{{- end }}
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
@ -4975,43 +4974,43 @@ spec:
configMapKeyRef:
name: calico-config
key: veth_mtu
{% if cni.calico.default_ip_pool %}
{{- if .cni.calico.default_ip_pool }}
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect.
- name: CALICO_IPV4POOL_CIDR
value: "{{ cni.kube_pods_v4_cidr }}"
value: "{{ .cni.kube_pods_v4_cidr }}"
- name: CALICO_IPV4POOL_BLOCK_SIZE
value: "{{ cni.node_cidr_mask_size }}"
{% if (cni.ipv6_support) %}
value: "{{ .cni.node_cidr_mask_size }}"
{{- if .cni.ipv6_support }}
- name: CALICO_IPV6POOL_CIDR
value: "{{ cni.kube_pods_v6_cidr }}"
value: "{{ .cni.kube_pods_v6_cidr }}"
- name: CALICO_IPV6POOL_BLOCK_SIZE
value: "120"
{% endif %}
{% else %}
{{- end }}
{{- else }}
- name: NO_DEFAULT_POOLS
value: "true"
- name: CALICO_IPV4POOL_CIDR
value: ""
{% if (cni.ipv6_support) %}
{{- if .cni.ipv6_support }}
- name: CALICO_IPV6POOL_CIDR
value: ""
{% endif %}
{% endif %}
{{- end }}
{{- end }}
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
{% if (cni.ipv6_support) %}
{{- if .cni.ipv6_support }}
- name: FELIX_IPV6SUPPORT
value: "true"
{% else %}
{{- else }}
- name: FELIX_IPV6SUPPORT
value: "false"
{% endif %}
{{- end }}
- name: FELIX_HEALTHENABLED
value: "true"
- name: FELIX_DEVICEROUTESOURCEADDRESS
@ -5135,7 +5134,7 @@ metadata:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: {{ cni.calico.replicas }}
replicas: {{ .cni.calico.replicas }}
selector:
matchLabels:
k8s-app: calico-kube-controllers
@ -5150,7 +5149,7 @@ spec:
spec:
nodeSelector:
kubernetes.io/os: linux
{{ cni.calico.node_selector|to_yaml:8|safe }}
{{ .cni.calico.node_selector|to_yaml:8|safe }}
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
@ -5175,7 +5174,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: {{ cni.calico.kube_controller_image }}
image: {{ .cni.calico.kube_controller_image }}
imagePullPolicy: IfNotPresent
env:
# Choose which controllers to run.
@ -5199,7 +5198,7 @@ spec:
- -r
periodSeconds: 10
{% if (cni.calico.typha) %}
{{- if .cni.calico.typha }}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Deployment of Typha to back the above service.
@ -5218,7 +5217,7 @@ spec:
# We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
# (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
# production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
replicas: {{ cni.calico.replicas }}
replicas: {{ .cni.calico.replicas }}
revisionHistoryLimit: 2
selector:
matchLabels:
@ -5245,7 +5244,7 @@ spec:
spec:
nodeSelector:
kubernetes.io/os: linux
{{ cni.calico.node_selector|to_yaml:8|safe }}
{{ .cni.calico.node_selector|to_yaml:8|safe }}
hostNetwork: true
# Typha supports graceful shut down, disconnecting clients slowly during the grace period.
# The TYPHA_SHUTDOWNTIMEOUTSECS env var should be kept in sync with this value.
@ -5279,7 +5278,7 @@ spec:
securityContext:
fsGroup: 65534
containers:
- image: {{ cni.calico.typha_image }}
- image: {{ .cni.calico.typha_image }}
imagePullPolicy: IfNotPresent
name: calico-typha
ports:
@ -5336,4 +5335,4 @@ spec:
host: localhost
periodSeconds: 10
timeoutSeconds: 10
{% endif %}
{{- end }}

View File

@ -90,14 +90,14 @@ data:
}
net-conf.json: |
{
"Network": "{{ cni.kube_pods_v4_cidr }}",
{% if (cni.ipv6_support) %}
"Network": "{{ .cni.kube_pods_v4_cidr }}",
{{- if .cni.ipv6_support }}
"EnableIPv6": true,
"IPv6Network":"{{ cni.kube_pods_v6_cidr }}",
{% endif %}
"EnableNFTables": {{ cni.kube_proxy }},
"IPv6Network":"{{ .cni.kube_pods_v6_cidr }}",
{{- end }}
"EnableNFTables": {{ .cni.kube_proxy }},
"Backend": {
"Type": "{{ cni.flannel.backend }}"
"Type": "{{ .cni.flannel.backend }}"
}
}
---
@ -137,7 +137,7 @@ spec:
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
image: {{ cni.flannel.cni_plugin_image }}
image: {{ .cni.flannel.cni_plugin_image }}
command:
- cp
args:
@ -148,7 +148,7 @@ spec:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
image: {{ cni.flannel.flannel_image }}
image: {{ .cni.flannel.flannel_image }}
command:
- cp
args:
@ -162,7 +162,7 @@ spec:
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: {{ cni.flannel.flannel_image }}
image: {{ .cni.flannel.flannel_image }}
command:
- /opt/bin/flanneld
args:

View File

@ -169,7 +169,7 @@ spec:
serviceAccountName: multus
containers:
- name: kube-multus
image: {{ cni.multus.image }}
image: {{ .cni.multus.image }}
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=auto"

View File

@ -1,3 +1,4 @@
kata:
enabled: false
image: kubesphere/kata-deploy:stable
image: |
{{ .dockerio_registry }}/kubesphere/kata-deploy:stable

View File

@ -1,11 +1,11 @@
---
- name: Generate kata deploy file
template:
src: "kata-deploy.yaml"
dest: "/etc/kubernetes/addons/kata-deploy.yaml"
when: kata.enabled
src: kata-deploy.yaml
dest: /etc/kubernetes/addons/kata-deploy.yaml
when: .kata.enabled
- name: Deploy kata
command: |
kubectl apply -f /etc/kubernetes/addons/kata-deploy.yaml
when: kata.enabled
when: .kata.enabled

View File

@ -44,7 +44,7 @@ spec:
serviceAccountName: kata-label-node
containers:
- name: kube-kata
image: {{ kata.image }}
image: {{ .kata.image }}
imagePullPolicy: IfNotPresent
lifecycle:
preStop:

View File

@ -1,3 +1,4 @@
nfd:
enabled: false
image: kubesphere/node-feature-discovery:v0.10.0
image: |
{{ .dockerio_registry }}/kubesphere/node-feature-discovery:v0.10.0

View File

@ -1,11 +1,11 @@
---
- name: Generate nfd deploy file
template:
src: "nfd-deploy.yaml"
dest: "/etc/kubernetes/addons/nfd-deploy.yaml"
when: nfd.enabled
src: nfd-deploy.yaml
dest: /etc/kubernetes/addons/nfd-deploy.yaml
when: .nfd.enabled
- name: Deploy nfd
command: |
kubectl apply -f /etc/kubernetes/addons/nfd-deploy.yaml
when: nfd.enabled
when: .nfd.enabled

View File

@ -9,7 +9,6 @@ kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.7.0
creationTimestamp: null
name: nodefeaturerules.nfd.k8s-sigs.io
spec:
group: nfd.k8s-sigs.io
@ -500,7 +499,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: {{ nfd.image }}
image: {{ .nfd.image }}
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
@ -564,7 +563,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: {{ nfd.image }}
image: {{ .nfd.image }}
imagePullPolicy: IfNotPresent
name: nfd-worker
securityContext:

View File

@ -2,11 +2,14 @@ sc:
local:
enabled: true
default: true
provisioner_image: openebs/provisioner-localpv:3.3.0
linux_utils_image: openebs/linux-utils:3.3.0
provisioner_image: |
{{ .dockerio_registry }}/openebs/provisioner-localpv:3.3.0
linux_utils_image: |
{{ .dockerio_registry }}/openebs/linux-utils:3.3.0
path: /var/openebs/local
nfs: # each k8s_cluster node should install nfs-utils
enabled: false
default: false
server: "{{ groups['nfs']|first }}"
server: |
{{ groups.nfs | first }}
path: /share/kubernetes

View File

@ -1,9 +1,9 @@
---
- name: Generate local manifest
template:
src: "local-volume.yaml"
dest: "/etc/kubernetes/addons/local-volume.yaml"
src: local-volume.yaml
dest: /etc/kubernetes/addons/local-volume.yaml
- name: deploy local
command: |
/usr/local/bin/kubectl apply -f /etc/kubernetes/addons/local-volume.yaml
kubectl apply -f /etc/kubernetes/addons/local-volume.yaml

View File

@ -1,6 +1,6 @@
---
- include_tasks: local.yaml
when: sc.local.enabled
when: .sc.local.enabled
- include_tasks: nfs.yaml
when: sc.nfs.enabled
when: .sc.nfs.enabled

View File

@ -1,11 +1,13 @@
---
- name: Sync nfs provisioner helm to remote
copy:
src: "{{ work_dir }}/kubekey/sc/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz"
dest: "/etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz"
src: |
{{ .work_dir }}/kubekey/sc/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz
dest: |
/etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz
- name: Deploy nfs provisioner
command: |
helm install nfs-subdir-external-provisioner /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz --namespace kube-system \
--set nfs.server={{ sc.nfs.server }} --set nfs.path={{ sc.nfs.path }} \
--set storageClass.defaultClass={% if (sc.local.default) %}true{% else %}false{% endif %}
helm upgrade --install nfs-subdir-external-provisioner /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz --namespace kube-system \
--set nfs.server={{ .sc.nfs.server }} --set nfs.path={{ .sc.nfs.path }} \
--set storageClass.defaultClass={{ if .sc.nfs.default }}true{{ else }}false{{ end }}

View File

@ -6,13 +6,13 @@ metadata:
name: local
annotations:
storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce"]'
storageclass.beta.kubernetes.io/is-default-class: {% if (sc.local.default) %}"true"{% else %}"false"{% endif %}
storageclass.beta.kubernetes.io/is-default-class: "{{ if .sc.local.default }}true{{ else }}false{{ end }}"
openebs.io/cas-type: local
cas.openebs.io/config: |
- name: StorageType
value: "hostpath"
- name: BasePath
value: "{{ sc.local.path }}"
value: "{{ .sc.local.path }}"
provisioner: openebs.io/local
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
@ -100,7 +100,7 @@ spec:
containers:
- name: openebs-provisioner-hostpath
imagePullPolicy: IfNotPresent
image: {{ sc.local.provisioner_image }}
image: {{ .sc.local.provisioner_image }}
env:
# OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s
# based on this address. This is ignored if empty.
@ -131,7 +131,7 @@ spec:
- name: OPENEBS_IO_INSTALLER_TYPE
value: "openebs-operator-lite"
- name: OPENEBS_IO_HELPER_IMAGE
value: "{{ sc.local.linux_utils_image }}"
value: "{{ .sc.local.linux_utils_image }}"
# LEADER_ELECTION_ENABLED is used to enable/disable leader election. By default
# leader election is enabled.
#- name: LEADER_ELECTION_ENABLED

View File

@ -2,20 +2,24 @@
- name: Sync ca file to remote
tags: ["certs"]
copy:
src: "{{ work_dir }}/kubekey/pki/root.crt"
dest: "/etc/ssl/etcd/ssl/ca.crt"
src: |
{{ .work_dir }}/kubekey/pki/root.crt
dest: /etc/ssl/etcd/ssl/ca.crt
- name: Sync etcd cert file to remote
tags: ["certs"]
copy:
src: "{{ work_dir }}/kubekey/pki/etcd.crt"
dest: "/etc/ssl/etcd/ssl/server.crt"
src: |
{{ .work_dir }}/kubekey/pki/etcd.crt
dest: /etc/ssl/etcd/ssl/server.crt
- name: Sync etcd key file to remote
tags: ["certs"]
copy:
src: "{{ work_dir }}/kubekey/pki/etcd.key"
dest: "/etc/ssl/etcd/ssl/server.key"
src: |
{{ .work_dir }}/kubekey/pki/etcd.key
dest: |
/etc/ssl/etcd/ssl/server.key
- name: Restart etcd service
tags: ["certs"]

View File

@ -2,15 +2,18 @@
- name: Sync etcd ca file to remote
tags: ["certs"]
copy:
src: "{{ work_dir }}/kubekey/pki/root.crt"
dest: "/etc/kubernetes/pki/etcd/ca.crt"
src: |
{{ .work_dir }}/kubekey/pki/root.crt
dest: /etc/kubernetes/pki/etcd/ca.crt
- name: Sync etcd cert files to remote
tags: ["certs"]
copy:
src: "{{ work_dir }}/kubekey/pki/etcd.crt"
dest: "/etc/kubernetes/pki/etcd/client.crt"
src: |
{{ .work_dir }}/kubekey/pki/etcd.crt
dest: /etc/kubernetes/pki/etcd/client.crt
- name: Sync etcd key files to remote
tags: ["certs"]
copy:
src: "{{ work_dir }}/kubekey/pki/etcd.key"
dest: "/etc/kubernetes/pki/etcd/client.key"
src: |
{{ .work_dir }}/kubekey/pki/etcd.key
dest: /etc/kubernetes/pki/etcd/client.key

View File

@ -9,41 +9,43 @@
tags: ["certs"]
run_once: true
command: |
{% if (kubeadm_install_version.stdout|version:'<v1.20.0') %}
{{- if .kubeadm_install_version.stdout | semverCompare "<v1.20.0" -}}
/usr/local/bin/kubeadm alpha certs renew apiserver
/usr/local/bin/kubeadm alpha certs renew apiserver-kubelet-client
/usr/local/bin/kubeadm alpha certs renew front-proxy-client
/usr/local/bin/kubeadm alpha certs renew admin.conf
/usr/local/bin/kubeadm alpha certs renew controller-manager.conf
/usr/local/bin/kubeadm alpha certs renew scheduler.conf
{% if (kubernetes.etcd.deployment_type=='internal' && renew_etcd ) %}
{{- if and (.kubernetes.etcd.deployment_type | eq "internal") .renew_etcd -}}
/usr/local/bin/kubeadm alpha certs renew etcd-healthcheck-client
/usr/local/bin/kubeadm alpha certs renew etcd-peer
/usr/local/bin/kubeadm alpha certs renew etcd-server
{% endif %}
{% else %}
{{- end -}}
{{- else -}}
/usr/local/bin/kubeadm certs renew apiserver
/usr/local/bin/kubeadm certs renew apiserver-kubelet-client
/usr/local/bin/kubeadm certs renew front-proxy-client
/usr/local/bin/kubeadm certs renew admin.conf
/usr/local/bin/kubeadm certs renew controller-manager.conf
/usr/local/bin/kubeadm certs renew scheduler.conf
{% if (kubernetes.etcd.deployment_type=='internal' && renew_etcd ) %}
{{- if and (.kubernetes.etcd.deployment_type | eq "internal") .renew_etcd -}}
/usr/local/bin/kubeadm certs renew etcd-healthcheck-client
/usr/local/bin/kubeadm certs renew etcd-peer
/usr/local/bin/kubeadm certs renew etcd-server
{% endif %}
{% endif %}
{{- end -}}
{{- end -}}
- name: Fetch kubeconfig to local
tags: ["certs"]
run_once: true
fetch:
src: /etc/kubernetes/admin.conf
dest: "{{ work_dir }}/kubekey/kubeconfig"
dest: |
{{ .work_dir }}/kubekey/kubeconfig
- name: Sync kubeconfig to remote
tags: ["certs"]
copy:
src: "{{ work_dir }}/kubekey/kubeconfig"
src: |
{{ .work_dir }}/kubekey/kubeconfig
dest: /root/.kube/config

View File

@ -5,24 +5,24 @@
- include_tasks: etcd.yaml
tags: ["certs"]
when:
- kubernetes.etcd.deployment_type=='external' && groups['etcd']|length > 0
- renew_etcd
- and (.kubernetes.etcd.deployment_type | eq "external") (.groups.etcd | default list | len | lt 0)
- .renew_etcd
- name: Reload kubernetes pods
tags: [ "certs" ]
command: |
{% if (cri.container_manager == "docker") %}
{{- if .cri.container_manager | eq "docker" -}}
docker ps -af name=k8s_PODS_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f
docker ps -af name=k8s_PODS_kube-controller-manager* -q | xargs --no-run-if-empty docker rm -f
docker ps -af name=k8s_PODS_kube-scheduler* -q | xargs --no-run-if-empty docker rm -f
{% if (kubernetes.etcd.deployment_type=='internal' && renew_etcd ) %}
{{- if and (.kubernetes.etcd.deployment_type | eq "internal") .renew_etcd -}}
docker ps -af name=k8s_PODS_etcd* -q | xargs --no-run-if-empty docker rm -f
{% endif %}
{% else %}
{{- end -}}
{{- else -}}
crictl pods --name kube-apiserver-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %'
crictl pods --name kube-controller-manager-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %'
crictl pods --name kube-scheduler-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %'
{% if (kubernetes.etcd.deployment_type=='internal' && renew_etcd ) %}
{{- if and (.kubernetes.etcd.deployment_type | eq "internal") .renew_etcd -}}
crictl pods --name etcd-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %'
{% endif %}
{% endif %}
{{- end -}}
{{- end -}}

View File

@ -2,14 +2,18 @@
- name: Sync image registry cert file to remote
tags: ["certs"]
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
dest: "/opt/harbor/{{ harbor_version }}/ssl/server.crt"
src: |
{{ .work_dir }}/kubekey/pki/image_registry.crt
dest: |
/opt/harbor/{{ .harbor_version }}/ssl/server.crt
- name: Sync image registry key file to remote
tags: ["certs"]
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.key"
dest: "/opt/harbor/{{ harbor_version }}/ssl/server.key"
src: |
{{ .work_dir }}/kubekey/pki/image_registry.key
dest: |
/opt/harbor/{{ .harbor_version }}/ssl/server.key
- name: Restart harbor service
tags: ["certs"]

View File

@ -1,6 +1,6 @@
- include_tasks: harbor.yaml
tags: ["certs"]
when: image_registry.type == 'harbor'
when: .image_registry.type | eq "harbor"
- include_tasks: registry.yaml
tags: ["certs"]
when: image_registry.type == 'registry'
when: .image_registry.type | eq "registry"

View File

@ -2,14 +2,18 @@
- name: Sync image registry cert file to remote
tags: ["certs"]
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
dest: "/opt/registry/{{ registry_version }}/ssl/server.crt"
src: |
{{ .work_dir }}/kubekey/pki/image_registry.crt
dest: |
/opt/registry/{{ .registry_version }}/ssl/server.crt
- name: Sync image registry key file to remote
tags: ["certs"]
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.key"
dest: "/opt/registry/{{ registry_version }}/ssl/server.key"
src: |
{{ .work_dir }}/kubekey/pki/image_registry.key
dest: |
/opt/registry/{{ .registry_version }}/ssl/server.key
- name: Restart registry service
tags: ["certs"]

View File

@ -2,96 +2,224 @@ work_dir: /kubekey
artifact:
arch: [ "amd64" ]
# offline artifact package for kk.
# artifact_file: /tmp/kubekey.tar.gz
artifact_file: ""
# the md5_file of artifact_file.
# artifact_md5: /tmp/artifact.md5
artifact_md5: ""
# how to generate cert file.support: IfNotPresent, Always
gen_cert_policy: IfNotPresent
artifact_url:
etcd:
amd64: |
{% if (kkzone == "cn") %}https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz{% else %}https://github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-amd64.tar.gz
{{- else -}}
https://github.com/etcd-io/etcd/releases/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-amd64.tar.gz
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-arm64.tar.gz{% else %}https://github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-arm64.tar.gz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-arm64.tar.gz
{{- else -}}
https://github.com/etcd-io/etcd/releases/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-arm64.tar.gz
{{- end -}}
kubeadm:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/amd64/kubeadm{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/kubeadm{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/amd64/kubeadm
{{- else -}}
https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/amd64/kubeadm
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/arm64/kubeadm{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/arm64/kubeadm{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/arm64/kubeadm
{{- else -}}
https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/arm64/kubeadm
{{- end -}}
kubelet:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/amd64/kubelet{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/kubelet{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/amd64/kubelet
{{- else -}}
https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/amd64/kubelet
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/arm64/kubelet{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/arm64/kubelet{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/arm64/kubelet
{{- else -}}
https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/arm64/kubelet
{{- end -}}
kubectl:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/amd64/kubectl{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/kubectl{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/amd64/kubectl
{{- else -}}
https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/amd64/kubectl
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/arm64/kubectl{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/arm64/kubectl{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/arm64/kubectl
{{- else -}}
https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/arm64/kubectl
{{- end -}}
cni:
amd64: |
{% if (kkzone == 'cn') %}https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-amd64-{{ cni_version }}.tgz{% else %}https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-amd64-{{ cni_version }}.tgz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ .cni_version }}/cni-plugins-linux-amd64-{{ .cni_version }}.tgz
{{- else -}}
https://github.com/containernetworking/plugins/releases/download/{{ .cni_version }}/cni-plugins-linux-amd64-{{ .cni_version }}.tgz
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-arm64-{{ cni_version }}.tgz{% else %}https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-arm64-{{ cni_version }}.tgz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ .cni_version }}/cni-plugins-linux-arm64-{{ .cni_version }}.tgz
{{- else -}}
https://github.com/containernetworking/plugins/releases/download/{{ .cni_version }}/cni-plugins-linux-arm64-{{ .cni_version }}.tgz
{{- end -}}
helm:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-helm.pek3b.qingstor.com/helm-{{ helm_version }}-linux-amd64.tar.gz{% else %}https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-helm.pek3b.qingstor.com/helm-{{ .helm_version }}-linux-amd64.tar.gz
{{- else -}}
https://get.helm.sh/helm-{{ .helm_version }}-linux-amd64.tar.gz
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-helm.pek3b.qingstor.com/helm-{{ helm_version }}-linux-arm64.tar.gz{% else %}https://get.helm.sh/helm-{{ helm_version }}-linux-arm64.tar.gz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-helm.pek3b.qingstor.com/helm-{{ .helm_version }}-linux-arm64.tar.gz
{{- else -}}
https://get.helm.sh/helm-{{ .helm_version }}-linux-arm64.tar.gz
{{- end -}}
crictl:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-amd64.tar.gz{% else %}https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-amd64.tar.gz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-amd64.tar.gz
{{- else -}}
https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-amd64.tar.gz
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-arm64.tar.gz{% else %}https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-arm64.tar.gz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-arm64.tar.gz
{{- else -}}
https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-arm64.tar.gz
{{- end -}}
docker:
amd64: |
{% if (kkzone == 'cn') %}https://mirrors.aliyun.com/docker-ce/linux/static/stable/x86_64/docker-{{ docker_version }}.tgz{% else %}https://download.docker.com/linux/static/stable/x86_64/docker-{{ docker_version }}.tgz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://mirrors.aliyun.com/docker-ce/linux/static/stable/x86_64/docker-{{ .docker_version }}.tgz
{{- else -}}
https://download.docker.com/linux/static/stable/x86_64/docker-{{ .docker_version }}.tgz
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://mirrors.aliyun.com/docker-ce/linux/static/stable/aarch64/docker-{{ docker_version }}.tgz{% else %}https://download.docker.com/linux/static/stable/aarch64/docker-{{ docker_version }}.tgz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://mirrors.aliyun.com/docker-ce/linux/static/stable/aarch64/docker-{{ .docker_version }}.tgz
{{- else -}}
https://download.docker.com/linux/static/stable/aarch64/docker-{{ .docker_version }}.tgz
{{- end -}}
cridockerd:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.amd64.tgz{% else %}https://github.com/Mirantis/cri-dockerd/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.amd64.tgz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.amd64.tgz
{{- else -}}
https://github.com/Mirantis/cri-dockerd/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.amd64.tgz
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.arm64.tgz{% else %}https://github.com/Mirantis/cri-dockerd/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.arm64.tgz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.arm64.tgz
{{- else -}}
https://github.com/Mirantis/cri-dockerd/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.arm64.tgz
{{- end -}}
containerd:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-amd64.tar.gz{% else %}https://github.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-amd64.tar.gz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-amd64.tar.gz
{{- else -}}
https://github.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-amd64.tar.gz
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-arm64.tar.gz{% else %}https://github.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-arm64.tar.gz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-arm64.tar.gz
{{- else -}}
https://github.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-arm64.tar.gz
{{- end -}}
runc:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.amd64{% else %}https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.amd64{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.amd64
{{- else -}}
https://github.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.amd64
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.arm64{% else %}https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.arm64{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.arm64
{{- else -}}
https://github.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.arm64
{{- end -}}
calicoctl:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-amd64{% else %}https://github.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-amd64{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-amd64
{{- else -}}
https://github.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-amd64
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-arm64{% else %}https://github.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-arm64{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-arm64
{{- else -}}
https://github.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-arm64
{{- end -}}
dockercompose:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-x86_64{% else %}https://github.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-x86_64{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-x86_64
{{- else -}}
https://github.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-x86_64
{{- end -}}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-aarch64{% else %}https://github.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-aarch64{% endif %}
{{- if .kkzone | eq "cn" -}}
https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-aarch64
{{- else -}}
https://github.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-aarch64
{{- end -}}
# registry:
# amd64: |
# {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-amd64.tgz{% else %}https://github.com/kubesphere/kubekey/releases/download/v2.0.0-alpha.1/registry-{{ registry_version }}-linux-amd64.tgz{% endif %}
# {{- if .kkzone | eq "cn" -}}
# https://kubernetes-release.pek3b.qingstor.com/registry/{{ .registry_version }}/registry-{{ .registry_version }}-linux-amd64.tgz
# {{- else -}}
# https://github.com/kubesphere/kubekey/releases/download/{{ .registry_version }}/registry-{{ .registry_version }}-linux-amd64.tgz
# {{- end -}}
# arm64: |
# {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-arm64.tgz{% else %}https://github.com/kubesphere/kubekey/releases/download/v2.0.0-alpha.1/registry-{{ registry_version }}-linux-arm64.tgz{% endif %}
# {{- if .kkzone | eq "cn" -}}
# https://kubernetes-release.pek3b.qingstor.com/registry/{{ .registry_version }}/registry-{{ .registry_version }}-linux-arm64.tgz
# {{- else -}}
# https://github.com/kubesphere/kubekey/releases/download/{{ .registry_version }}/registry-{{ .registry_version }}-linux-arm64.tgz
# {{- end -}}
harbor:
amd64: |
{% if (kkzone == 'cn') %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz{% else %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz{% endif %}
{{- if .kkzone | eq "cn" -}}
https://github.com/goharbor/harbor/releases/download/{{ .harbor_version }}/harbor-offline-installer-{{ .harbor_version }}.tgz
{{- else -}}
https://github.com/goharbor/harbor/releases/download/{{ .harbor_version }}/harbor-offline-installer-{{ .harbor_version }}.tgz
{{- end -}}
# arm64: |
# {% if (kkzone == 'cn') %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-{{ harbor_version }}-linux-arm64.tgz{% else %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-{{ harbor_version }}-linux-arm64.tgz{% endif %}
# {{- if .kkzone | eq "cn" -}}
# https://github.com/goharbor/harbor/releases/download/{{ .harbor_version }}/harbor-{{ .harbor_version }}-linux-arm64.tgz
# {{- else -}}
# https://github.com/goharbor/harbor/releases/download/{{ .harbor_version }}/harbor-{{ .harbor_version }}-linux-arm64.tgz
# {{- end -}}
# keepalived:
# amd64: |
# {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-amd64.tgz{% else %}https://github.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-amd64.tgz{% endif %}
# {{- if .kkzone | eq "cn" -}}
# https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-amd64.tgz
# {{- else -}}
# https://github.com/osixia/keepalived/releases/download/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-amd64.tgz
# {{- end -}}
# arm64: |
# {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-arm64.tgz{% else %}https://github.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-arm64.tgz{% endif %}
cilium: https://helm.cilium.io/cilium-{{ cilium_version }}.tgz
kubeovn: https://kubeovn.github.io/kube-ovn/kube-ovn-{{ kubeovn_version }}.tgz
hybridnet: https://github.com/alibaba/hybridnet/releases/download/helm-chart-{{ hybridnet_version }}/hybridnet-{{ hybridnet_version }}.tgz
nfs_provisioner: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/releases/download/nfs-subdir-external-provisioner-4.0.18/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz
# {{- if .kkzone | eq "cn" -}}
# https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-arm64.tgz
# {{- else -}}
# https://github.com/osixia/keepalived/releases/download/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-arm64.tgz
# {{- end -}}
cilium: https://helm.cilium.io/cilium-{{ .cilium_version }}.tgz
kubeovn: https://kubeovn.github.io/kube-ovn/kube-ovn-{{ .kubeovn_version }}.tgz
hybridnet: https://github.com/alibaba/hybridnet/releases/download/helm-chart-{{ .hybridnet_version }}/hybridnet-{{ .hybridnet_version }}.tgz
nfs_provisioner: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/releases/download/nfs-subdir-external-provisioner-4.0.18/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz
images:
auth: []
list: []

View File

@ -1,266 +1,252 @@
---
- name: Check binaries for etcd
command: |
artifact_name={{ artifact.artifact_url.etcd[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/etcd/{{ etcd_version }}/{{ item }}
artifact_name={{ get .artifact.artifact_url.etcd .item | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/etcd/{{ .etcd_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.etcd[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.etcd .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.etcd[item] }}
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.etcd .item }}
fi
loop: "{{ artifact.arch }}"
when:
- etcd_version | defined && etcd_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .etcd_version (ne .etcd_version "")
- name: Check binaries for kube
command: |
kube_path={{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ item }}
kube_path={{ .work_dir }}/kubekey/kube/{{ .kube_version }}/{{ .item }}
if [ ! -f $kube_path/kubelet ]; then
mkdir -p $kube_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.kubelet[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubelet .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $kube_path/kubelet {{ artifact.artifact_url.kubelet[item] }}
curl -L -o $kube_path/kubelet {{ get .artifact.artifact_url.kubelet .item }}
fi
if [ ! -f $kube_path/kubeadm ]; then
mkdir -p $kube_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.kubeadm[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubeadm .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $kube_path/kubeadm {{ artifact.artifact_url.kubeadm[item] }}
curl -L -o $kube_path/kubeadm {{ get .artifact.artifact_url.kubeadm .item }}
fi
if [ ! -f $kube_path/kubectl ]; then
mkdir -p $kube_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.kubectl[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubectl .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $kube_path/kubectl {{ artifact.artifact_url.kubectl[item] }}
curl -L -o $kube_path/kubectl {{ get .artifact.artifact_url.kubectl .item }}
fi
loop: "{{ artifact.arch }}"
when:
- kube_version | defined && kube_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .kube_version (ne .kube_version "")
- name: Check binaries for cni
command: |
artifact_name={{ artifact.artifact_url.cni[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/cni/{{ cni_version }}/{{ item }}
artifact_name={{ get .artifact.artifact_url.cni .item | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/cni/{{ .cni_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.cni[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.cni .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.cni[item] }}
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.cni .item }}
fi
loop: "{{ artifact.arch }}"
when:
- cni_version | defined && cni_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .cni_version (ne .cni_version "")
- name: Check binaries for helm
command: |
artifact_name={{ artifact.artifact_url.helm[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/helm/{{ helm_version }}/{{ item }}
artifact_name={{ get .artifact.artifact_url.helm .item | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/helm/{{ .helm_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.helm[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.helm .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.helm[item] }}
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.helm .item }}
fi
loop: "{{ artifact.arch }}"
when:
- helm_version | defined && helm_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .helm_version (ne .helm_version "")
- name: Check binaries for crictl
command: |
artifact_name={{ artifact.artifact_url.crictl[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/crictl/{{ crictl_version }}/{{ item }}
artifact_name={{ get .artifact.artifact_url.crictl .item | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/crictl/{{ .crictl_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.crictl[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.crictl .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.crictl[item] }}
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.crictl .item }}
fi
loop: "{{ artifact.arch }}"
when:
- crictl_version | defined && crictl_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .crictl_version (ne .crictl_version "")
- name: Check binaries for docker
command: |
artifact_name={{ artifact.artifact_url.docker[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/docker/{{ docker_version }}/{{ item }}
artifact_name={{ get .artifact.artifact_url.docker .item | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/docker/{{ .docker_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.docker[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.docker .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.docker[item] }}
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.docker .item }}
fi
loop: "{{ artifact.arch }}"
when:
- docker_version | defined && docker_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .docker_version (ne .docker_version "")
- name: Check binaries for cridockerd
command: |
artifact_name={{ artifact.artifact_url.cridockerd[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/cri-dockerd/{{ cridockerd_version }}/{{ item }}
artifact_name={{ get .artifact.artifact_url.cridockerd .item | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/cri-dockerd/{{ .cridockerd_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.cridockerd[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.cridockerd .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.cridockerd[item] }}
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.cridockerd .item }}
fi
loop: "{{ artifact.arch }}"
when:
- cridockerd_version | defined && cridockerd_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .cridockerd_version (ne .docker_version "")
- name: Check binaries for containerd
command: |
artifact_name={{ artifact.artifact_url.containerd[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/containerd/{{ containerd_version }}/{{ item }}
artifact_name={{ get .artifact.artifact_url.containerd .item | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/containerd/{{ .containerd_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.containerd[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.containerd .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.containerd[item] }}
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.containerd .item }}
fi
loop: "{{ artifact.arch }}"
when:
- containerd_version | defined && containerd_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .containerd_version (ne .containerd_version "")
- name: Check binaries for runc
command: |
artifact_name={{ artifact.artifact_url.runc[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/runc/{{ runc_version }}/{{ item }}
artifact_name={{ get .artifact.artifact_url.runc .item | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/runc/{{ .runc_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.runc[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.runc .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.runc[item] }}
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.runc .item }}
fi
loop: "{{ artifact.arch }}"
when:
- runc_version | defined && runc_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .runc_version (ne .runc_version "")
- name: Check binaries for calicoctl
command: |
artifact_name={{ artifact.artifact_url.calicoctl[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/cni/{{ calico_version }}/{{ item }}
artifact_name={{ get .artifact.artifact_url.calicoctl .item | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/cni/{{ .calico_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.calicoctl[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.calicoctl .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.calicoctl[item] }}
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.calicoctl .item }}
fi
loop: "{{ artifact.arch }}"
when:
- calico_version | defined && calico_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .calico_version (ne .calico_version "")
- name: Check binaries for registry
command: |
artifact_name={{ artifact.artifact_url.registry[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/image-registry/registry/{{ registry_version }}/{{ item }}
artifact_name={{ get .artifact.artifact_url.registry .item | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/image-registry/registry/{{ .registry_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.registry[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.registry .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.registry[item] }}
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.registry .item }}
fi
loop: "{{ artifact.arch }}"
when:
- registry_version | defined && registry_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .registry_version (ne .registry_version "")
- name: Check binaries for docker-compose
command: |
compose_name=docker-compose
compose_path={{ work_dir }}/kubekey/image-registry/docker-compose/{{ dockercompose_version }}/{{ item }}
compose_path={{ .work_dir }}/kubekey/image-registry/docker-compose/{{ .dockercompose_version }}/{{ .item }}
if [ ! -f $compose_path/$compose_name ]; then
mkdir -p $compose_path
# download online
curl -L -o $compose_path/$compose_name {{ artifact.artifact_url.dockercompose[item] }}
curl -L -o $compose_path/$compose_name {{ get .artifact.artifact_url.dockercompose .item }}
fi
loop: "{{ artifact.arch }}"
when:
- dockercompose_version | defined && dockercompose_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .dockercompose_version (ne .dockercompose_version "")
- name: Check binaries for harbor
command: |
harbor_name={{ artifact.artifact_url.harbor[item]|split:"/"|last }}
harbor_path={{ work_dir }}/kubekey/image-registry/harbor/{{ harbor_version }}/{{ item }}
harbor_name={{ get .artifact.artifact_url.harbor .item | splitList "/" | last }}
harbor_path={{ .work_dir }}/kubekey/image-registry/harbor/{{ .harbor_version }}/{{ .item }}
if [ ! -f $harbor_path/$harbor_name ]; then
mkdir -p $harbor_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.harbor[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.harbor .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $harbor_path/$harbor_name {{ artifact.artifact_url.harbor[item] }}
curl -L -o $harbor_path/$harbor_name {{ get .artifact.artifact_url.harbor .item }}
fi
loop: "{{ artifact.arch }}"
when:
- harbor_version | defined && harbor_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .harbor_version (ne .harbor_version "")
- name: Check binaries for keepalived
command: |
artifact_name={{ artifact.artifact_url.keepalived[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/image-registry/keepalived/{{ keepalived_version }}/{{ item }}
artifact_name={{ get .artifact.artifact_url.keepalived .item | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/image-registry/keepalived/{{ .keepalived_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.keepalived[item] }})
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.keepalived .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.keepalived[item] }}
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.keepalived .item }}
fi
loop: "{{ artifact.arch }}"
when:
- keepalived_version | defined && keepalived_version != ""
loop: "{{ .artifact.arch | toJson }}"
when: and .keepalived_version (ne .keepalived_version "")

View File

@ -1,44 +1,44 @@
---
- name: Check binaries for cilium
command: |
artifact_name={{ artifact.artifact_url.cilium|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/cni
artifact_name={{ .artifact.artifact_url.cilium | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/cni
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
cd $artifact_path && helm pull {{ artifact.artifact_url.cilium }}
cd $artifact_path && helm pull {{ .artifact.artifact_url.cilium }}
fi
when: cilium_version | defined
when: and .cilium_version (ne .cilium_version "")
- name: Check binaries for kubeovn
command: |
artifact_name={{ artifact.artifact_url.kubeovn|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/cni
artifact_name={{ .artifact.artifact_url.kubeovn | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/cni
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
cd $artifact_path && helm pull {{ artifact.artifact_url.kubeovn }}
cd $artifact_path && helm pull {{ .artifact.artifact_url.kubeovn }}
fi
when: kubeovn_version | defined
when: and .kubeovn_version (ne .kubeovn_version "")
- name: Check binaries for hybridnet
command: |
artifact_name={{ artifact.artifact_url.hybridnet|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/cni
artifact_name={{ .artifact.artifact_url.hybridnet | splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/cni
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
cd $artifact_path && helm pull {{ artifact.artifact_url.hybridnet }}
cd $artifact_path && helm pull {{ .artifact.artifact_url.hybridnet }}
fi
when: hybridnet_version | defined
when: and .hybridnet_version (ne .hybridnet_version "")
- name: Check binaries for nfs_provisioner
command: |
artifact_name={{ artifact.artifact_url.nfs_provisioner|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/sc
artifact_name={{ .artifact.artifact_url.nfs_provisioner |splitList "/" | last }}
artifact_path={{ .work_dir }}/kubekey/sc
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
cd $artifact_path && helm pull {{ artifact.artifact_url.nfs_provisioner }}
cd $artifact_path && helm pull {{ .artifact.artifact_url.nfs_provisioner }}
fi
when: nfs_provisioner_version| defined
when: and .nfs_provisioner_version (ne .nfs_provisioner_version "")

View File

@ -2,17 +2,17 @@
- name: Create work_dir
tags: ["always"]
command: |
if [ ! -d "{{ work_dir }}" ]; then
mkdir -p {{ work_dir }}
if [ ! -d "{{ .work_dir }}" ]; then
mkdir -p {{ .work_dir }}
fi
- name: Extract artifact to work_dir
tags: ["always"]
command: |
if [ ! -f "{{ artifact_file }}" ]; then
tar -zxvf {{ artifact_file }} -C {{ work_dir }}
if [ ! -f "{{ .artifact_file }}" ]; then
tar -zxvf {{ .artifact_file }} -C {{ .work_dir }}
fi
when: artifact_file | defined
when: and .artifact_file (ne .artifact_file "")
- name: Download binaries
block:
@ -27,4 +27,4 @@
- name: Chown work_dir to sudo
tags: ["always"]
command: |
chown -R ${SUDO_UID}:${SUDO_GID} {{ work_dir }}
chown -R ${SUDO_UID}:${SUDO_GID} {{ .work_dir }}

View File

@ -3,32 +3,50 @@
gen_cert:
cn: root
date: 87600h
policy: "{{ artifact.gen_cert_policy }}"
out_key: "{{ work_dir }}/kubekey/pki/root.key"
out_cert: "{{ work_dir }}/kubekey/pki/root.crt"
policy: "{{ .artifact.gen_cert_policy }}"
out_key: |
{{ .work_dir }}/kubekey/pki/root.key
out_cert: |
{{ .work_dir }}/kubekey/pki/root.crt
- name: Generate etcd cert file
gen_cert:
root_key: "{{ work_dir }}/kubekey/pki/root.key"
root_cert: "{{ work_dir }}/kubekey/pki/root.crt"
root_key: |
{{ .work_dir }}/kubekey/pki/root.key
root_cert: |
{{ .work_dir }}/kubekey/pki/root.crt
cn: etcd
sans: |
[{% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %}]
{{- $ips := list -}}
{{- range .groups.etcd | default list -}}
{{- $ips = append $ips (index $.inventory_hosts . "internal_ipv4") -}}
{{- end -}}
{{ $ips | toJson }}
date: 87600h
policy: "{{ artifact.gen_cert_policy }}"
out_key: "{{ work_dir }}/kubekey/pki/etcd.key"
out_cert: "{{ work_dir }}/kubekey/pki/etcd.crt"
when: groups['etcd']|length > 0
policy: "{{ .artifact.gen_cert_policy }}"
out_key: |
{{ .work_dir }}/kubekey/pki/etcd.key
out_cert: |
{{ .work_dir }}/kubekey/pki/etcd.crt
when: .groups.etcd | default list | len | lt 0
- name: Generate registry image cert file
gen_cert:
root_key: "{{ work_dir }}/kubekey/pki/root.key"
root_cert: "{{ work_dir }}/kubekey/pki/root.crt"
root_key: |
{{ .work_dir }}/kubekey/pki/root.key
root_cert: |
{{ .work_dir }}/kubekey/pki/root.crt
cn: image_registry
sans: |
[{% for h in groups['image_registry'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %}]
{{- $ips := list -}}
{{- range .groups.image_registry | default list -}}
{{- $ips = append $ips (index $.inventory_hosts . "internal_ipv4") -}}
{{- end -}}
{{ $ips | toJson }}
date: 87600h
policy: "{{ artifact.gen_cert_policy }}"
out_key: "{{ work_dir }}/kubekey/pki/image_registry.key"
out_cert: "{{ work_dir }}/kubekey/pki/image_registry.crt"
when: groups['image_registry']|length > 0
policy: "{{ .artifact.gen_cert_policy }}"
out_key: |
{{ .work_dir }}/kubekey/pki/image_registry.key
out_cert: |
{{ .work_dir }}/kubekey/pki/image_registry.crt
when: and .groups.image_registry (.groups.image_registry | default list | len | lt 0)

View File

@ -1,10 +1,7 @@
---
- name: Configure ntp server
command: |
chronyConfigFile="/etc/chrony.conf"
if [ {{ os.release.ID }} = "ubuntu" ] || [ {{ os.release.ID_LIKE }} = "debian" ]; then
chronyConfigFile="/etc/chrony/chrony.conf"
fi
chronyConfigFile={{ if or (.os.release.ID | eq "ubuntu") (.os.release.ID_LIKE | eq "debian") }}"/etc/chrony/chrony.conf"{{ else }}"/etc/chrony.conf"{{ end }}
# clear old server
sed -i '/^server/d' $chronyConfigFile
# disable pool
@ -18,25 +15,25 @@
# add local
echo "local stratum 10" >> $chronyConfigFile
# add server
{% for server in ntp_servers %}
{% for _,v in inventory_hosts %}
{% if (v.inventory_name == server) %}{% set server = v.internal_ipv4%}{% endif %}
{% endfor %}
grep -q '^server {{ server }} iburst' $chronyConfigFile||sed '1a server {{ server }} iburst' -i $chronyConfigFile
{% endfor %}
{{- range $server := .ntp_servers -}}
{{- range $.inventory_hosts -}}
{{- if eq .inventory_name $server -}}
{{- $server = .internal_ipv4 -}}
{{- end -}}
{{- end -}}
grep -q '^server {{ $server }} iburst' $chronyConfigFile || sed '1a server {{ $server }} iburst' -i $chronyConfigFile
{{- end -}}
- name: Set timezone
command: |
timedatectl set-timezone {{ timezone }}
timedatectl set-timezone {{ .timezone }}
timedatectl set-ntp true
when: timezone | defined
when: or (.ntp_servers | len | lt 0) (.timezone | ne "")
- name: Restart ntp server
command: |
chronyService="chronyd.service"
if [ {{ os.release.ID }} = "ubuntu" ] || [ {{ os.release.ID_LIKE }} = "debian" ]; then
chronyService="chrony.service"
fi
systemctl restart $chronyService
when:
- ntp_servers | defined or timezone | defined
{{- if or (.os.release.ID | eq "ubuntu") (.os.release.ID_LIKE | eq "debian") }}
systemctl restart chrony.service
{{- end }}
systemctl restart chronyd.service
when: or (.ntp_servers | len | lt 0) (.timezone | ne "")

View File

@ -4,8 +4,9 @@
- name: Sync repository file
ignore_errors: true
copy:
src: "{{ work_dir }}/kubekey/repository/{{ os.release.ID_LIKE }}-{{ os.release.VERSION_ID|safe }}-{{ binary_type.stdout }}.iso"
dest: "/tmp/kubekey/repository.iso"
src: |
{{ .work_dir }}/kubekey/repository/{{ .os.release.ID_LIKE }}-{{ .os.release.VERSION_ID }}-{{ .binary_type.stdout }}.iso
dest: /tmp/kubekey/repository.iso
- name: Mount iso file
command: |
if [ -f "/tmp/kubekey/repository.iso" ]; then
@ -42,7 +43,7 @@
else
apt-get update && apt install -y socat conntrack ipset ebtables chrony ipvsadm
fi
when: os.release.ID_LIKE == "debian"
when: .os.release.ID_LIKE | eq "debian"
- name: Init rhel repository
command: |
now=$(date +"%Y-%m-%d %H:%M:%S")
@ -74,4 +75,4 @@
# install
yum install -y openssl socat conntrack ipset ebtables chrony ipvsadm
fi
when: os.release.ID_LIKE == "rhel fedora"
when: .os.release.ID_LIKE | eq "rhel fedora"

View File

@ -12,7 +12,8 @@
- name: Set hostname
command: |
hostnamectl set-hostname {{ inventory_name }} && sed -i '/^127.0.1.1/s/.*/127.0.1.1 {{ inventory_name }}/g' /etc/hosts
hostnamectl set-hostname {{ .inventory_name }} \
&& sed -i '/^127.0.1.1/s/.*/127.0.1.1 {{ .inventory_name }}/g' /etc/hosts
- name: Sync init os to remote
template:

View File

@ -174,11 +174,21 @@ sed -i '/^$/N;/\n$/N;//D' /etc/hosts
cat >>/etc/hosts<<EOF
# kubekey hosts BEGIN
{% for _,hv in inventory_hosts %}
{% if (hv.internal_ipv4|defined) %}{{ hv.internal_ipv4 }} {{ hv.inventory_name }} {{ hv.inventory_name }}.{{ kubernetes.cluster_name|default_if_none:'cluster.local' }}{% endif %}
{% if (hv.internal_ipv6|defined) %}{{ hv.internal_ipv6 }} {{ hv.inventory_name }} {{ hv.inventory_name }}.{{ kubernetes.cluster_name|default_if_none:'cluster.local' }}{% endif %}
{% endfor %}
{{- range .inventory_hosts }}
{{- if and .internal_ipv4 (ne .internal_ipv4 "") }}
{{ printf "%s %s %s.%s" .internal_ipv4 .inventory_name .inventory_name ($.kubernetes.cluster_name | default "cluster.local") }}
{{- end }}
{{- if and .internal_ipv6 (ne .internal_ipv6 "") }}
{{ printf "%s %s %s.%s" .internal_ipv6 .internal_ipv6 .inventory_name ($.kubernetes.cluster_name | default "cluster.local") }}
{{- end }}
{{- end }}
# kubekey hosts END
{{- if ne .kubernetes.kube_vip.address .kubernetes.control_plane_endpoint }}
# kubevip BEGIN
.kubernetes.kube_vip.address .kubernetes.control_plane_endpoint
#kubevip END
{{- end }}
EOF
sync

View File

@ -1,4 +1,14 @@
renew_certs:
enabled: false
is_docker: "{% if (cri.container_manager == 'docker') %}true{% else %}false{% endif %}"
is_kubeadm_alpha: "{% if (kube_version|version:'<v1.20.0') %}true{% else %}false{% endif %}"
is_docker: |
{{- if .cri.container_manager | eq "docker" -}}
true
{{- else -}}
false
{{- end -}}
is_kubeadm_alpha: |
{{- if .kube_version | semverCompare "<v1.20.0" -}}
true
{{- else -}}
false
{{- end -}}

View File

@ -1,9 +1,9 @@
#!/bin/bash
{% if (renew_certs.is_kubeadm_alpha) %}
{{- if .renew_certs.is_kubeadm_alpha }}
kubeadmCerts='/usr/local/bin/kubeadm alpha certs'
{% else %}
{{- else }}
kubeadmCerts='/usr/local/bin/kubeadm certs'
{% endif %}
{{- end }}
getCertValidDays() {
local earliestExpireDate; earliestExpireDate=$(${kubeadmCerts} check-expiration | grep -o "[A-Za-z]\{3,4\}\s\w\w,\s[0-9]\{4,\}\s\w*:\w*\s\w*\s*" | xargs -I {} date -d {} +%s | sort | head -n 1)
local today; today="$(date +%s)"
@ -15,11 +15,11 @@ if [ $(getCertValidDays) -lt 30 ]; then
echo "## Renewing certificates managed by kubeadm ##"
${kubeadmCerts} renew all
echo "## Restarting control plane pods managed by kubeadm ##"
{% if (renew_certs.is_docker) %}
{{- if .renew_certs.is_docker }}
$(which docker | grep docker) ps -af 'name=k8s_POD_(kube-apiserver|kube-controller-manager|kube-scheduler|etcd)-*' -q | /usr/bin/xargs $(which docker | grep docker) rm -f
{% else %}
{{- else }}
$(which crictl | grep crictl) pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | /usr/bin/xargs $(which crictl | grep crictl) rmp -f
{% endif %}
{{- end }}
echo "## Updating /root/.kube/config ##"
cp /etc/kubernetes/admin.conf /root/.kube/config
fi

View File

@ -1,11 +1,15 @@
cri:
# support: systemd, cgroupfs
cgroup_driver: systemd
sandbox_image: "{{ k8s_registry }}/pause:3.5"
sandbox_image: |
{{ .k8s_registry }}/pause:3.5
# support: containerd,docker,crio
container_manager: docker
# the endpoint of containerd
cri_socket: "{% if (cri.container_manager=='containerd') %}unix:///var/run/containerd.sock{% endif %}"
cri_socket: |
{{- if .cri.container_manager | eq "containerd" -}}
unix:///var/run/containerd.sock
{{- end -}}
# containerd:
# data_root: /var/lib/containerd
docker:
@ -18,6 +22,11 @@ cri:
image_registry:
# ha_vip: 192.168.122.59
auth:
registry: "{% if (image_registry.ha_vip|defined) %}{{ image_registry.ha_vip }}{% else %}{{ groups['image_registry']|first }}{% endif %}"
registry: |
{{- if and .image_registry.ha_vip (ne .image_registry.ha_vip "") -}}
{{ .image_registry.ha_vip }}
{{- else -}}
{{ .groups.image_registry | default list | first }}
{{- end -}}
username: admin
password: Harbor12345

View File

@ -3,59 +3,60 @@
ignore_errors: true
command: runc --version
register: runc_install_version
- name: Sync runc binary to remote
when: or (.runc_install_version.stderr | ne "") (.runc_install_version.stdout | contains (printf "runc version %s\n" (.runc_version | default "" | trimPrefix "v" )) | not)
copy:
src: "{{ work_dir }}/kubekey/runc/{{ runc_version }}/{{ binary_type.stdout }}/runc.{{ binary_type.stdout }}"
dest: "/usr/local/bin/runc"
src: |
{{ .work_dir }}/kubekey/runc/{{ .runc_version }}/{{ .binary_type.stdout }}/runc.{{ .binary_type.stdout }}
dest: /usr/local/bin/runc
mode: 0755
when: runc_install_version.stderr != ""
- name: Check if containerd is installed
ignore_errors: true
command: containerd --version
register: containerd_install_version
- name: Sync containerd binary to remote
copy:
src: "{{ work_dir }}/kubekey/containerd/{{ containerd_version }}/{{ binary_type.stdout }}/containerd-{{ containerd_version|slice:'1:' }}-linux-{{ binary_type.stdout }}.tar.gz"
dest: "/tmp/kubekey/containerd-{{ containerd_version|slice:'1:' }}-linux-{{ binary_type.stdout }}.tar.gz"
when: containerd_install_version.stderr != ""
- name: Unpackage containerd binary
command: |
tar -xvf /tmp/kubekey/containerd-{{ containerd_version|slice:'1:' }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin/
when: containerd_install_version.stderr != ""
- name: Generate containerd config file
template:
src: containerd.config
dest: /etc/containerd/config.toml
when: containerd_install_version.stderr != ""
- name: Generate containerd Service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
when: containerd_install_version.stderr != ""
- name: Install containerd
when: or (.containerd_install_version.stderr | ne "") (.containerd_install_version.stdout | contains (printf " %s " .containerd_version) | not)
block:
- name: Sync containerd binary to remote
copy:
src: |
{{ .work_dir }}/kubekey/containerd/{{ .containerd_version }}/{{ .binary_type.stdout }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type.stdout }}.tar.gz
dest: |
/tmp/kubekey/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type.stdout }}.tar.gz
- name: Unpackage containerd binary
command: |
tar -xvf /tmp/kubekey/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin/
- name: Generate containerd config file
template:
src: containerd.config
dest: /etc/containerd/config.toml
- name: Generate containerd Service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
- name: Start containerd
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
- name: Sync image registry tls to remote
when: groups['image_registry'] > 0
when: .groups.image_registry | default list | len | lt 0
block:
- name: Sync image registry cert file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/root.crt"
dest: "/etc/containerd/certs.d/{{ image_registry.auth.registry }}/ca.crt"
src: |
{{ .work_dir }}/kubekey/pki/root.crt
dest: |
/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/ca.crt
- name: Sync image registry cert file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
dest: "/etc/containerd/certs.d/{{ image_registry.auth.registry }}/server.crt"
src: |
{{ .work_dir }}/kubekey/pki/image_registry.crt
dest: |
/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.crt
- name: Sync image registry key file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.key"
dest: "/etc/containerd/certs.d/{{ image_registry.auth.registry }}/server.key"
- name: Start containerd
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
when: containerd_install_version.stderr != ""
src: |
{{ .work_dir }}/kubekey/pki/image_registry.key
dest: |
/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.key

View File

@ -4,18 +4,19 @@
command: crictl --version
register: crictl_install_version
- name: Sync crictl binary to remote
copy:
src: "{{ work_dir }}/kubekey/crictl/{{ crictl_version }}/{{ binary_type.stdout }}/crictl-{{ crictl_version }}-linux-{{ binary_type.stdout }}.tar.gz"
dest: "/tmp/kubekey/crictl-{{ crictl_version }}-linux-{{ binary_type.stdout }}.tar.gz"
when: crictl_install_version.stderr != ""
- name: Unpackage crictl binary
command: |
tar -xvf /tmp/kubekey/crictl-{{ crictl_version }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin/
when: crictl_install_version.stderr != ""
- name: Generate crictl config file
template:
src: crictl.config
dest: /etc/crictl.yaml
- name: Install crictl
when: or (.crictl_install_version.stderr | ne "") (.crictl_install_version.stdout | ne (printf "crictl version %s" .crictl_version))
block:
- name: Sync crictl binary to remote
copy:
src: |
{{ .work_dir }}/kubekey/crictl/{{ .crictl_version }}/{{ .binary_type.stdout }}/crictl-{{ .crictl_version }}-linux-{{ .binary_type.stdout }}.tar.gz
dest: |
/tmp/kubekey/crictl-{{ .crictl_version }}-linux-{{ .binary_type.stdout }}.tar.gz
- name: Unpackage crictl binary
command: |
tar -xvf /tmp/kubekey/crictl-{{ .crictl_version }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin/
- name: Generate crictl config file
template:
src: crictl.config
dest: /etc/crictl.yaml

View File

@ -4,30 +4,26 @@
command: cri-dockerd --version
register: cridockerd_install_version
- name: Sync cri-dockerd Binary to remote
copy:
src: "{{ work_dir }}/kubekey/cri-dockerd/{{ cridockerd_version }}/{{ binary_type.stdout }}/cri-dockerd-{{ cridockerd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
dest: "/tmp/kubekey/cri-dockerd-{{ cridockerd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
when: cridockerd_install_version.stderr != ""
- name: Generate cri-dockerd config file
template:
src: cri-dockerd.config
dest: /etc/cri-dockerd.yaml
when: cridockerd_install_version.stderr != ""
- name: Unpackage cri-dockerd binary
command: |
tar -xvf /tmp/kubekey/cri-dockerd-{{ cridockerd_version }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin/
when: cridockerd_install_version.stderr != ""
- name: Generate cri-dockerd Service file
template:
src: cri-dockerd.service
dest: /etc/systemd/system/cri-dockerd.service
when: cridockerd_install_version.stderr != ""
- name: Start cri-dockerd service
command: |
systemctl daemon-reload && systemctl start cri-dockerd.service && systemctl enable cri-dockerd.service
when: cridockerd_install_version.stderr != ""
- name: Install cri-dockerd
when: or (.cridockerd_install_version.stderr | ne "") (.cridockerd_install_version.stdout | hasPrefix (printf "cri-dockerd %s " .cridockerd_version) | not)
block:
- name: Sync cri-dockerd Binary to remote
copy:
src: |
{{ .work_dir }}/kubekey/cri-dockerd/{{ .cridockerd_version }}/{{ .binary_type.stdout }}/cri-dockerd-{{ .cridockerd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
dest: |
/tmp/kubekey/cri-dockerd-{{ .cridockerd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
- name: Generate cri-dockerd config file
template:
src: cri-dockerd.config
dest: /etc/cri-dockerd.yaml
- name: Unpackage cri-dockerd binary
command: |
tar -xvf /tmp/kubekey/cri-dockerd-{{ .cridockerd_version }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin/
- name: Generate cri-dockerd Service file
template:
src: cri-dockerd.service
dest: /etc/systemd/system/cri-dockerd.service
- name: Start cri-dockerd service
command: |
systemctl daemon-reload && systemctl start cri-dockerd.service && systemctl enable cri-dockerd.service

View File

@ -4,53 +4,53 @@
command: docker --version
register: docker_install_version
- name: Sync docker binary to remote
copy:
src: "{{ work_dir }}/kubekey/docker/{{ docker_version }}/{{ binary_type.stdout }}/docker-{{ docker_version }}.tgz"
dest: "/tmp/kubekey/docker-{{ docker_version }}.tgz"
when: docker_install_version.stderr != ""
- name: Unpackage docker binary
command: |
tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ docker_version }}.tgz --wildcards docker/*
when: docker_install_version.stderr != ""
- name: Generate docker config file
template:
src: docker.config
dest: /etc/docker/daemon.json
when: docker_install_version.stderr != ""
- name: Generate docker service file
copy:
src: docker.service
dest: /etc/systemd/system/docker.service
when: docker_install_version.stderr != ""
- name: Generate containerd service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
when: docker_install_version.stderr != ""
- name: Install docker
when: or (.docker_install_version.stderr | ne "") (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
block:
- name: Sync docker binary to remote
copy:
src: |
{{ .work_dir }}/kubekey/docker/{{ .docker_version }}/{{ .binary_type.stdout }}/docker-{{ .docker_version }}.tgz
dest: |
/tmp/kubekey/docker-{{ .docker_version }}.tgz
- name: Unpackage docker binary
command: |
tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ .docker_version }}.tgz --wildcards docker/*
- name: Generate docker config file
template:
src: docker.config
dest: /etc/docker/daemon.json
- name: Generate docker service file
copy:
src: docker.service
dest: /etc/systemd/system/docker.service
- name: Generate containerd service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
- name: Start docker service
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
- name: Sync image registry tls to remote
when: groups['image_registry'] > 0
when: .groups.image_registry | default list | len | lt 0
block:
- name: Sync image registry cert file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/root.crt"
dest: "/etc/docker/certs.d/{{ image_registry.auth.registry }}/ca.crt"
src: |
{{ .work_dir }}/kubekey/pki/root.crt
dest: |
/etc/docker/certs.d/{{ .image_registry.auth.registry }}/ca.crt
- name: Sync image registry cert file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
dest: "/etc/docker/certs.d/{{ image_registry.auth.registry }}/server.crt"
src: |
{{ .work_dir }}/kubekey/pki/image_registry.crt
dest: |
/etc/docker/certs.d/{{ .image_registry.auth.registry }}/server.crt
- name: Sync image registry key file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.key"
dest: "/etc/docker/certs.d/{{ image_registry.auth.registry }}/server.key"
- name: Start docker service
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
when: docker_install_version.stderr != ""
src: |
{{ .work_dir }}/kubekey/pki/image_registry.key
dest: |
/etc/docker/certs.d/{{ .image_registry.auth.registry }}/server.key

View File

@ -4,16 +4,16 @@
# install docker
- include_tasks: install_docker.yaml
when: cri.container_manager == "docker"
when: .cri.container_manager | eq "docker"
# install containerd
- include_tasks: install_containerd.yaml
when: cri.container_manager == "containerd"
when: .cri.container_manager | eq "containerd"
# install cridockerd
- include_tasks: install_cridockerd.yaml
when:
- cri.container_manager == "docker"
- kube_version|version:'>=v1.24.0'
- .cri.container_manager | eq "docker"
- .kube_version | semverCompare ">=v1.24.0"

View File

@ -1,6 +1,6 @@
version = 2
root = {{ cri.containerd.data_root|default_if_none:"/var/lib/containerd" }}
root = {{ .cri.containerd.data_root | default "/var/lib/containerd" }}
state = "/run/containerd"
[grpc]
@ -36,11 +36,11 @@ state = "/run/containerd"
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "{{ cri.sandbox_image }}"
sandbox_image = "{{ .cri.sandbox_image }}"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = {% if (cri.cgroup_driver=="systemd") %}true{% else %}false{% endif %}
SystemdCgroup = {{ if .cri.cgroup_driver | eq "systemd" }}true{{ else }}false{{ end }}
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
@ -48,34 +48,37 @@ state = "/run/containerd"
conf_template = ""
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
{% if (cri.registry.mirrors|length > 0) %}
{{- if .cri.registry.mirrors | len | lt 0 }}
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = {{ cri.registry.mirrors|to_json|safe }}
{% endif %}
{% for ir in cri.registry.insecure_registries %}
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ ir }}"]
endpoint = ["http://{{ ir }}"]
{% endfor %}
{% if (cri.registry.auths|length > 0 || groups['image_registry']|length>0) %}
endpoint = {{ .cri.registry.mirrors | toJson }}
{{- end }}
{{- range .cri.registry.insecure_registries }}
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ . }}"]
endpoint = ["http://{{ . }}"]
{{- end }}
{{- if or (.cri.registry.auths | len | lt 0) (.groups.image_registry | default list | len | lt 0) }}
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ image_registry.auth.registry }}".auth]
username = "{{ image_registry.auth.username }}"
password = "{{ image_registry.auth.password }}"
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ image_registry.auth.registry }}".tls]
ca_file = "/etc/containerd/certs.d/{{ image_registry.auth.registry }}/ca.crt"
cert_file = "/etc/containerd/certs.d/{{ image_registry.auth.registry }}/server.crt"
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .image_registry.auth.registry }}".auth]
username = "{{ .image_registry.auth.username }}"
password = "{{ .image_registry.auth.password }}"
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .image_registry.auth.registry }}".tls]
ca_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/ca.crt"
cert_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.crt"
key_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.key"
{% for ir in cri.registry.auths %}
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ ir.repo }}".auth]
username = "{{ ir.username }}"
password = "{{ ir.password }}"
{% if (ir.ca_file|defined) %}
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ ir.repo }}".tls]
ca_file = "{{ ir.ca_file }}"
cert_file = "{{ ir.crt_file }}"
key_file = "{{ ir.key_file }}"
insecure_skip_verify = {{ ir.skip_ssl }}
{% endif %}
{% endfor %}
{% endif %}
{{- range .cri.registry.auths }}
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .repo }}".auth]
username = "{{ .username }}"
password = "{{ .password }}"
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .repo }}".tls]
{{- if .ca_file }}
ca_file = "{{ .ca_file }}"
{{- end }}
{{- if .crt_file }}
cert_file = "{{ .crt_file }}"
{{- end }}
{{- if .key_file }}
key_file = "{{ .key_file }}"
{{- end }}
insecure_skip_verify = {{ ternary .skip_ssl true (kindIs "bool" .skip_ssl) }}
{{- end }}
{{- end }}

View File

@ -1,5 +1,5 @@
runtime-endpoint: {{ cri.cri_socket }}
image-endpoint: {{ cri.cri_socket }}
runtime-endpoint: {{ .cri.cri_socket }}
image-endpoint: {{ .cri.cri_socket }}
timeout: 5
debug: false
pull-image-on-create: false

View File

@ -3,17 +3,17 @@
"max-size": "5m",
"max-file":"3"
},
{% if (cri.docker.data_root|defined) %}
"data-root": "{{ cri.docker.data_root }}",
{% endif %}
{% if (cri.registry.mirrors|defined) %}
"registry-mirrors": {{ cri.registry.mirrors|to_json|safe }},
{% endif %}
{% if (cri.registry.insecure_registries|defined) %}
"insecure-registries": {{ cri.registry.insecure_registries|to_json|safe }},
{% endif %}
{% if (cri.docker.bridge_ip|defined) %}
"bip": "{{ cri.docker.bridge_ip }}",
{% endif %}
"exec-opts": ["native.cgroupdriver={{ cri.cgroup_driver }}"]
{{- if .cri.docker.data_root }}
"data-root": "{{ .cri.docker.data_root }}",
{{- end }}
{{- if .cri.registry.mirrors }}
"registry-mirrors": {{ .cri.registry.mirrors | toJson }},
{{- end }}
{{- if .cri.registry.insecure_registries }}
"insecure-registries": {{ .cri.registry.insecure_registries | toJson }},
{{- end }}
{{- if .cri.docker.bridge_ip }}
"bip": "{{ .cri.docker.bridge_ip }}",
{{- end }}
"exec-opts": ["native.cgroupdriver={{ .cri.cgroup_driver }}"]
}

View File

@ -21,7 +21,7 @@ etcd:
backup:
backup_dir: /var/lib/etcd-backup
keep_backup_number: 5
# etcd_backup_script: /usr/local/bin/kube-scripts/backup-etcd.sh
etcd_backup_script: "backup.sh"
on_calendar: "*-*-* *:00/30:00"
performance: false
traffic_priority: false

View File

@ -1,28 +1,20 @@
---
- name: Generate default backup etcd script
template:
src: "backup.sh"
dest: "/usr/local/bin/kube-scripts/backup-etcd.sh"
mode: 777
when:
- ! etcd.backup.etcd_backup_script|defined
- name: Sync custom backup etcd script
template:
src: "{{ etcd.backup.etcd_backup_script }}"
dest: "/usr/local/bin/kube-scripts/backup-etcd.sh"
src: |
{{ .etcd.backup.etcd_backup_script }}
dest: /usr/local/bin/kube-scripts/backup-etcd.sh
mode: 777
when: etcd.backup.etcd_backup_script|defined
- name: Generate backup etcd service
copy:
src: "backup.service"
dest: "/etc/systemd/system/backup-etcd.service"
src: backup.service
dest: /etc/systemd/system/backup-etcd.service
- name: Generate backup etcd timer
template:
src: "backup.timer"
dest: "/etc/systemd/system/backup-etcd.timer"
src: backup.timer
dest: /etc/systemd/system/backup-etcd.timer
- name: Enable etcd timer
command: |

View File

@ -1,44 +1,49 @@
---
- name: Sync etcd binary to node
copy:
src: "{{ work_dir }}/kubekey/etcd/{{ etcd_version }}/{{ binary_type.stdout }}/etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
dest: "/tmp/kubekey/etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
src: |
{{ .work_dir }}/kubekey/etcd/{{ .etcd_version }}/{{ .binary_type.stdout }}/etcd-{{ .etcd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
dest: |
/tmp/kubekey/etcd-{{ .etcd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
- name: Extract etcd binary
command: |
tar --strip-components=1 -C /usr/local/bin/ -xvf /tmp/kubekey/etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}.tar.gz \
--wildcards etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}/etcd*
tar --strip-components=1 -C /usr/local/bin/ -xvf /tmp/kubekey/etcd-{{ .etcd_version }}-linux-{{ .binary_type.stdout }}.tar.gz \
--wildcards etcd-{{ .etcd_version }}-linux-{{ .binary_type.stdout }}/etcd*
- name: Sync ca file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/root.crt"
dest: "/etc/ssl/etcd/ssl/ca.crt"
src: |
{{ .work_dir }}/kubekey/pki/root.crt
dest: /etc/ssl/etcd/ssl/ca.crt
- name: Sync etcd cert file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/etcd.crt"
dest: "/etc/ssl/etcd/ssl/server.crt"
src: |
{{ .work_dir }}/kubekey/pki/etcd.crt
dest: /etc/ssl/etcd/ssl/server.crt
- name: Sync etcd key file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/etcd.key"
dest: "/etc/ssl/etcd/ssl/server.key"
src: |
{{ .work_dir }}/kubekey/pki/etcd.key
dest: /etc/ssl/etcd/ssl/server.key
- name: Generate etcd env file
template:
src: "etcd.env"
dest: "/etc/etcd.env"
src: etcd.env
dest: /etc/etcd.env
- name: Generate etcd systemd service file
copy:
src: "etcd.service"
dest: "/etc/systemd/system/etcd.service"
src: etcd.service
dest: /etc/systemd/system/etcd.service
# refer: https://etcd.io/docs/v3.5/tuning/
- name: Set cpu to performance
command: |
echo performance | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
when: etcd.performance
when: .etcd.performance
- name: Set Traffic Priority
command: |
@ -47,7 +52,7 @@
tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip dport 2380 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip sport 2379 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip dport 2379 0xffff flowid 1:1
when: etcd.traffic_priority
when: .etcd.traffic_priority
- name: Start etcd service
command: systemctl daemon-reload && systemctl start etcd && systemctl enable etcd

View File

@ -5,22 +5,23 @@
run_once: true
register: etcd_install_version
- name: Init etcd
when: etcd_install_version.stderr != ""
- name: Install etcd
when: |
or (.etcd_install_version.stderr | ne "") (.etcd_install_version.stdout | hasPrefix (printf "etcd Version: %s\n" (.etcd_version | default "" | trimPrefix "v")) | not)
block:
- name: Add etcd user
command: |
useradd -M -c 'Etcd user' -s /sbin/nologin -r etcd || :
- name: Create etcd directories
command: |
if [ ! -d "{{ item }}" ]; then
mkdir -p {{ item }} && chown -R etcd {{ item }}
fi
loop:
- "/var/lib/etcd"
- name: Init etcd
block:
- name: Add etcd user
command: |
useradd -M -c 'Etcd user' -s /sbin/nologin -r etcd || :
- name: Create etcd directories
command: |
if [ ! -d "{{ .item }}" ]; then
mkdir -p {{ .item }} && chown -R etcd {{ .item }}
fi
loop:
- "/var/lib/etcd"
- include_tasks: install_etcd.yaml
when: etcd_install_version.stderr != ""
- include_tasks: install_etcd.yaml
- include_tasks: backup_etcd.yaml
when: etcd_install_version.stderr != ""
- include_tasks: backup_etcd.yaml

View File

@ -5,10 +5,10 @@ set -o nounset
set -o pipefail
ETCDCTL_PATH='/usr/local/bin/etcdctl'
ENDPOINTS='https://{{ internal_ipv4 }}:2379'
ETCD_DATA_DIR="{{ etcd.env.data_dir }}"
BACKUP_DIR="{{ etcd.backup.backup_dir }}/etcd-$(date +%Y-%m-%d-%H-%M-%S)"
KEEPBACKUPNUMBER='{{ etcd.backup.keep_backup_number }}'
ENDPOINTS='https://{{ .internal_ipv4 }}:2379'
ETCD_DATA_DIR="{{ .etcd.env.data_dir }}"
BACKUP_DIR="{{ .etcd.backup.backup_dir }}/etcd-$(date +%Y-%m-%d-%H-%M-%S)"
KEEPBACKUPNUMBER='{{ .etcd.backup.keep_backup_number }}'
((KEEPBACKNUMBER++))
ETCDCTL_CERT="/etc/ssl/etcd/ssl/server.crt"

View File

@ -1,7 +1,7 @@
[Unit]
Description=Timer to backup ETCD
[Timer]
OnCalendar={{ etcd.backup.on_calendar }}
OnCalendar={{ .etcd.backup.on_calendar }}
Unit=backup-etcd.service
[Install]
WantedBy=multi-user.target

View File

@ -1,39 +1,43 @@
ETCD_DATA_DIR={{ etcd.env.data_dir }}
ETCD_ADVERTISE_CLIENT_URLS={{ internal_ipv4|stringformat:"https://%s:2379" }}
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ internal_ipv4|stringformat:"https://%s:2380" }}
ETCD_INITIAL_CLUSTER_STATE={{ etcd.state }}
ETCD_LISTEN_CLIENT_URLS={{ internal_ipv4|stringformat:"https://%s:2379" }},https://127.0.0.1:2379
ETCD_INITIAL_CLUSTER_TOKEN={{ etcd.env.token }}
ETCD_LISTEN_PEER_URLS={{ internal_ipv4|stringformat:"https://%s:2380" }}
ETCD_NAME={{ inventory_name }}
ETCD_DATA_DIR={{ .etcd.env.data_dir }}
ETCD_ADVERTISE_CLIENT_URLS={{ printf "https://%s:2379" .internal_ipv4 }}
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ printf "https://%s:2380" .internal_ipv4 }}
ETCD_INITIAL_CLUSTER_STATE={{ .etcd.state }}
ETCD_LISTEN_CLIENT_URLS={{ printf "https://%s:2379" .internal_ipv4 }},https://127.0.0.1:2379
ETCD_INITIAL_CLUSTER_TOKEN={{ .etcd.env.token }}
ETCD_LISTEN_PEER_URLS={{ printf "https://%s:2380" .internal_ipv4 }}
ETCD_NAME={{ .inventory_name }}
ETCD_PROXY=off
ETCD_ENABLE_V2=true
ETCD_INITIAL_CLUSTER={% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}{{ hv.inventory_name }}={{ hv.internal_ipv4|stringformat:"https://%s:2380" }}{% if (not forloop.Last) %},{% endif %}{% endfor %}
ETCD_ELECTION_TIMEOUT={{ etcd.env.election_timeout }}
ETCD_HEARTBEAT_INTERVAL={{ etcd.env.heartbeat_interval }}
ETCD_AUTO_COMPACTION_RETENTION={{ etcd.env.compaction_retention }}
ETCD_SNAPSHOT_COUNT={{ etcd.env.snapshot_count }}
{% if (etcd.metrics|defined) %}
ETCD_METRICS={{ etcd.env.metrics }}
{% endif %}
{% if (etcd.env.quota_backend_bytes|defined) %}
ETCD_QUOTA_BACKEND_BYTES={{ etcd.env.quota_backend_bytes }}
{% endif %}
{% if (etcd.env.max_request_bytes|defined) %}
ETCD_MAX_REQUEST_BYTES={{ etcd.env.max_request_bytes }}
{% endif %}
{% if (etcd.env.max_snapshots|defined) %}
ETCD_MAX_SNAPSHOTS={{ etcd.env.max_snapshots }}
{% endif %}
{% if (etcd.env.max_wals|defined) %}
ETCD_MAX_WALS={{ etcd.env.max_wals }}
{% endif %}
{% if (etcd.env.log_level|defined) %}
ETCD_LOG_LEVEL={{ etcd.env.log_level }}
{% endif %}
{% if (etcd.env.unsupported_arch|defined) %}
ETCD_UNSUPPORTED_ARCH={{ etcd.env.unsupported_arch }}
{% endif %}
{{ $ips := list }}
{{- range .groups.etcd | default list -}}
{{- $ips = append $ips (printf "%s=https://%s:2380" (index $.inventory_hosts . "inventory_name") (index $.inventory_hosts . "internal_ipv4")) -}}
{{- end -}}
ETCD_INITIAL_CLUSTER={{ $ips | join "," }}
ETCD_ELECTION_TIMEOUT={{ .etcd.env.election_timeout }}
ETCD_HEARTBEAT_INTERVAL={{ .etcd.env.heartbeat_interval }}
ETCD_AUTO_COMPACTION_RETENTION={{ .etcd.env.compaction_retention }}
ETCD_SNAPSHOT_COUNT={{ .etcd.env.snapshot_count }}
{{- if .etcd.env.metrics }}
ETCD_METRICS={{ .etcd.env.metrics }}
{{- end }}
{{- if .etcd.env.quota_backend_bytes }}
ETCD_QUOTA_BACKEND_BYTES={{ .etcd.env.quota_backend_bytes }}
{{- end }}
{{- if .etcd.env.max_request_bytes }}
ETCD_MAX_REQUEST_BYTES={{ .etcd.env.max_request_bytes }}
{{- end }}
{{- if .etcd.env.max_snapshots }}
ETCD_MAX_SNAPSHOTS={{ .etcd.env.max_snapshots }}
{{- end }}
{{- if .etcd.env.max_wals }}
ETCD_MAX_WALS={{ .etcd.env.max_wals }}
{{- end }}
{{- if .etcd.env.log_level }}
ETCD_LOG_LEVEL={{ .etcd.env.log_level }}
{{- end }}
{{- if .etcd.env.unsupported_arch }}
ETCD_UNSUPPORTED_ARCH={{ .etcd.env.unsupported_arch }}
{{- end }}
# TLS settings
ETCD_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.crt

View File

@ -2,7 +2,12 @@ image_registry:
# ha_vip: 192.168.122.59
namespace_override: ""
auth:
registry: "{% if (image_registry.ha_vip|defined) %}{{ image_registry.ha_vip }}{% else %}{{ groups['image_registry']|first }}{% endif %}"
registry: |
{{- if and .image_registry.ha_vip (ne .image_registry.ha_vip "") -}}
{{ .image_registry.ha_vip }}
{{- else -}}
{{ .groups.image_registry | default list | first }}
{{- end -}}
username: admin
password: Harbor12345
# registry type. support: harbor, registry

View File

@ -4,37 +4,31 @@
command: docker --version
register: docker_install_version
- name: Sync docker binary to remote
copy:
src: "{{ work_dir }}/kubekey/docker/{{ docker_version }}/{{ binary_type.stdout }}/docker-{{ docker_version }}.tgz"
dest: "/tmp/kubekey/docker-{{ docker_version }}.tgz"
when: docker_install_version.stderr != ""
- name: Generate docker config file
template:
src: "docker.config"
dest: "/etc/docker/daemon.json"
when: docker_install_version.stderr != ""
- name: Unpackage docker binary
command: |
tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ docker_version }}.tgz --wildcards docker/*
when: docker_install_version.stderr != ""
- name: Generate docker service file
copy:
src: "docker.service"
dest: "/etc/systemd/system/docker.service"
when: docker_install_version.stderr != ""
- name: Generate containerd service file
copy:
src: "containerd.service"
dest: "/etc/systemd/system/containerd.service"
when: docker_install_version.stderr != ""
- name: Start docker service
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
when: docker_install_version.stderr != ""
- name: Install docker
when: or (.docker_install_version.stderr | ne "") (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
block:
- name: Sync docker binary to remote
copy:
src: |
{{ .work_dir }}/kubekey/docker/{{ .docker_version }}/{{ .binary_type.stdout }}/docker-{{ .docker_version }}.tgz
dest: |
/tmp/kubekey/docker-{{ .docker_version }}.tgz
- name: Generate docker config file
template:
src: docker.config
dest: /etc/docker/daemon.json
- name: Unpackage docker binary
command: |
tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ .docker_version }}.tgz --wildcards docker/*
- name: Generate docker service file
copy:
src: docker.service
dest: /etc/systemd/system/docker.service
- name: Generate containerd service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
- name: Start docker service
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service

View File

@ -5,9 +5,9 @@
register: dockercompose_install_version
- name: Sync docker-compose to remote
when: or (.dockercompose_install_version.stderr | ne "") (.dockercompose_install_version.stdout | ne (printf "Docker Compose version %s" .dockercompose_version))
copy:
src: "{{ work_dir }}/kubekey/image-registry/docker-compose/{{ dockercompose_version }}/{{ binary_type.stdout }}/docker-compose"
dest: "/usr/local/bin/docker-compose"
src: |
{{ .work_dir }}/kubekey/image-registry/docker-compose/{{ .dockercompose_version }}/{{ .binary_type.stdout }}/docker-compose
dest: /usr/local/bin/docker-compose
mode: 0755
when:
- dockercompose_install_version.stderr != ""

View File

@ -1,44 +1,52 @@
---
- name: Sync harbor package to remote
copy:
src: "{{ work_dir }}/kubekey/image-registry/harbor/{{ harbor_version }}/{{ binary_type.stdout }}/harbor-offline-installer-{{ harbor_version }}.tgz"
dest: "/opt/harbor/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz"
src: |
{{ .work_dir }}/kubekey/image-registry/harbor/{{ .harbor_version }}/{{ .binary_type.stdout }}/harbor-offline-installer-{{ .harbor_version }}.tgz
dest: |
/opt/harbor/{{ .harbor_version }}/harbor-offline-installer-{{ .harbor_version }}.tgz
- name: Untar harbor package
command: |
cd /opt/harbor/{{ harbor_version }}/ && tar -zxvf harbor-offline-installer-{{ harbor_version }}.tgz
cd /opt/harbor/{{ .harbor_version }}/ && tar -zxvf harbor-offline-installer-{{ .harbor_version }}.tgz
- name: Sync image registry cert file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
dest: "/opt/harbor/{{ harbor_version }}/ssl/server.crt"
src: |
{{ .work_dir }}/kubekey/pki/image_registry.crt
dest: |
/opt/harbor/{{ .harbor_version }}/ssl/server.crt
- name: Sync image registry key file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.key"
dest: "/opt/harbor/{{ harbor_version }}/ssl/server.key"
src: |
{{ .work_dir }}/kubekey/pki/image_registry.key
dest: |
/opt/harbor/{{ .harbor_version }}/ssl/server.key
- name: Generate harbor config
template:
src: "harbor.config"
dest: "/opt/harbor/{{ harbor_version }}/harbor/harbor.yml"
src: harbor.config
dest: |
/opt/harbor/{{ .harbor_version }}/harbor/harbor.yml
- name: Generate keepalived docker compose
template:
src: "harbor_keepalived.docker-compose"
dest: "/opt/harbor/{{ harbor_version }}/harbor/docker-compose-keepalived.yml"
src: harbor_keepalived.docker-compose
dest: |
/opt/harbor/{{ .harbor_version }}/harbor/docker-compose-keepalived.yml
when:
- image_registry.ha_vip | defined
- image_registry_service.stderr != ""
- and .image_registry.ha_vip (ne .image_registry.ha_vip "")
- .image_registry_service.stderr | ne ""
- name: Install harbor
command: |
cd /opt/harbor/{{ harbor_version }}/harbor && /bin/bash install.sh
cd /opt/harbor/{{ .harbor_version }}/harbor && /bin/bash install.sh
- name: Register harbor service
template:
src: "harbor.service"
dest: "/etc/systemd/system/harbor.service"
src: harbor.service
dest: /etc/systemd/system/harbor.service
- name: Start harbor service
command: systemctl daemon-reload && systemctl start harbor.service && systemctl enable harbor.service

View File

@ -1,19 +1,23 @@
---
- name: Sync keepalived image to remote
copy:
src: "{{ work_dir }}/kubekey/image-registry/keepalived/{{ keepalived_version }}/{{ binary_type.stdout }}/keepalived-{{ keepalived_version }}-linux-{{ binary_type.stdout }}.tgz"
dest: "/opt/keepalived/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-{{ binary_type.stdout }}.tgz"
src: |
{{ .work_dir }}/kubekey/image-registry/keepalived/{{ .keepalived_version }}/{{ .binary_type.stdout }}/keepalived-{{ .keepalived_version }}-linux-{{ .binary_type.stdout }}.tgz
dest: |
/opt/keepalived/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-{{ .binary_type.stdout }}.tgz
- name: Load keeplived image
command: |
docker load -i /opt/keepalived/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-{{ binary_type.stdout }}.tgz
docker load -i /opt/keepalived/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-{{ .binary_type.stdout }}.tgz
- name: Sync keeplived config to remote
template:
src: "keeplived.config"
dest: "/opt/keeplived/{{ keepalived_version }}/keepalived.conf"
src: keeplived.config
dest: |
/opt/keeplived/{{ .keepalived_version }}/keepalived.conf
- name: Sync healthcheck shell to remote
template:
src: "keepalived.healthcheck"
dest: "/opt/keeplived/{{ keepalived_version }}/healthcheck.sh"
src: keepalived.healthcheck
dest: |
/opt/keeplived/{{ .keepalived_version }}/healthcheck.sh

View File

@ -1,52 +1,58 @@
---
- name: Sync registry image to remote
copy:
src: "{{ work_dir }}/kubekey/image-registry/registry/{{ registry_version }}/{{ binary_type.stdout }}/registry-{{ registry_version }}-linux-{{ binary_type.stdout }}.tgz"
dest: "/opt/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-{{ binary_type.stdout }}.tgz"
src: |
{{ .work_dir }}/kubekey/image-registry/registry/{{ .registry_version }}/{{ .binary_type.stdout }}/registry-{{ .registry_version }}-linux-{{ .binary_type.stdout }}.tgz
dest: |
/opt/registry/{{ .registry_version }}/registry-{{ .registry_version }}-linux-{{ .binary_type.stdout }}.tgz
- name: Mount NFS dir
command: |
if [ {{ os.release.ID_LIKE }} == 'debian' ]; then
{{- if .os.release.ID_LIKE | eq "debian" -}}
apt update && apt install -y nfs-common
elif [ {{ os.release.ID_LIKE }} == 'rhel fedora' ]
{{- else if .os.release.ID_LIKE | eq "rhel fedora" -}}
yum update && yum install -y nfs-utils
fi
nfsHostName={{ groups['nfs']|first }}
{% set hv=inventory_hosts['$nfsHostName'] %}
mount -t nfs {{ hv.internal_ipv4 }}:{{ image_registry.registry.storage.filesystem.nfs_mount }} {{ image_registryregistry.storage.filesystem.rootdirectory }}
{{- end -}}
mount -t nfs {{ index .inventory_hosts (.groups.nfs | default list | first) "internal_ipv4" }}:{{ .image_registry.registry.storage.filesystem.nfs_mount }} {{ .image_registry.registry.storage.filesystem.rootdirectory }}
when:
- image_registry.registry.storage.filesystem.nfs_mount | defined
- groups['nfs']|length == 1
- image_registry_service.stderr != ""
- and .image_registry.registry.storage.filesystem.nfs_mount (ne .image_registry.registry.storage.filesystem.nfs_mount "")
- .groups.nfs | default list | len | eq 1
- .image_registry_service.stderr | ne ""
- name: Load registry image
command: |
docker load -i /opt/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-{{ binary_type.stdout }}.tgz
docker load -i /opt/registry/{{ .registry_version }}/registry-{{ .registry_version }}-linux-{{ .binary_type.stdout }}.tgz
- name: Sync image registry cert file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
dest: "/opt/registry/{{ registry_version }}/ssl/server.crt"
src: |
{{ .work_dir }}/kubekey/pki/image_registry.crt
dest: |
/opt/registry/{{ .registry_version }}/ssl/server.crt
- name: Sync image registry key file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.key"
dest: "/opt/registry/{{ registry_version }}/ssl/server.key"
src: |
{{ .work_dir }}/kubekey/pki/image_registry.key
dest: |
/opt/registry/{{ .registry_version }}/ssl/server.key
- name: Generate registry docker compose
template:
src: "registry.docker-compose"
dest: "/opt/registry/{{ registry_version }}/docker-compose.yml"
src: registry.docker-compose
dest: |
/opt/registry/{{ .registry_version }}/docker-compose.yml
- name: Generate registry config
template:
src: "registry.config"
dest: "/opt/registry/{{ registry_version }}/config.yml"
src: registry.config
dest: |
/opt/registry/{{ .registry_version }}/config.yml
- name: Register registry service
copy:
src: "registry.service"
dest: "/etc/systemd/system/registry.service"
src: registry.service
dest: /etc/systemd/system/registry.service
- name: Start registry service
command: systemctl daemon-reload && systemctl start registry.service && systemctl enable registry.service

View File

@ -2,51 +2,53 @@
- name: Create harbor project for each image
tags: ["only_image"]
command: |
{{- if .image_registry.namespace_override | eq "" -}}
for dir in /tmp/kubekey/images/*; do
if [ ! -d "$dir" ]; then
# only deal directory
# only deal with directories
continue
fi
IFS='=' read -ra array <<< "${dir##*/}"
if [ $(echo ${my_array[@]} | wc -w) > 3 ]; then
project=${array[1]}
dest_image=$(echo "${array[@]:2:-1}" | tr ' ' '/')
tag=${array[-1]}
dir_name=${dir##*/}
IFS='=' set -- $dir_name
image_array="$@"
array_length=$#
if [ "$array_length" -gt 3 ]; then
project=$2
dest_image=$(shift 2 && echo "$*" | tr ' ' '/')
tag=$(echo "$@" | awk '{print $NF}')
else
echo "unsupported image"
echo "unsupported image: $dir_name"
exit 1
fi
# if project is not exist, create if
http_code=$(curl -Iks -u "{{ image_registry.auth.username }}:{{ image_registry.auth.password }}" 'https://localhost/api/v2.0/projects?project_name=${project}' | grep HTTP | awk '{print $2}')
http_code=$(curl -Iks -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" 'https://localhost/api/v2.0/projects?project_name=${project}' | grep HTTP | awk '{print $2}')
if [ $http_code == 404 ]; then
# create project
curl -u "{{ image_registry.auth.username }}:{{ image_registry.auth.password }}" -k -X POST -H "Content-Type: application/json" "https://localhost/api/v2.0/projects" -d "{ \"project_name\": \"${project}\", \"public\": true}"
curl -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" -k -X POST -H "Content-Type: application/json" "https://localhost/api/v2.0/projects" -d "{ \"project_name\": \"${project}\", \"public\": true}"
fi
done
when:
- image_registry.type == 'harbor'
- image_registry.namespace_override == ""
-
- name: Create harbor project for namespace_override
tags: ["only_image"]
command: |
{{- else -}}
# if project is not exist, create if
http_code=$(curl -Iks -u "{{ image_registry.auth.username }}:{{ image_registry.auth.password }}" 'https://localhost/api/v2.0/projects?project_name={{ image_registry.namespace_override }}' | grep HTTP | awk '{print $2}')
http_code=$(curl -Iks -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" 'https://localhost/api/v2.0/projects?project_name={{ .image_registry.namespace_override }}' | grep HTTP | awk '{print $2}')
if [ $http_code == 404 ]; then
# create project
curl -u "{{ image_registry.auth.username }}:{{ image_registry.auth.password }}" -k -X POST -H "Content-Type: application/json" "https://localhost/api/v2.0/projects" -d "{ \"project_name\": \"{{ image_registry.namespace_override }}\", \"public\": true}"
curl -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" -k -X POST -H "Content-Type: application/json" "https://localhost/api/v2.0/projects" -d "{ \"project_name\": \"{{ .image_registry.namespace_override }}\", \"public\": true}"
fi
when:
- image_registry.type == 'harbor'
- image_registry.namespace_override != ""
{{- end -}}
when: .image_registry.type | eq "harbor"
- name: Sync images package to harbor
tags: ["only_image"]
image:
push:
registry: "{{ image_registry.auth.registry }}"
namespace_override: "{{ image_registry.namespace_override }}"
username: "{{ image_registry.auth.username }}"
password: "{{ image_registry.auth.password }}"
registry: |
{{ .image_registry.auth.registry }}
namespace_override: |
{{ .image_registry.namespace_override }}
username: |
{{ .image_registry.auth.username }}
password: |
{{ .image_registry.auth.password }}

View File

@ -4,27 +4,27 @@
- include_tasks: install_docker_compose.yaml
- include_tasks: install_keepalived.yaml
when: image_registry.ha_vip | defined
when: and .image_registry.ha_vip (ne .image_registry.ha_vip "")
- name: Install harbor
when: image_registry.type == 'harbor'
when: .image_registry.type | eq "harbor"
block:
- name: Check if harbor installed
ignore_errors: true
command: systemctl status harbor.service
register: image_registry_service
- include_tasks: install_registry.yaml
when: image_registry_service.stderr != ""
register: harbor_service_status
- include_tasks: install_harbor.yaml
when: .harbor_service_status.stderr | ne ""
- name: Install registry
when: image_registry.type == 'registry'
when: .image_registry.type | eq "registry"
block:
- name: Check if registry installed
ignore_errors: true
command: systemctl status registry.service
register: image_registry_service
register: registry_service_status
- include_tasks: install_registry.yaml
when: image_registry_service.stderr != ""
when: .registry_service_status.stderr | ne ""
- include_tasks: load_images.yaml
tags: ["only_image"]

View File

@ -3,17 +3,17 @@
"max-size": "5m",
"max-file":"3"
},
{% if (cri.docker.data_root|defined) %}
"data-root": {{ cri.docker.data_root }},
{% endif %}
{% if (registry.mirrors|defined) %}
"registry-mirrors": {{ registry.mirrors|to_json|safe }},
{% endif %}
{% if (registry.insecure_registries|defined) %}
"insecure-registries": {{ registry.insecure_registries|to_json|safe }},
{% endif %}
{% if (cri.docker.bridge_ip|defined) %}
"bip": "{{ cri.docker.bridge_ip }}",
{% endif %}
"exec-opts": ["native.cgroupdriver=systemd"]
{{- if and .cri.docker.data_root (ne .cri.docker.data_root "") }}
"data-root": "{{ .cri.docker.data_root }}",
{{- end }}
{{- if and .cri.registry.mirrors (ne .cri.registry.mirrors "") }}
"registry-mirrors": {{ .cri.registry.mirrors | toJson }},
{{- end }}
{{- if and .cri.registry.insecure_registries (ne .cri.registry.insecure_registries "") }}
"insecure-registries": {{ .cri.registry.insecure_registries | toJson }},
{{- end }}
{{- if and .cri.docker.bridge_ip (ne .cri.docker.bridge_ip "") }}
"bip": "{{ .cri.docker.bridge_ip }}",
{{- end }}
"exec-opts": ["native.cgroupdriver={{ .cri.cgroup_driver | default "systemd" }}"]
}

View File

@ -2,7 +2,7 @@
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: {{ internal_ipv4 }}
hostname: {{ .internal_ipv4 }}
# http related config
http:
@ -14,8 +14,8 @@ https:
# https port for harbor, default is 443
port: 443
# The path of cert and key files for nginx
certificate: /opt/harbor/{{ harbor_version }}/ssl/server.crt
private_key: /opt/harbor/{{ harbor_version }}/ssl/server.key
certificate: /opt/harbor/{{ .harbor_version }}/ssl/server.crt
private_key: /opt/harbor/{{ .harbor_version }}/ssl/server.key
# enable strong ssl ciphers (default: false)
# strong_ssl_ciphers: false
@ -34,7 +34,7 @@ https:
# The initial password of Harbor admin
# It only works in first time to install harbor
# Remember Change the admin password from UI after launching Harbor.
harbor_admin_password: {{ image_registry.auth.password }}
harbor_admin_password: {{ .image_registry.auth.password }}
# Harbor DB configuration
database:
@ -224,7 +224,7 @@ _version: 2.10.0
# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components doesn't need to connect to each others via http proxy.
# Components doesn't need to connectorVars to each others via http proxy.
# Remove component from `components` array if want disable proxy
# for it. If you want use proxy for replication, MUST enable proxy
# for core and jobservice, and set `http_proxy` and `https_proxy`.

View File

@ -5,7 +5,7 @@ Requires=docker.service
[Service]
Type=simple
ExecStart=/usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ harbor_version }}/harbor/docker-compose.yml up {% if (image_registry.ha_vip | defined) %}&& /usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ harbor_version }}/docker-compose-keepalived.yml up{% endif %}
ExecStart=/usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ .harbor_version }}/harbor/docker-compose.yml up{{ if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }} && /usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ .harbor_version }}/harbor/docker-compose-keepalived.yml up{{ end }}
ExecStop=/usr/local/bin/docker-compose -p harbor down
Restart=on-failure
[Install]

View File

@ -2,7 +2,7 @@
version: '2.3'
services:
keepalived:
image: osixia/keepalived: {{ keepalived_version }}
image: osixia/keepalived: {{ .keepalived_version }}
container_name: keepalived
restart: always
dns_search: .
@ -17,10 +17,10 @@ services:
- proxy
volumes:
- type: bind
source: /opt/keeplived/{{ keepalived_version }}/keepalived.conf
source: /opt/keeplived/{{ .keepalived_version }}/keepalived.conf
target: /container/service/keepalived/assets/keepalived.conf
- type: bind
source: /opt/keeplived/{{ keepalived_version }}/healthcheck.sh
source: /opt/keeplived/{{ .keepalived_version }}/healthcheck.sh
target: /etc/keepalived/healthcheck.sh
networks:
- harbor

View File

@ -23,7 +23,7 @@ vrrp_script healthcheck {
auth_pass k8s-test
}
virtual_ipaddress {
{{ image_registry.ha_vip }}
{{ .image_registry.ha_vip }}
}
track_script {
healthcheck

View File

@ -1,12 +1,12 @@
#!/bin/bash
{% if (image_registry.type=='registry') %}
{{- if .image_registry.type | eq "registry" }}
# registry service
service=registry:5000
{% else %}
{{- else }}
# harbor service
service=harbor:80
{% endif %}
{{- end }}
nc -zv -w 2 $service > /dev/null 2>&1

View File

@ -22,55 +22,55 @@ log:
# to:
# - errors@example.com
storage:
{% if (image_registryregistry.storage.filesystem|length != 0) %}
{{- if and .image_registry.registry.storage.filesystem.rootdirectory (ne .image_registry.registry.storage.filesystem.rootdirectory "") }}
filesystem:
rootdirectory: {{ image_registryregistry.storage.filesystem.rootdirectory }}
rootdirectory: {{ .image_registry.registry.storage.filesystem.rootdirectory }}
maxthreads: 100
{% endif %}
{% if (image_registryregistry.storage.azure|length != 0) %}
{{- end }}
{{- if .image_registry.registry.storage.azure }}
azure:
accountname: {{ image_registryregistry.storage.azure.accountname }}
accountkey: {{ image_registryregistry.storage.azure.accountkey }}
container: {{ image_registryregistry.storage.azure.container }}
{% endif %}
{% if (image_registryregistry.storage.gcs|length != 0) %}
accountname: {{ .image_registry.registry.storage.azure.accountname }}
accountkey: {{ .image_registry.registry.storage.azure.accountkey }}
container: {{ .image_registry.registry.storage.azure.container }}
{{- end }}
{{- if .image_registry.registry.storage.gcs }}
gcs:
bucket: {{ image_registryregistry.storage.gcs.bucket }}
keyfile: {{ image_registryregistry.storage.gcs.keyfile }}
bucket: {{ .image_registry.registry.storage.gcs.bucket }}
keyfile: {{ .image_registry.registry.storage.gcs.keyfile }}
credentials:
type: service_account
project_id: {{ image_registryregistry.storage.gcs.credentials.project_id }}
private_key_id: {{ image_registryregistry.storage.gcs.credentials.private_key_id }}
private_key: {{ image_registryregistry.storage.gcs.credentials.private_key }}
client_email: {{ image_registryregistry.storage.gcs.credentials.client_email }}
client_id: {{ image_registryregistry.storage.gcs.credentials.client_id }}
auth_uri: {{ image_registryregistry.storage.gcs.credentials.auth_uri }}
token_uri: {{ image_registryregistry.storage.gcs.credentials.token_uri }}
auth_provider_x509_cert_url: {{ image_registryregistry.storage.gcs.credentials.auth_provider_x509_cert_url }}
client_x509_cert_url: {{ image_registryregistry.storage.gcs.credentials.client_x509_cert_url }}
rootdirectory: {{ image_registryregistry.storage.gcs.rootdirectory }}
{% endif %}
{% if (image_registryregistry.storage.s3|length != 0) %}
project_id: {{ .image_registry.registry.storage.gcs.credentials.project_id }}
private_key_id: {{ .image_registry.registry.storage.gcs.credentials.private_key_id }}
private_key: {{ .image_registry.registry.storage.gcs.credentials.private_key }}
client_email: {{ .image_registry.registry.storage.gcs.credentials.client_email }}
client_id: {{ .image_registry.registry.storage.gcs.credentials.client_id }}
auth_uri: {{ .image_registry.registry.storage.gcs.credentials.auth_uri }}
token_uri: {{ .image_registry.registry.storage.gcs.credentials.token_uri }}
auth_provider_x509_cert_url: {{ .image_registry.registry.storage.gcs.credentials.auth_provider_x509_cert_url }}
client_x509_cert_url: {{ .image_registry.registry.storage.gcs.credentials.client_x509_cert_url }}
rootdirectory: {{ .image_registry.registry.storage.gcs.rootdirectory }}
{{- end }}
{{- if .image_registry.registry.storage.s3 }}
s3:
accesskey: {{ image_registryregistry.storage.s3.accesskey }}
secretkey: {{ image_registryregistry.storage.s3.secretkey }}
region: {{ image_registryregistry.storage.s3.region }}
regionendpoint: {{ image_registryregistry.storage.s3.regionendpoint }}
accesskey: {{ .image_registry.registry.storage.s3.accesskey }}
secretkey: {{ .image_registry.registry.storage.s3.secretkey }}
region: {{ .image_registry.registry.storage.s3.region }}
regionendpoint: {{ .image_registry.registry.storage.s3.regionendpoint }}
forcepathstyle: true
accelerate: false
bucket: {{ image_registryregistry.storage.s3.bucket }}
bucket: {{ .image_registry.registry.storage.s3.bucket }}
encrypt: true
keyid: {{ image_registryregistry.storage.s3.keyid }}
keyid: {{ .image_registry.registry.storage.s3.keyid }}
secure: true
v4auth: true
chunksize: 5242880
multipartcopychunksize: 33554432
multipartcopymaxconcurrency: 100
multipartcopythresholdsize: 33554432
rootdirectory: {{ image_registryregistry.storage.s3.rootdirectory }}
rootdirectory: {{ .image_registry.registry.storage.s3.rootdirectory }}
usedualstack: false
loglevel: debug
{% endif %}
{{- end }}
inmemory: # This driver takes no parameters
delete:
enabled: false

View File

@ -2,7 +2,7 @@
version: '2.3'
services:
registry:
image: registry:{{ registry_version }}
image: registry:{{ .registry_version }}
container_name: registry
restart: always
dns_search: .
@ -15,18 +15,18 @@ services:
- SETUID
volumes:
- type: bind
source: /opt/registry/{{ registry_version }}/ssl/
source: /opt/registry/{{ .registry_version }}/ssl/
target: /etc/registry/ssl/
- type: bind
source: /opt/registry/{{ registry_version }}/config.yml
source: /opt/registry/{{ .registry_version }}/config.yml
target: /etc/docker/registry/config.yml
port:
- 443:5000
networks:
- registry
{% if (image_registry.ha_vip | defined) %}
{{- if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }}
keepalived:
image: osixia/keepalived: {{ keepalived_version }}
image: osixia/keepalived:{{ .keepalived_version }}
container_name: keepalived
restart: always
dns_search: .
@ -41,14 +41,14 @@ services:
- registry
volumes:
- type: bind
source: /opt/keeplived/{{ keepalived_version }}/keepalived.conf
source: /opt/keeplived/{{ .keepalived_version }}/keepalived.conf
target: /container/service/keepalived/assets/keepalived.conf
- type: bind
source: /opt/keeplived/{{ keepalived_version }}/healthcheck.sh
source: /opt/keeplived/{{ .keepalived_version }}/healthcheck.sh
target: /etc/keepalived/healthcheck.sh
networks:
- registry
{% endif %}
{{- end }}
networks:
registry:
external: false

View File

@ -5,7 +5,7 @@ Requires=docker.service
[Service]
Type=simple
ExecStart=/usr/local/bin/docker-compose -p registry -f /opt/registry/{{ registry_version }}/docker-compose.yml up
ExecStart=/usr/local/bin/docker-compose -p registry -f /opt/registry/{{ .registry_version }}/docker-compose.yml up
ExecStop=/usr/local/bin/docker-compose -p registry down
Restart=on-failure
[Install]

View File

@ -3,8 +3,8 @@ kubernetes:
# support: flannel, calico
kube_network_plugin: calico
# the image repository of kubernetes.
image_repository: "{{ k8s_registry }}"
image_repository: |
{{ .k8s_registry }}
# memory size for each kube_worker node.(unit kB)
# should be greater than or equal to minimal_node_memory_mb.
minimal_node_memory_mb: 10
@ -17,9 +17,12 @@ kubernetes:
# the first value is ipv4_cidr, the last value is ipv6_cidr.
pod_cidr: 10.233.64.0/18
service_cidr: 10.233.0.0/18
dns_image: "{{ k8s_registry }}/coredns/coredns:v1.8.6"
dns_cache_image: "{{ dockerio_registry }}/kubesphere/k8s-dns-node-cache:1.22.20"
dns_service_ip: "{{ kubernetes.networking.service_cidr|ip_range:2 }}"
dns_image: |
{{ .k8s_registry }}/coredns/coredns:v1.8.6
dns_cache_image: |
{{ .dockerio_registry }}/kubesphere/k8s-dns-node-cache:1.22.20
dns_service_ip: |
{{ .kubernetes.networking.service_cidr | ipInCIDR 2 }}
# Specify a stable IP address or DNS name for the control plane.
# control_plane_endpoint: lb.kubesphere.local
apiserver:
@ -49,10 +52,10 @@ kubernetes:
kubelet:
max_pod: 110
pod_pids_limit: 10000
feature_gates: {}
# feature_gates:
container_log_max_size: 5Mi
container_log_max_files: 3
extra_args: {}
# extra_args:
coredns:
dns_etc_hosts: []
# the config for zones
@ -72,7 +75,7 @@ kubernetes:
cache: 30
kubernetes:
zones:
- "{{ kubernetes.networking.dns_domain }}"
- "{{ .kubernetes.networking.dns_domain }}"
# rewrite performs internal message rewriting.
# rewrite:
# # specify multiple rules and an incoming query matches multiple rules.
@ -142,17 +145,22 @@ kubernetes:
max_concurrent: 1000
kube_vip:
enabled: false
address: |
{{ .kubernetes.control_plane_endpoint }}
# support:BGP, ARP
mode: BGP
image: "{{ dockerio_registry }}/plndr/kube-vip:v0.7.2"
image: |
{{ .dockerio_registry }}/plndr/kube-vip:v0.7.2
haproxy:
enabled: false
health_port: 8081
image: "{{ dockerio_registry }}/library/haproxy:2.9.6-alpine"
etcd: # todo should apply zone variable
image: |
{{ .dockerio_registry }}/library/haproxy:2.9.6-alpine
etcd:
# It is possible to deploy etcd with three methods.
# external: Deploy etcd cluster with external etcd cluster.
# internal: Deploy etcd cluster by static pod.
deployment_type: external
image: "{{ k8s_registry }}/etcd:3.5.0"
image: |
{{ .k8s_registry }}/etcd:3.5.0
custom_label: {}

View File

@ -2,30 +2,27 @@
# install with static pod: https://kube-vip.io/docs/installation/static/
- name: Get interface for ipv4
command: |
ip route | grep ' {{ internal_ipv4 }} ' | grep 'proto kernel scope link src' | sed -e \"s/^.*dev.//\" -e \"s/.proto.*//\"| uniq
ip route | grep ' {{ .internal_ipv4 }} ' | grep 'proto kernel scope link src' | sed -e \"s/^.*dev.//\" -e \"s/.proto.*//\"| uniq
register: interface
- name: Should ipv4 interface not be empty
assert: interface.stdout != ""
fail_msg: "{{ internal_ipv4 }} cannot be found in network interface."
- name: Generate kubevip manifest
template:
src: "kubevip/kubevip.{{ kubernetes.kube_vip.mode }}"
dest: "/etc/kubernetes/manifests/kubevip.yaml"
src: |
kubevip/kubevip.{{ .kubernetes.kube_vip.mode }}
dest: /etc/kubernetes/manifests/kubevip.yaml
- name: Update kubelet config
command: |
sed -i 's#server:.*#server: https://127.0.0.1:{{ kubernetes.apiserver.port }}#g' /etc/kubernetes/kubelet.conf
sed -i 's#server:.*#server: https://127.0.0.1:{{ .kubernetes.apiserver.port }}#g' /etc/kubernetes/kubelet.conf
systemctl restart kubelet
- name: Update kube-proxy config
command: |
set -o pipefail && /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf get configmap kube-proxy -n kube-system -o yaml \
| sed 's#server:.*#server: https://127.0.0.1:{{ kubernetes.apiserver.port }}#g' \
| sed 's#server:.*#server: https://127.0.0.1:{{ .kubernetes.apiserver.port }}#g' \
| /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf replace -f -
/usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0
- name: Update hosts file
command: |
sed -i 's#.* {{ kubernetes.control_plane_endpoint }}#127.0.0.1 {{ kubernetes.control_plane_endpoint }}s#g' /etc/hosts
sed -i 's#.* {{ .kubernetes.control_plane_endpoint }}#127.0.0.1 {{ .kubernetes.control_plane_endpoint }}s#g' /etc/hosts

View File

@ -5,8 +5,8 @@
- name: Create kube directories
command: |
if [ ! -d "{{ item.path }}" ]; then
mkdir -p {{ item.path }} && chown kube -R {{ item.chown }}
if [ ! -d "{{ .item.path }}" ]; then
mkdir -p {{ .item.path }} && chown kube -R {{ .item.chown }}
fi
loop:
- {path: "/usr/local/bin", chown: "/usr/local/bin"}
@ -20,52 +20,49 @@
- {path: "/var/lib/calico", chown: "/var/lib/calico"}
- name: Sync external etcd config
when:
- kubernetes.etcd.deployment_type == 'external' && groups['etcd']|length > 0
when: and (.kubernetes.etcd.deployment_type | eq "external") (.groups.etcd | default list | len | lt 0)
block:
- name: Sync etcd ca file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/root.crt"
dest: "/etc/kubernetes/pki/etcd/ca.crt"
src: |
{{ .work_dir }}/kubekey/pki/root.crt
dest: /etc/kubernetes/pki/etcd/ca.crt
- name: Sync etcd cert files to remote
copy:
src: "{{ work_dir }}/kubekey/pki/etcd.crt"
dest: "/etc/kubernetes/pki/etcd/client.crt"
src: |
{{ .work_dir }}/kubekey/pki/etcd.crt
dest: /etc/kubernetes/pki/etcd/client.crt
- name: Sync etcd key files to remote
copy:
src: "{{ work_dir }}/kubekey/pki/etcd.key"
dest: "/etc/kubernetes/pki/etcd/client.key"
src: |
{{ .work_dir }}/kubekey/pki/etcd.key
dest: /etc/kubernetes/pki/etcd/client.key
- name: Sync audit policy file to remote
copy:
src: "audit"
dest: "/etc/kubernetes/audit/"
when:
- kubernetes.audit
src: audit
dest: /etc/kubernetes/audit/
when: .kubernetes.audit
- name: Generate kubeadm init config
template:
src: "kubeadm/{% if (kube_version|version:'>=v1.24.0') %}kubeadm-init.v1beta3{% else %}kubeadm-init.v1beta2{% endif %}"
dest: "/etc/kubernetes/kubeadm-config.yaml"
src: |
{{- if .kube_version | semverCompare ">=v1.24.0" -}}
kubeadm/kubeadm-init.v1beta3
{{- else -}}
kubeadm/kubeadm-init.v1beta2
{{- end -}}
dest: /etc/kubernetes/kubeadm-config.yaml
- name: Init kubernetes cluster
block:
- name: Init kubernetes by kubeadm
command: |
/usr/local/bin/kubeadm init \
--config=/etc/kubernetes/kubeadm-config.yaml \
--ignore-preflight-errors=FileExisting-crictl,ImagePull \
{% if (not kubernetes.kube_proxy.enabled) %}--skip-phases=addon/kube-proxy{% endif %}
/usr/local/bin/kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull {{ if not .kubernetes.kube_proxy.enabled }}--skip-phases=addon/kube-proxy{{ end }}
rescue:
- name: Reset kubeadm if init failed
command: kubeadm reset -f {% if (cri.cri_socket !="") %}--cri-socket {{ cri.cri_socket }}{% endif %}
- name: Remote master taint
ignore_errors: true
command: |
/usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
/usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
when: inventory_name in groups["kube_worker"]
command: |
kubeadm reset -f {{ if and .cri.cri_socket (ne .cri.cri_socket "") }}--cri-socket {{ .cri.cri_socket }}{{ end }}
- name: Copy kubeconfig to default dir
command: |
@ -73,16 +70,16 @@
mkdir -p /root/.kube
fi
cp -f /etc/kubernetes/admin.conf /root/.kube/config
when: kube_node_info_important.stderr != ""
when: .kube_node_info_important.stderr | ne ""
- name: Set to worker node
when: inventory_name in groups["kube_worker"]
when: .groups.kube_worker | default list | has .inventory_name
block:
- name: Remote master taint
ignore_errors: true
command: |
/usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
/usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
/usr/local/bin/kubectl taint nodes {{ .inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
/usr/local/bin/kubectl taint nodes {{ .inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
- name: Add work label
command: |
/usr/local/bin/kubectl label --overwrite node {{ inventory_name }} node-role.kubernetes.io/worker=
/usr/local/bin/kubectl label --overwrite node {{ .inventory_name }} node-role.kubernetes.io/worker=

View File

@ -0,0 +1,65 @@
---
- name: Check if helm is installed
ignore_errors: true
command: helm version
register: helm_install_version
- name: Install helm
when: or (.helm_install_version.stderr | ne "") (.helm_install_version.stdout | contains (printf "Version:\"%s\"" .helm_version) | not)
block:
- name: Sync helm to remote
copy:
src: |
{{ .work_dir }}/kubekey/helm/{{ .helm_version }}/{{ .binary_type.stdout }}/helm-{{ .helm_version }}-linux-{{ .binary_type.stdout }}.tar.gz
dest: |
/tmp/kubekey/helm-{{ .helm_version }}-linux-{{ .binary_type.stdout }}.tar.gz
- name: Install helm
command: |
tar --strip-components=1 -zxvf /tmp/kubekey/helm-{{ .helm_version }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin linux-{{ .binary_type.stdout }}/helm
- name: Check if kubeadm is installed
ignore_errors: true
command: kubeadm version -o short
register: kubeadm_install_version
- name: Install kubeadm
when: or (.kubeadm_install_version.stderr | ne "") (.kubeadm_install_version.stdout | ne .kube_version)
copy:
src: |
{{ .work_dir }}/kubekey/kube/{{ .kube_version }}/{{ .binary_type.stdout }}/kubeadm
dest: /usr/local/bin/kubeadm
mode: 0755
- name: Check if kubectl is installed
ignore_errors: true
command: kubectl version
register: kubectl_install_version
- name: Sync kubectl to remote
when: or (.kubectl_install_version.stderr | ne "") (.kubectl_install_version.stdout | contains (printf "GitVersion:\"%s\"" .kube_version) | not)
copy:
src: |
{{ .work_dir }}/kubekey/kube/{{ .kube_version }}/{{ .binary_type.stdout }}/kubectl
dest: /usr/local/bin/kubectl
mode: 0755
- name: Check if kubelet is installed
ignore_errors: true
command: kubelet --version
register: kubelet_install_version
- name: Install kubelet
when: or (.kubelet_install_version.stderr | ne "") (.kubelet_install_version.stdout | ne (printf "Kubernetes %s" .kube_version))
block:
- name: Sync kubelet to remote
copy:
src: |
{{ .work_dir }}/kubekey/kube/{{ .kube_version }}/{{ .binary_type.stdout }}/kubelet
dest: /usr/local/bin/kubelet
mode: 0755
- name: Sync kubelet env to remote
template:
src: kubeadm/kubelet.env
dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
- name: Sync kubelet service to remote
copy:
src: kubelet.service
dest: /etc/systemd/system/kubelet.service
- name: Register kubelet service
command: systemctl daemon-reload && systemctl enable kubelet.service

View File

@ -1,69 +0,0 @@
---
- name: Check if helm is installed
ignore_errors: true
command: helm version
register: helm_install_version
- name: Sync helm to remote
copy:
src: "{{ work_dir }}/kubekey/helm/{{ helm_version }}/{{ binary_type.stdout }}/helm-{{ helm_version }}-linux-{{ binary_type.stdout }}.tar.gz"
dest: "/tmp/kubekey/helm-{{ helm_version }}-linux-{{ binary_type.stdout }}.tar.gz"
when: helm_install_version.stderr != ""
- name: Install helm
command: |
tar --strip-components=1 -zxvf /tmp/kubekey/helm-{{ helm_version }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin linux-{{ binary_type.stdout }}/helm
when: helm_install_version.stderr != ""
- name: Check if kubeadm is installed
ignore_errors: true
command: kubeadm version
register: kubeadm_install_version
- name: Sync kubeadm to remote
copy:
src: "{{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ binary_type.stdout }}/kubeadm"
dest: "/usr/local/bin/kubeadm"
mode: 0755
when: kubeadm_install_version.stderr != ""
- name: Check if kubectl is installed
ignore_errors: true
command: kubectl version
register: kubectl_install_version
- name: Sync kubectl to remote
copy:
src: "{{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ binary_type.stdout }}/kubectl"
dest: "/usr/local/bin/kubectl"
mode: 0755
when: kubectl_install_version.stderr != ""
- name: Check if kubelet is installed
ignore_errors: true
command: systemctl status kubelet
register: kubelet_install_version
- name: Sync kubelet to remote
copy:
src: "{{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ binary_type.stdout }}/kubelet"
dest: "/usr/local/bin/kubelet"
mode: 0755
when: kubelet_install_version.stderr != ""
- name: Sync kubelet env to remote
template:
src: "kubeadm/kubelet.env"
dest: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
when: kubelet_install_version.stderr != ""
- name: Sync kubelet service to remote
copy:
src: "kubelet.service"
dest: "/etc/systemd/system/kubelet.service"
when: kubelet_install_version.stderr != ""
- name: Register kubelet service
command: systemctl daemon-reload && systemctl enable kubelet.service
when: kubelet_install_version.stderr != ""

View File

@ -1,15 +1,19 @@
---
- name: Generate kubeadm join config
template:
src: kubeadm/{% if (kube_version|version:">=v1.24.0") %}kubeadm-join.v1beta3{% else %}kubeadm-join.v1beta2{% endif %}
src: |
{{- if .kube_version | semverCompare ">=v1.24.0" -}}
kubeadm/kubeadm-join.v1beta3
{{- else -}}
kubeadm/kubeadm-join.v1beta2
{{- end -}}
dest: /etc/kubernetes/kubeadm-config.yaml
- name: Sync audit policy file to remote
copy:
src: "audit"
dest: "/etc/kubernetes/audit/"
when:
- kubernetes.audit
src: audit
dest: /etc/kubernetes/audit/
when: .kubernetes.audit
- name: Join kubernetes cluster
block:
@ -18,21 +22,22 @@
/usr/local/bin/kubeadm join --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull
rescue:
- name: Reset kubeadm if join failed
command: kubeadm reset -f {% if (cri.cri_socket|defined && cri.cri_socket != "") %}--cri-socket {{ cri.cri_socket }}{% endif %}
command: kubeadm reset -f {{ if and .cri.cri_socket (ne .cri.cri_socket "") }}--cri-socket {{ .cri.cri_socket }}{{ end }}
- name: Sync kubeconfig to remote
copy:
src: "{{ work_dir }}/kubekey/kubeconfig"
src: |
{{ .work_dir }}/kubekey/kubeconfig
dest: /root/.kube/config
- name: Set to worker node
when: inventory_name in groups["kube_worker"]
when: .groups.kube_worker | default list | has .inventory_name
block:
- name: Remote master taint
ignore_errors: true
command: |
/usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
/usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
/usr/local/bin/kubectl taint nodes {{ .inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
/usr/local/bin/kubectl taint nodes {{ .inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
- name: Add work label
command: |
/usr/local/bin/kubectl label --overwrite node {{ inventory_name }} node-role.kubernetes.io/worker=
/usr/local/bin/kubectl label --overwrite node {{ .inventory_name }} node-role.kubernetes.io/worker=

View File

@ -1,31 +1,33 @@
---
- name: Check kubernetes if installed
ignore_errors: true
command: kubectl get node --field-selector metadata.name={{ inventory_name }}
command: kubectl get node --field-selector metadata.name={{ .inventory_name }}
register: kube_node_info_important
- include_tasks: install_kube_binaries.yaml
- include_tasks: install_binaries.yaml
- include_tasks: deploy_kube_vip.yaml
when:
- kubernetes.kube_vip.enabled
- inventory_name in groups['kube_control_plane']
- .kubernetes.kube_vip.enabled
- .groups.kube_control_plane | default list | has .inventory_name
- name: Select init kubernetes node
run_once: true
set_fact:
init_kubernetes_node: "{{ groups['kube_control_plane']|first }}"
init_kubernetes_node: |
{{ .groups.kube_control_plane | default list | first }}
- name: Init kubernetes
when: inventory_name == init_kubernetes_node
when: eq .inventory_name .init_kubernetes_node
block:
- include_tasks: init_kubernetes.yaml
when: kube_node_info_important.stderr != ""
when: .kube_node_info_important.stderr | ne ""
- include_tasks: deploy_cluster_dns.yaml
- name: Fetch kubeconfig to local
fetch:
src: /etc/kubernetes/admin.conf
dest: "{{ work_dir }}/kubekey/kubeconfig"
dest: |
{{ .work_dir }}/kubekey/kubeconfig
- name: Generate certificate key by kubeadm
command: |
/usr/local/bin/kubeadm init phase upload-certs --upload-certs --config /etc/kubernetes/kubeadm-config.yaml 2>&1 \
@ -33,7 +35,8 @@
register: kubeadm_cert_result
- name: Set_Fact certificate key to all hosts
set_fact:
kubeadm_cert: "{{ kubeadm_cert_result.stdout }}"
kubeadm_cert: |
{{ .kubeadm_cert_result.stdout }}
- name: Generate kubeadm token
block:
- name: Generate token by kubeadm
@ -41,24 +44,26 @@
register: kubeadm_token_result
- name: Set_Fact token to all hosts
set_fact:
kubeadm_token: "{{ kubeadm_token_result.stdout }}"
kubeadm_token: |
{{ .kubeadm_token_result.stdout }}
- name: Set_Fact init endpoint
set_fact:
init_kubernetes_endpoint: "{{ inventory_name }}"
init_kubernetes_endpoint: |
{{ .inventory_name }}
- include_tasks: join_kubernetes.yaml
when:
- kube_node_info_important.stderr != ""
- inventory_name != init_kubernetes_node
- .kube_node_info_important.stderr | ne ""
- ne .inventory_name .init_kubernetes_node
- include_tasks: deploy_haproxy.yaml
when:
- kubernetes.haproxy.enabled
- inventory_name in groups['kube_worker']
- .kubernetes.haproxy.enabled
- .groups.kube_worker | default list | has .inventory_name
- name: Add custom label to cluster
command: |
{% for k,v in kubernetes.custom_label %}
/usr/local/bin/kubectl label --overwrite node {{ inventory_name }} {{ k }}={{ v }}
{% endfor %}
when: kubernetes.custom_label | length > 0
{{- range $k, $v := .kubernetes.custom_label -}}
/usr/local/bin/kubectl label --overwrite node {{ $.inventory_name }} {{ $k }}={{ $v }}
{{- end -}}
when: .kubernetes.custom_label | len | lt 0

View File

@ -47,7 +47,7 @@ metadata:
prometheus.io/scrape: "true"
createdby: 'kubekey'
spec:
clusterIP: {{ kubernetes.networking.dns_service_ip }}
clusterIP: {{ .kubernetes.networking.dns_service_ip }}
selector:
k8s-app: kube-dns
ports:
@ -119,7 +119,7 @@ spec:
- ""
containers:
- name: coredns
image: "{{ kubernetes.networking.dns_image }}"
image: "{{ .kubernetes.networking.dns_image }}"
imagePullPolicy: IfNotPresent
resources:
# TODO: Set memory limits when we've profiled the container for large
@ -185,58 +185,78 @@ metadata:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
{% for ez in kubernetes.coredns.zone_configs %}
{{ ez.zones|join:" " }} {
cache {{ ez.cache }}
{% for c in ez.additional_configs %}
{{ c }}
{% endfor %}
{{- range .kubernetes.coredns.zone_configs }}
{{ .zones | join " " }} {
cache {{ .cache }}
{{- range .additional_configs }}
{{ . }}
{{- end }}
{% for r in ez.rewrite %}
rewrite {{ r.rule }} {
{{ r.field }} {{ r.type }} {{ r.value }}
{{ r.options }}
{{- range .rewrite }}
rewrite {{ .rule }} {
{{ .field }} {{ .type }} {{ .value }}
{{ .options }}
}
{% endfor %}
{{- end }}
health {
lameduck 5s
}
{% if (ez.kubernetes.zones|defined) %}
kubernetes {{ ez.kubernetes.zones|join:" " }} in-addr.arpa ip6.arpa {
{{- if .kubernetes.zones | len | lt 0 }}
kubernetes {{ .kubernetes.zones | join " " }} in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
{% endif %}
{{- end }}
{% for f in ez.forward %}
forward {{ f.from }} {{ f.to|join:" " }} {
{% if (f.except|length > 0) %} except {{ f.except|join:" " }}{% endif %}
{% if (f.force_tcp) %} force_tcp{% endif %}
{% if (f.prefer_udp) %} prefer_udp{% endif %}
{% if (f.max_fails|defined) %} max_fails {{ f.max_fails }}{% endif %}
{% if (f.expire|defined) %} expire {{ f.expire }}{% endif %}
{% if (f.tls|defined) %} tls {{ f.tls.cert_file }} {{ f.tls.key_file }} {{ f.tls.ca_file }}{% endif %}
{% if (f.tls_servername|defined) %} tls_servername {{ f.tls_servername }}{% endif %}
{% if (f.policy|defined) %} policy {{ f.policy }}{% endif %}
{% if (f.health_check|defined) %} health_check {{ f.health_check }}{% endif %}
{% if (f.max_concurrent|defined) %} max_concurrent {{ f.max_concurrent }}{% endif %}
{{- range .forward }}
forward {{ .from }} {{ .to | join " " }} {
{{- if .except | len | lt 0 }}
except {{ .except | join " " }}
{{- end }}
{{- if .force_tcp }}
force_tcp
{{- end }}
{{- if .prefer_udp }}
prefer_udp
{{- end }}
{{- if .max_fails }}
max_fails {{ .max_fails }}
{{- end }}
{{- if .expire }}
expire {{ .expire }}
{{- end }}
{{- if .tls }}
tls {{ .tls.cert_file }} {{ .tls.key_file }} {{ .tls.ca_file }}
{{- end }}
{{- if .tls_servername }}
tls_servername {{ .tls_servername }}
{{- end }}
{{- if .policy }}
policy {{ .policy }}
{{- end }}
{{- if .health_check }}
health_check {{ .health_check }}
{{- end }}
{{- if .max_concurrent }}
max_concurrent {{ .max_concurrent }}
{{- end }}
}
{% endfor %}
{{- end }}
{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
{{- if $.kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
{{- end }}
}
{% endfor %}
{{- end }}
{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
hosts: |
{% for h in kubernetes.coredns.dns_etc_hosts %}
{{ h }}
{% endfor %}
{% endif %}
{{- range .kubernetes.coredns.dns_etc_hosts }}
{{ . }}
{{- end }}
{{- end }}

View File

@ -43,7 +43,7 @@ spec:
operator: "Exists"
containers:
- name: node-cache
image: {{ kubernetes.networking.dns_cache_image }}
image: {{ .kubernetes.networking.dns_cache_image }}
resources:
limits:
memory: 200Mi
@ -112,118 +112,118 @@ metadata:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
{% for ez in kubernetes.coredns.external_zones %}
{{ ez.zones|join:" " }}{
log
errors
loadbalance
cache {{ ez.cache }}
reload
loop
bind 169.254.25.10
prometheus :9253
{{- range .kubernetes.coredns.external_zones }}
{{ .zones | join " " }}{
log
errors
loadbalance
cache {{ .cache }}
reload
loop
bind 169.254.25.10
prometheus :9253
{% for r in ez.rewrite %}
rewrite {{ r.rule }} {
{{ r.field }} {{ r.type }} {{ r.value }}
{{ r.options }}
}
{% endfor %}
{{- range .rewrite }}
rewrite {{ .rule }} {
{{ .field }} {{ .type }} {{ .value }}
{{ .options }}
}
{{- end }}
{% for f in ez.forward %}
forward {{ f.from }} {{ f.to|join:" " }} {
{% if (f.except|length > 0) %}
except {{ f.except|join:" " }}
{% endif %}
{% if (f.force_tcp) %}
force_tcp
{% endif %}
{% if (f.prefer_udp) %}
prefer_udp
{% endif %}
max_fails {{ f.max_fails|default_if_none:2 }}
expire {{ f.expire|default_if_none:"10s" }}
{% if (f.tls|defined) %}
tls {{ f.tls.cert_file|default_if_none:'""' }} {{ f.tls.key_file|default_if_none:'""' }} {{ f.tls.ca_file|default_if_none:'""' }}
{% endif %}
{% if (f.tls_servername|defined) %}
tls_servername {{ f.tls_servername }}
{% endif %}
{% if (f.policy|defined) %}
policy {{ f.policy }}
{% endif %}
{% if (f.health_check|defined) %}
health_check {{ f.health_check }}
{% endif %}
{% if (f.max_concurrent|defined) %}
max_concurrent {{ f.max_concurrent }}
{% endif %}
}
{% endfor %}
{{- range .forward }}
forward {{ .from }} {{ .to | join " " }} {
{{- if .except | len | lt 0 }}
except {{ .except | join " " }}
{{- end }}
{{- if .force_tcp }}
force_tcp
{{- end }}
{{ if .prefer_udp }}
prefer_udp
{{- end }}
max_fails {{ .max_fails | default 2 }}
expire {{ .expire | default "10s" }}
{{- if .tls }}
tls {{ .tls.cert_file }} {{ .tls.key_file }} {{ .tls.ca_file }}
{{- end }}
{{- if .tls_servername }}
tls_servername {{ .tls_servername }}
{{- end }}
{{- if .policy }}
policy {{ .policy }}
{{- end }}
{{- if .health_check }}
health_check {{ .health_check }}
{{- end }}
{{- if .max_concurrent }}
max_concurrent {{ .max_concurrent }}
{{- end }}
}
{{- end }}
{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
{{- if $.kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
hosts /etc/coredns/hosts {
fallthrough
}
{{- end }}
}
{% endfor %}
{{- end }}
{{ kubernetes.networking.dns_domain }}:53 {
errors
cache {
success 9984 30
denial 9984 5
}
reload
loop
bind 169.254.25.10
forward . {{ kubernetes.networking.dns_service_ip }} {
force_tcp
}
prometheus :9253
health 169.254.25.10:9254
{{ .kubernetes.networking.dns_domain }}:53 {
errors
cache {
success 9984 30
denial 9984 5
}
reload
loop
bind 169.254.25.10
forward . {{ .kubernetes.networking.dns_service_ip }} {
force_tcp
}
prometheus :9253
health 169.254.25.10:9254
}
in-addr.arpa:53 {
errors
cache 30
reload
loop
bind 169.254.25.10
forward . {{ kubernetes.networking.dns_service_ip }} {
force_tcp
}
prometheus :9253
errors
cache 30
reload
loop
bind 169.254.25.10
forward . {{ .kubernetes.networking.dns_service_ip }} {
force_tcp
}
prometheus :9253
}
ip6.arpa:53 {
errors
cache 30
reload
loop
bind 169.254.25.10
forward . {{ kubernetes.networking.dns_service_ip }} {
force_tcp
}
prometheus :9253
errors
cache 30
reload
loop
bind 169.254.25.10
forward . {{ .kubernetes.networking.dns_service_ip }} {
force_tcp
}
prometheus :9253
}
.:53 {
errors
cache 30
reload
loop
bind 169.254.25.10
forward . /etc/resolv.conf
prometheus :9253
{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
hosts /etc/coredns/hosts {
fallthrough
}
{% endif %}
errors
cache 30
reload
loop
bind 169.254.25.10
forward . /etc/resolv.conf
prometheus :9253
{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
hosts /etc/coredns/hosts {
fallthrough
}
{{- end }}
}
{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
hosts: |
{% for h in kubernetes.coredns.dns_etc_hosts %}
{{ h }}
{% endfor %}
{% endif %}
{{- range .kubernetes.coredns.dns_etc_hosts }}
{{ . }}
{{- end }}
{{- end }}

View File

@ -12,7 +12,7 @@ defaults
retries 5
timeout http-request 5m
timeout queue 5m
timeout connect 30s
timeout connect 30s
timeout client 30s
timeout server 15m
timeout http-keep-alive 30s
@ -20,12 +20,12 @@ defaults
maxconn 4000
frontend healthz
bind *:{{ kubernetes.haproxy.health_port }}
bind *:{{ .kubernetes.haproxy.health_port }}
mode http
monitor-uri /healthz
frontend kube_api_frontend
bind 127.0.0.1:{{ kubernetes.apiserver.port }}
bind 127.0.0.1:{{ .kubernetes.apiserver.port }}
mode tcp
option tcplog
default_backend kube_api_backend
@ -36,6 +36,6 @@ backend kube_api_backend
default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100
option httpchk GET /healthz
http-check expect status 200
{%for h in groups['kube_control_plane'] %}
server {{ h.inventory_name }} {{ h.internal_ipv4 }}:{{ kubernetes.apiserver.port }} check check-ssl verify none
{% endfor %}
{{- range .groups.kube_control_plane | default list }}
server {{ index $.inventory_hosts . "inventory_name" }} {{ index $.inventory_hosts . "internal_ipv4" }}:{{ $.kubernetes.apiserver.port }} check check-ssl verify none
{{- end }}

View File

@ -8,7 +8,7 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
k8s-app: kube-haproxy
annotations:
cfg-checksum: "{{ cfg_md5.stdout }}"
cfg-checksum: "{{ .cfg_md5.stdout }}"
spec:
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
@ -17,7 +17,7 @@ spec:
priorityClassName: system-node-critical
containers:
- name: haproxy
image: {{ kubernetes.haproxy.image }}
image: {{ .kubernetes.haproxy.image }}
imagePullPolicy: IfNotPresent
resources:
requests:
@ -26,11 +26,11 @@ spec:
livenessProbe:
httpGet:
path: /healthz
port: {{ kubernetes.haproxy.health_port }}
port: {{ .kubernetes.haproxy.health_port }}
readinessProbe:
httpGet:
path: /healthz
port: {{ kubernetes.haproxy.health_port }}
port: {{ .kubernetes.haproxy.health_port }}
volumeMounts:
- mountPath: /usr/local/etc/haproxy/
name: etc-haproxy

View File

@ -2,42 +2,40 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
etcd:
{% if (kubernetes.etcd.deployment_type=='internal') %}
{{- if .kubernetes.etcd.deployment_type | eq "internal" }}
local:
{% set etcd_image_info=kubernetes.etcd.image|split:":" %}
imageRepository: {{ etcd_image_info[0]|split:"/"|slice:":-1"|join:"/"|safe }}
imageTag: {{ etcd_image_info[1] }}
imageRepository: {{ slice (.kubernetes.etcd.image | splitList ":" | first | splitList "/") 1 (.kubernetes.etcd.image | splitList ":" | first | splitList "/" | len) | join "/" }}
imageTag: {{ .kubernetes.etcd.image | splitList ":" | last }}
serverCertSANs:
{% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}
- {{ hv.internal_ipv4|stringformat:"https://%s:2379" }}
{% endfor %}
{% else %}
{{- range .groups.etcd | default list }}
- https://{{ index $.inventory_hosts . "internal_ipv4" }}:2379
{{- end }}
{{- else }}
external:
endpoints:
{% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}
- {{ hv.internal_ipv4|stringformat:"https://%s:2379" }}
{% endfor %}
{{- range .groups.etcd | default list }}
- https://{{ index $.inventory_hosts . "internal_ipv4" }}:2379
{{- end }}
caFile: /etc/kubernetes/pki/etcd/ca.crt
certFile: /etc/kubernetes/pki/etcd/client.crt
keyFile: /etc/kubernetes/pki/etcd/client.key
{% endif %}
{{- end }}
dns:
type: CoreDNS
{% set core_image_info=kubernetes.networking.dns_image|split:":" %}
imageRepository: {{ core_image_info[0]|split:"/"|slice:":-1"|join:"/"|safe }}
imageTag: {{ core_image_info[1] }}
imageRepository: {{ kubernetes.image_repository }}
kubernetesVersion: {{ kube_version }}
imageRepository: {{ slice (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/") 1 (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/" | len) | join "/" }}
imageTag: {{ .kubernetes.networking.dns_image | splitList ":" | last }}
imageRepository: {{ .kubernetes.image_repository }}
kubernetesVersion: {{ .kube_version }}
certificatesDir: /etc/kubernetes/pki
clusterName: {{ kubernetes.cluster_name }}
controlPlaneEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}
clusterName: {{ .kubernetes.cluster_name }}
controlPlaneEndpoint: {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
networking:
dnsDomain: {{ kubernetes.networking.dns_domain }}
podSubnet: {{ kubernetes.networking.pod_cidr }}
serviceSubnet: {{ kubernetes.networking.service_cidr }}
dnsDomain: {{ .kubernetes.networking.dns_domain }}
podSubnet: {{ .kubernetes.networking.pod_cidr }}
serviceSubnet: {{ .kubernetes.networking.service_cidr }}
apiServer:
extraArgs:
{% if (security_enhancement) %}
{{- if .security_enhancement }}
authorization-mode: Node,RBAC
enable-admission-plugins: AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity
profiling: false
@ -45,62 +43,64 @@ apiServer:
service-account-lookup: true
tls-min-version: VersionTLS12
tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
{% endif %}
{% if (kubernetes.audit) %}
{{- end }}
{{- if .kubernetes.audit }}
audit-log-format: json
audit-log-maxbackup: 2
audit-log-maxsize: 200
audit-policy-file: /etc/kubernetes/audit/policy.yaml
audit-webhook-config-file: /etc/kubernetes/audit/webhook.yaml
{% endif %}
{{ kubernetes.apiserver.extra_args|to_yaml:4|safe }}
{{- end }}
{{ .kubernetes.apiserver.extra_args | toYaml | indent 4 }}
certSANs:
- kubernetes
- kubernetes.default
- kubernetes.default.svc
- localhost
- 127.0.0.1
- {{ kubernetes.networking.service_cidr|ip_range:0 }}
- {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}
- {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint|stringformat:"kubernetes.default.svc.%s" }}{% else %}{{ init_kubernetes_node|stringformat:"kubernetes.default.svc.%s" }}{% endif %}
- {{ kubernetes.networking.dns_domain|stringformat:"kubernetes.default.svc.%s" }}
{% for h in groups['k8s_cluster'] %}{% set hv=inventory_hosts[h] %}
- {{ h }}.{{ kubernetes.networking.dns_domain }}
- {{ hv.internal_ipv4 }}
{% if (hv.internal_ipv6|defined) %}- {{ hv.internal_ipv6 }}{% endif %}
{% endfor %}
{% for h in kubernetes.apiserver.certSANs %}
- {{ h }}
{% endfor %}
{% if (kubernetes.audit) %}
- {{ .kubernetes.networking.service_cidr | ipInCIDR 0 }}
- {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
- kubernetes.default.svc.{{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
- kubernetes.default.svc.{{ .kubernetes.networking.dns_domain }}
{{- range .groups.k8s_cluster | default list }}
- {{ . }}.{{ $.kubernetes.networking.dns_domain }}
- {{ index $.inventory_hosts . "internal_ipv4" }}
{{- if index $.inventory_hosts . "internal_ipv6" }}
- {{ index $.inventory_hosts . "internal_ipv6" }}
{{- end }}
{{- end }}
{{- range .kubernetes.apiserver.certSANs }}
- {{ . }}
{{- end }}
{{- if .kubernetes.audit }}
extraVolumes:
- name: k8s-audit
hostPath: /etc/kubernetes/audit
mountPath: /etc/kubernetes/audit
pathType: DirectoryOrCreate
{% endif %}
{{- end }}
controllerManager:
extraArgs:
{% if (internal_ipv6|defined) %}
node-cidr-mask-size-ipv4: "{{ kubernetes.controller_manager.kube_network_node_prefix }}"
{{- if and .internal_ipv6 (ne .internal_ipv6 "") }}
node-cidr-mask-size-ipv4: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}"
node-cidr-mask-size-ipv6: "64"
{% else %}
node-cidr-mask-size: "{{ kubernetes.controller_manager.kube_network_node_prefix }}"
{% endif %}
{% if (kube_version|version:'>=v1.9.0') %}
{{- else }}
node-cidr-mask-size: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}"
{{- end }}
{{- if .kube_version | semverCompare ">=v1.9.0" }}
cluster-signing-duration: 87600h
{% else %}
{{- else }}
experimental-cluster-signing-duration: 87600h
{% endif %}
{% if (security_enhancement) %}
{{- end }}
{{- if .security_enhancement }}
bind-address: 127.0.0.1
profiling: false
terminated-pod-gc-threshold: 50
use-service-account-credentials: true
{% else %}
{{- else }}
bind-address: 0.0.0.0
{% endif %}
{{ kubernetes.controller_manager.extra_args|to_yaml:4|safe }}
{{- end }}
{{ .kubernetes.controller_manager.extra_args | toYaml | indent 4 }}
extraVolumes:
- name: host-time
hostPath: /etc/localtime
@ -108,43 +108,40 @@ controllerManager:
readOnly: true
scheduler:
extraArgs:
{% if (security_enhancement) %}
{{ if .security_enhancement }}
bind-address: 127.0.0.1
profiling: false
{% else %}
{{- else }}
bind-address: 0.0.0.0
{% endif %}
{{ kubernetes.scheduler.extra_args|to_yaml:4|safe }}
{{- end }}
{{ .kubernetes.scheduler.extra_args | toYaml | indent 4 }}
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: {{ internal_ipv4 }}
bindPort: {{ kubernetes.apiserver.port }}
advertiseAddress: {{ .internal_ipv4 }}
bindPort: {{ .kubernetes.apiserver.port }}
nodeRegistration:
criSocket: {{ cri.cri_socket }}
criSocket: {{ .cri.cri_socket }}
kubeletExtraArgs:
cgroup-driver: {{ cri.cgroup_driver }}
cgroup-driver: {{ .cri.cgroup_driver }}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: {{ kubernetes.networking.pod_cidr }}
mode: {{ kubernetes.kube_proxy.mode }}
{{ kubernetes.kube_proxy.config|to_yaml|safe }}
clusterCIDR: {{ .kubernetes.networking.pod_cidr }}
mode: {{ .kubernetes.kube_proxy.mode }}
{{ .kubernetes.kube_proxy.config | toYaml }}
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
clusterDomain: {{ kubernetes.networking.dns_domain }}
clusterDomain: {{ .kubernetes.networking.dns_domain }}
clusterDNS:
- {{ kubernetes.networking.dns_service_ip }}
maxPods: {{ kubernetes.max_pods }}
podPidsLimit: {{ kubernetes.kubelet.pod_pids_limit }}
- {{ .kubernetes.networking.dns_service_ip }}
maxPods: {{ .kubernetes.max_pods }}
podPidsLimit: {{ .kubernetes.kubelet.pod_pids_limit }}
rotateCertificates: true
kubeReserved:
cpu: 200m
@ -161,8 +158,7 @@ evictionSoftGracePeriod:
memory.available: 2m
evictionMaxPodGracePeriod: 120
evictionPressureTransitionPeriod: 30s
{% if (security_enhancement) %}
{{- if .security_enhancement }}
readOnlyPort: 0
protectKernelDefaults: true
eventRecordQPS: 1
@ -175,25 +171,30 @@ tlsCipherSuites:
featureGates:
RotateKubeletServerCertificate: true
SeccompDefault: true
{% if (kube_version|version:">=v1.24.0") %}
{{- if .kube_version | semverCompare ">=v1.24.0" }}
TTLAfterFinished: true
{% endif %}
{% if (kube_version|version:">=v1.21.0") %}
{{- end }}
{{ if .kube_version | semverCompare ">=v1.21.0" }}
CSIStorageCapacity: true
{% endif %}
{{ features|to_yaml:2|safe }}
{% else %}
{{- end }}
{{ .kubernetes.kubelet.feature_gates | toYaml | indent 2 }}
{{- else }}
featureGates:
RotateKubeletServerCertificate: true
{% if (kube_version|version:">=v1.24.0") %}
{{- if .kube_version | semverCompare ">=v1.24.0" }}
TTLAfterFinished: true
{% endif %}
{% if (kube_version|version:">=v1.21.0") %}
{{- end }}
{{- if .kube_version | semverCompare ">=v1.21.0" }}
CSIStorageCapacity: true
ExpandCSIVolumes: true
{% endif %}
{{ features|to_yaml:2|safe }}
{% endif %}
cgroupDriver: {{ cri.cgroup_driver }}
containerLogMaxSize: {{ kubernetes.kubelet.container_log_max_size }}
containerLogMaxFiles: {{ kubernetes.kubelet.container_log_max_files }}
{{- end }}
{{- if .kubernetes.kubelet.feature_gates }}
{{ .kubernetes.kubelet.feature_gates | toYaml | indent 2 }}
{{- end }}
{{- end }}
cgroupDriver: {{ .cri.cgroup_driver }}
containerLogMaxSize: {{ .kubernetes.kubelet.container_log_max_size }}
containerLogMaxFiles: {{ .kubernetes.kubelet.container_log_max_files }}
{{- if .kubernetes.kubelet.extra_args }}
{{ .kubernetes.kubelet.extra_args | toYaml }}
{{- end }}

View File

@ -2,41 +2,39 @@
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
etcd:
{% if (kubernetes.etcd.deployment_type=='internal') %}
{{- if .kubernetes.etcd.deployment_type | eq "internal" }}
local:
{% set etcd_image_info=kubernetes.etcd.image|split:":" %}
imageRepository: {{ etcd_image_info[0]|split:"/"|slice:":-1"|join:"/"|safe }}
imageTag: {{ etcd_image_info[1] }}
imageRepository: {{ slice (.kubernetes.etcd.image | splitList ":" | first | splitList "/") 1 (.kubernetes.etcd.image | splitList ":" | first | splitList "/" | len) | join "/" }}
imageTag: {{ .kubernetes.etcd.image | splitList ":" | last }}
serverCertSANs:
{% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}
- {{ hv.internal_ipv4|stringformat:"https://%s:2379" }}
{% endfor %}
{% else %}
{{- range .groups.etcd | default list }}
- https://{{ index $.inventory_hosts . "internal_ipv4" }}:2379
{{- end }}
{{- else }}
external:
endpoints:
{% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}
- {{ hv.internal_ipv4|stringformat:"https://%s:2379" }}
{% endfor %}
{{- range .groups.etcd | default list }}
- https://{{ index $.inventory_hosts . "internal_ipv4" }}:2379
{{- end }}
caFile: /etc/kubernetes/pki/etcd/ca.crt
certFile: /etc/kubernetes/pki/etcd/client.crt
keyFile: /etc/kubernetes/pki/etcd/client.key
{% endif %}
{{- end }}
dns:
{% set core_image_info=kubernetes.networking.dns_image|split:":" %}
imageRepository: {{ core_image_info[0]|split:"/"|slice:":-1"|join:"/"|safe }}
imageTag: {{ core_image_info[1] }}
imageRepository: {{ kubernetes.image_repository }}
kubernetesVersion: {{ kube_version }}
imageRepository: {{ slice (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/") 1 (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/" | len) | join "/" }}
imageTag: {{ .kubernetes.networking.dns_image | splitList ":" | last }}
imageRepository: {{ .kubernetes.image_repository }}
kubernetesVersion: {{ .kube_version }}
certificatesDir: /etc/kubernetes/pki
clusterName: {{ kubernetes.cluster_name }}
controlPlaneEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}
clusterName: {{ .kubernetes.cluster_name }}
controlPlaneEndpoint: {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
networking:
dnsDomain: {{ kubernetes.networking.dns_domain }}
podSubnet: {{ kubernetes.networking.pod_cidr }}
serviceSubnet: {{ kubernetes.networking.service_cidr }}
dnsDomain: {{ .kubernetes.networking.dns_domain }}
podSubnet: {{ .kubernetes.networking.pod_cidr }}
serviceSubnet: {{ .kubernetes.networking.service_cidr }}
apiServer:
extraArgs:
{% if (security_enhancement) %}
{{- if .security_enhancement }}
authorization-mode: Node,RBAC
enable-admission-plugins: AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity
profiling: false
@ -44,62 +42,64 @@ apiServer:
service-account-lookup: true
tls-min-version: VersionTLS12
tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
{% endif %}
{% if (kubernetes.audit) %}
{{- end }}
{{- if .kubernetes.audit }}
audit-log-format: json
audit-log-maxbackup: 2
audit-log-maxsize: 200
audit-policy-file: /etc/kubernetes/audit/policy.yaml
audit-webhook-config-file: /etc/kubernetes/audit/webhook.yaml
{% endif %}
{{ kubernetes.apiserver.extra_args|to_yaml:4|safe }}
{{- end }}
{{ .kubernetes.apiserver.extra_args | toYaml | indent 4 }}
certSANs:
- kubernetes
- kubernetes.default
- kubernetes.default.svc
- localhost
- 127.0.0.1
- {{ kubernetes.networking.service_cidr|ip_range:0 }}
- {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}
- {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint|stringformat:"kubernetes.default.svc.%s" }}{% else %}{{ init_kubernetes_node|stringformat:"kubernetes.default.svc.%s" }}{% endif %}
- {{ kubernetes.networking.dns_domain|stringformat:"kubernetes.default.svc.%s" }}
{% for h in groups['k8s_cluster'] %}{% set hv=inventory_hosts[h] %}
- {{ h }}.{{ kubernetes.networking.dns_domain }}
- {{ hv.internal_ipv4 }}
{% if (hv.internal_ipv6|defined) %}- {{ hv.internal_ipv6 }}{% endif %}
{% endfor %}
{% for h in kubernetes.apiserver.certSANs %}
- {{ h }}
{% endfor %}
{% if (kubernetes.audit) %}
- {{ .kubernetes.networking.service_cidr | ipInCIDR 0 }}
- {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
- kubernetes.default.svc.{{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
- kubernetes.default.svc.{{ .kubernetes.networking.dns_domain }}
{{- range .groups.k8s_cluster | default list }}
- {{ . }}.{{ $.kubernetes.networking.dns_domain }}
- {{ index $.inventory_hosts . "internal_ipv4" }}
{{- if index $.inventory_hosts . "internal_ipv6" }}
- {{ index $.inventory_hosts . "internal_ipv6" }}
{{- end }}
{{- end }}
{{- range .kubernetes.apiserver.certSANs }}
- {{ . }}
{{- end }}
{{- if .kubernetes.audit }}
extraVolumes:
- name: k8s-audit
hostPath: /etc/kubernetes/audit
mountPath: /etc/kubernetes/audit
pathType: DirectoryOrCreate
{% endif %}
{{- end }}
controllerManager:
extraArgs:
{% if (internal_ipv6|defined) %}
node-cidr-mask-size-ipv4: "{{ kubernetes.controller_manager.kube_network_node_prefix }}"
{{- if and .internal_ipv6 (ne .internal_ipv6 "") }}
node-cidr-mask-size-ipv4: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}"
node-cidr-mask-size-ipv6: "64"
{% else %}
node-cidr-mask-size: "{{ kubernetes.controller_manager.kube_network_node_prefix }}"
{% endif %}
{% if (kube_version|version:'>=v1.9.0') %}
{{- else }}
node-cidr-mask-size: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}"
{{- end }}
{{- if .kube_version | semverCompare ">=v1.9.0" }}
cluster-signing-duration: 87600h
{% else %}
{{- else }}
experimental-cluster-signing-duration: 87600h
{% endif %}
{% if (security_enhancement) %}
{{- end }}
{{- if .security_enhancement }}
bind-address: 127.0.0.1
profiling: false
terminated-pod-gc-threshold: 50
use-service-account-credentials: true
{% else %}
{{- else }}
bind-address: 0.0.0.0
{% endif %}
{{ kubernetes.controller_manager.extra_args|to_yaml:4|safe }}
{{- end }}
{{ .kubernetes.controller_manager.extra_args | toYaml | indent 4 }}
extraVolumes:
- name: host-time
hostPath: /etc/localtime
@ -107,43 +107,40 @@ controllerManager:
readOnly: true
scheduler:
extraArgs:
{% if (security_enhancement) %}
{{ if .security_enhancement }}
bind-address: 127.0.0.1
profiling: false
{% else %}
{{- else }}
bind-address: 0.0.0.0
{% endif %}
{{ kubernetes.scheduler.extra_args|to_yaml:4|safe }}
{{- end }}
{{ .kubernetes.scheduler.extra_args | toYaml | indent 4 }}
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: {{ internal_ipv4 }}
bindPort: {{ kubernetes.apiserver.port }}
advertiseAddress: {{ .internal_ipv4 }}
bindPort: {{ .kubernetes.apiserver.port }}
nodeRegistration:
criSocket: {{ cri.cri_socket }}
criSocket: {{ .cri.cri_socket }}
kubeletExtraArgs:
cgroup-driver: {{ cri.cgroup_driver }}
cgroup-driver: {{ .cri.cgroup_driver }}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: {{ kubernetes.networking.pod_cidr }}
mode: {{ kubernetes.kube_proxy.mode }}
{{ kubernetes.kube_proxy.config|to_yaml|safe }}
clusterCIDR: {{ .kubernetes.networking.pod_cidr }}
mode: {{ .kubernetes.kube_proxy.mode }}
{{ .kubernetes.kube_proxy.config | toYaml }}
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
clusterDomain: {{ kubernetes.networking.dns_domain }}
clusterDomain: {{ .kubernetes.networking.dns_domain }}
clusterDNS:
- {{ kubernetes.networking.dns_service_ip }}
maxPods: {{ kubernetes.max_pods }}
podPidsLimit: {{ kubernetes.kubelet.pod_pids_limit }}
- {{ .kubernetes.networking.dns_service_ip }}
maxPods: {{ .kubernetes.max_pods }}
podPidsLimit: {{ .kubernetes.kubelet.pod_pids_limit }}
rotateCertificates: true
kubeReserved:
cpu: 200m
@ -160,8 +157,7 @@ evictionSoftGracePeriod:
memory.available: 2m
evictionMaxPodGracePeriod: 120
evictionPressureTransitionPeriod: 30s
{% if (security_enhancement) %}
{{- if .security_enhancement }}
readOnlyPort: 0
protectKernelDefaults: true
eventRecordQPS: 1
@ -174,25 +170,26 @@ tlsCipherSuites:
featureGates:
RotateKubeletServerCertificate: true
SeccompDefault: true
{% if (kube_version|version:">=v1.24.0") %}
{{- if .kube_version | semverCompare ">=v1.24.0" }}
TTLAfterFinished: true
{% endif %}
{% if (kube_version|version:">=v1.21.0") %}
{{- end }}
{{ if .kube_version | semverCompare ">=v1.21.0" }}
CSIStorageCapacity: true
{% endif %}
{{ features|to_yaml:2|safe }}
{% else %}
{{- end }}
{{ .kubernetes.kubelet.feature_gates | toYaml | indent 2 }}
{{- else }}
featureGates:
RotateKubeletServerCertificate: true
{% if (kube_version|version:">=v1.24.0") %}
{{- if .kube_version | semverCompare ">=v1.24.0" }}
TTLAfterFinished: true
{% endif %}
{% if (kube_version|version:">=v1.21.0") %}
{{- end }}
{{- if .kube_version | semverCompare ">=v1.21.0" }}
CSIStorageCapacity: true
ExpandCSIVolumes: true
{% endif %}
{{ features|to_yaml:2|safe }}
{% endif %}
cgroupDriver: {{ cri.cgroup_driver }}
containerLogMaxSize: {{ kubernetes.kubelet.container_log_max_size }}
containerLogMaxFiles: {{ kubernetes.kubelet.container_log_max_files }}
{{- end }}
{{ .kubernetes.kubelet.feature_gates | toYaml | indent 2 }}
{{- end }}
cgroupDriver: {{ .cri.cgroup_driver }}
containerLogMaxSize: {{ .kubernetes.kubelet.container_log_max_size }}
containerLogMaxFiles: {{ .kubernetes.kubelet.container_log_max_files }}
{{ .kubernetes.kubelet.extra_args | toYaml }}

View File

@ -3,17 +3,17 @@ apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
discovery:
bootstrapToken:
apiServerEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}:{{ kubernetes.apiserver.port }}
token: "{{ kubeadm_token }}"
apiServerEndpoint: {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}:{{ .kubernetes.apiserver.port }}
token: "{{ .kubeadm_token }}"
unsafeSkipCAVerification: true
{% if (inventory_name in groups['kube_control_plane']) %}
{{- if .groups.kube_control_plane | default list | has .inventory_name }}
controlPlane:
localAPIEndpoint:
advertiseAddress: {{ internal_ipv4 }}
bindPort: {{ kubernetes.apiserver.port }}
certificateKey: {{ kubeadm_cert }}
{% endif %}
advertiseAddress: {{ .internal_ipv4 }}
bindPort: {{ .kubernetes.apiserver.port }}
certificateKey: {{ .kubeadm_cert }}
{{- end }}
nodeRegistration:
criSocket: {{ cri.cri_socket }}
criSocket: {{ .cri.cri_socket }}
kubeletExtraArgs:
cgroup-driver: {{ cri.cgroup_driver }}
cgroup-driver: {{ .cri.cgroup_driver }}

View File

@ -3,17 +3,17 @@ apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
bootstrapToken:
apiServerEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}:{{ kubernetes.apiserver.port }}
token: "{{ kubeadm_token }}"
apiServerEndpoint: {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}:{{ .kubernetes.apiserver.port }}
token: "{{ .kubeadm_token }}"
unsafeSkipCAVerification: true
{% if (inventory_name in groups['kube_control_plane']) %}
{{- if .groups.kube_control_plane | default list | has .inventory_name }}
controlPlane:
localAPIEndpoint:
advertiseAddress: {{ internal_ipv4 }}
bindPort: {{ kubernetes.apiserver.port }}
certificateKey: {{ kubeadm_cert }}
{% endif %}
advertiseAddress: {{ .internal_ipv4 }}
bindPort: {{ .kubernetes.apiserver.port }}
certificateKey: {{ .kubeadm_cert }}
{{- end }}
nodeRegistration:
criSocket: {{ cri.cri_socket }}
criSocket: {{ .cri.cri_socket }}
kubeletExtraArgs:
cgroup-driver: {{ cri.cgroup_driver }}
cgroup-driver: {{ .cri.cgroup_driver }}

View File

@ -7,7 +7,7 @@ EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
Environment="KUBELET_EXTRA_ARGS=--node-ip={{ internal_ipv4 }} --hostname-override={{ inventory_name }} {%for k,v in kubernetes.kubelet.extra_args %}--{{k}} {{v}} {% endfor %}"
Environment="KUBELET_EXTRA_ARGS=--node-ip={{ .internal_ipv4 }} --hostname-override={{ .inventory_name }} {{ range $k,$v := .kubernetes.kubelet.extra_args }}--{{ $k }} {{ $v }} {{ end }}"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS

Some files were not shown because too many files have changed in this diff Show More