From bdef602ddffb89ff70a352acb99e76f94184bb40 Mon Sep 17 00:00:00 2001 From: joyceliu Date: Tue, 12 Mar 2024 17:39:09 +0800 Subject: [PATCH] feat: add create cluster command Signed-off-by: joyceliu --- {project => builtin}/Makefile | 6 +- builtin/const.go | 27 + {project => builtin}/fs.go | 9 +- builtin/inventory/config.yaml | 48 + {project => builtin}/inventory/inventory.yaml | 10 +- builtin/playbooks/create_cluster.yaml | 10 + builtin/playbooks/init.yaml | 12 + builtin/playbooks/install.yaml | 39 + builtin/playbooks/post_install.yaml | 19 + builtin/playbooks/pre_install.yaml | 19 + {project => builtin}/playbooks/precheck.yaml | 18 +- builtin/roles/addons/cni/defaults/main.yaml | 49 + builtin/roles/addons/cni/tasks/calico.yaml | 9 + builtin/roles/addons/cni/tasks/cilium.yaml | 21 + builtin/roles/addons/cni/tasks/flannel.yaml | 10 + builtin/roles/addons/cni/tasks/hybridnet.yaml | 17 + builtin/roles/addons/cni/tasks/kubeovn.yaml | 24 + builtin/roles/addons/cni/tasks/main.yaml | 18 + builtin/roles/addons/cni/tasks/multus.yaml | 9 + .../addons/cni/templates/calico/pdg.yaml | 35 + .../addons/cni/templates/calico/v3.27.yaml | 5339 +++++++++++++++++ .../addons/cni/templates/flannel/flannel.yaml | 213 + .../addons/cni/templates/multus.deployment | 206 + .../addons/cni/templates/multus/multus.yaml | 206 + builtin/roles/addons/kata/defaults/main.yaml | 3 + builtin/roles/addons/kata/tasks/main.yaml | 11 + .../addons/kata/templates/kata-deploy.yaml | 127 + builtin/roles/addons/nfd/defaults/main.yaml | 3 + builtin/roles/addons/nfd/tasks/main.yaml | 11 + .../addons/nfd/templates/nfd-deploy.yaml | 621 ++ builtin/roles/addons/sc/defaults/main.yaml | 12 + builtin/roles/addons/sc/tasks/local.yaml | 9 + builtin/roles/addons/sc/tasks/main.yaml | 6 + builtin/roles/addons/sc/tasks/nfs.yaml | 11 + .../addons/sc/templates/local-volume.yaml | 150 + .../init/init-artifact/defaults/main.yaml | 101 + .../init-artifact/tasks/download_by_curl.yaml | 284 + .../init-artifact/tasks/download_by_helm.yaml | 44 + .../init-artifact/tasks/download_by_oras.yaml | 10 + .../roles/init/init-artifact/tasks/main.yaml | 28 + .../roles/init/init-artifact/tasks/pki.yaml | 34 + builtin/roles/init/init-os/defaults/main.yaml | 2 + .../init/init-os/tasks/init_ntpserver.yaml | 42 + .../init/init-os/tasks/init_repository.yaml | 75 + builtin/roles/init/init-os/tasks/main.yaml | 25 + .../roles/init/init-os/templates/init-os.sh | 193 + .../roles/install/certs/defaults/main.yaml | 4 + .../certs/files/k8s-certs-renew.service | 5 + .../install/certs/files/k8s-certs-renew.timer | 7 + builtin/roles/install/certs/tasks/main.yaml | 20 + .../install/certs/templates/renew_script.sh | 29 + builtin/roles/install/cri/defaults/main.yaml | 12 + .../install/cri/files/containerd.service | 26 + .../install/cri/files/cri_docker.service | 36 + .../roles/install/cri/files/docker.service | 47 + .../install/cri/tasks/install_containerd.yaml | 45 + .../install/cri/tasks/install_crictl.yaml | 21 + .../install/cri/tasks/install_cridockerd.yaml | 33 + .../install/cri/tasks/install_docker.yaml | 40 + builtin/roles/install/cri/tasks/main.yaml | 19 + .../install/cri/templates/containerd.config | 76 + .../roles/install/cri/templates/crictl.config | 5 + .../roles/install/cri/templates/docker.config | 19 + builtin/roles/install/etcd/defaults/main.yaml | 24 + .../roles/install/etcd/files/backup.service | 5 + builtin/roles/install/etcd/files/etcd.service | 16 + .../roles/install/etcd/tasks/backup_etcd.yaml | 29 + .../install/etcd/tasks/install_etcd.yaml | 38 + 
builtin/roles/install/etcd/tasks/main.yaml | 26 + .../install/etcd/templates/backup.script | 33 + .../roles/install/etcd/templates/backup.timer | 7 + builtin/roles/install/etcd/templates/etcd.env | 53 + .../install/image-registry/defaults/main.yaml | 44 + .../image-registry/files/containerd.service | 26 + .../image-registry/files/docker.service | 47 + .../image-registry/tasks/install_docker.yaml | 40 + .../tasks/install_docker_compose.yaml | 13 + .../image-registry/tasks/install_harbor.yaml | 57 + .../tasks/install_keepalived.yaml | 19 + .../tasks/install_registry.yaml | 65 + .../image-registry/tasks/load_images.yaml | 51 + .../install/image-registry/tasks/main.yaml | 16 + .../image-registry/templates/docker.config | 19 + .../image-registry/templates/harbor.config | 311 + .../image-registry/templates/harbor.service | 12 + .../templates/harbor_keepalive.docker-compose | 26 + .../templates/keepalived.config | 31 + .../templates/keepalived.healthcheck | 17 + .../image-registry/templates/registry.config | 218 + .../templates/registry.docker-compose | 54 + .../image-registry/templates/registry.service | 12 + .../install/kubernetes/defaults/main.yaml | 161 + .../kubernetes/files/audit/audit_policy.yaml | 123 + .../kubernetes/files/audit/audit_webhook.yaml | 15 + .../install/kubernetes/files/kubelet.service | 15 + .../kubernetes/tasks/deploy_cluster_dns.yaml | 21 + .../kubernetes/tasks/deploy_haproxy.yaml | 15 + .../kubernetes/tasks/deploy_kube_vip.yaml | 31 + .../kubernetes/tasks/init_kubernetes.yaml | 76 + .../tasks/install_kube_binaries.yaml | 69 + .../kubernetes/tasks/join_kubernetes.yaml | 38 + .../roles/install/kubernetes/tasks/main.yaml | 64 + .../templates/dns/coredns.deployment | 241 + .../templates/dns/nodelocaldns.daemonset | 237 + .../kubernetes/templates/haproxy/haproxy.cfg | 41 + .../kubernetes/templates/haproxy/haproxy.yaml | 41 + .../templates/kubeadm/kubeadm-init.v1beta2 | 199 + .../templates/kubeadm/kubeadm-init.v1beta3 | 198 + .../templates/kubeadm/kubeadm-join.v1beta2 | 20 + .../templates/kubeadm/kubeadm-join.v1beta3 | 20 + .../kubernetes/templates/kubeadm/kubelet.env | 13 + .../kubernetes/templates/kubevip/kubevip.ARP | 65 + .../kubernetes/templates/kubevip/kubevip.BGP | 72 + builtin/roles/install/nfs/defaults/main.yaml | 3 + builtin/roles/install/nfs/tasks/debian.yaml | 28 + builtin/roles/install/nfs/tasks/main.yaml | 6 + builtin/roles/install/nfs/tasks/rhel.yaml | 28 + builtin/roles/install/nfs/templates/exports | 3 + .../roles/install/security/defaults/main.yaml | 1 + .../roles/install/security/tasks/main.yaml | 39 + .../precheck/artifact_check/tasks/main.yaml | 21 + .../precheck/env_check/defaults/main.yaml | 25 + .../roles/precheck/env_check/tasks/main.yaml | 115 + cmd/controller-manager/app/options/options.go | 3 + cmd/kk/app/create.go | 74 + cmd/kk/app/options/common.go | 97 +- cmd/kk/app/options/create.go | 98 + cmd/kk/app/options/precheck.go | 18 +- cmd/kk/app/options/run.go | 31 +- cmd/kk/app/precheck.go | 9 +- cmd/kk/app/run.go | 9 +- cmd/kk/app/server.go | 17 +- .../crds/kubekey.kubesphere.io_pipelines.yaml | 3 - example/Makefile | 22 +- example/config.yaml | 19 - example/inventory.yaml | 26 - example/pipeline.yaml | 18 - go.mod | 6 +- go.sum | 8 +- pkg/apis/kubekey/v1/config_types.go | 27 +- pkg/apis/kubekey/v1/config_types_test.go | 62 + pkg/apis/kubekey/v1/inventory_types.go | 1 - pkg/apis/kubekey/v1/pipeline_types.go | 6 +- pkg/apis/kubekey/v1alpha1/task_types.go | 7 +- pkg/connector/connector.go | 72 +- pkg/connector/local_connector.go | 6 +- 
pkg/connector/local_connector_test.go | 2 +- pkg/connector/ssh_connector.go | 25 +- pkg/const/common.go | 11 +- pkg/const/workdir.go | 3 - pkg/controllers/pipeline_controller.go | 25 +- pkg/controllers/task_controller.go | 540 -- pkg/converter/converter.go | 342 +- pkg/converter/converter_test.go | 110 - .../testdata/playbooks/playbook1.yaml | 30 - .../testdata/roles/role1/tasks/main.yaml | 3 - pkg/converter/tmpl/filter_extension.go | 131 +- pkg/converter/tmpl/filter_extension_test.go | 47 +- pkg/converter/tmpl/helper.go | 139 + pkg/converter/tmpl/template.go | 24 +- pkg/converter/tmpl/template_test.go | 52 +- pkg/executor/executor.go | 533 ++ pkg/{task => executor}/helper.go | 21 +- pkg/{task => executor}/helper_test.go | 2 +- pkg/manager/command_manager.go | 54 +- pkg/manager/controller_manager.go | 28 +- pkg/modules/assert.go | 39 +- pkg/modules/assert_test.go | 5 +- pkg/modules/command.go | 35 +- pkg/modules/command_test.go | 2 +- pkg/modules/copy.go | 226 +- pkg/modules/copy_test.go | 12 +- pkg/modules/debug.go | 44 +- pkg/modules/debug_test.go | 5 +- pkg/modules/fetch.go | 73 + pkg/modules/fetch_test.go | 72 + pkg/modules/gen_cert.go | 429 ++ pkg/modules/gen_cert_test.go | 52 + pkg/modules/module.go | 22 + pkg/modules/module_test.go | 8 +- pkg/modules/set_fact.go | 36 +- pkg/modules/template.go | 217 +- pkg/modules/template_test.go | 4 +- pkg/project/builtin.go | 98 + pkg/project/git.go | 161 + pkg/project/helper.go | 238 +- pkg/project/helper_test.go | 186 +- pkg/project/local.go | 111 + pkg/project/project.go | 42 +- pkg/project/project_git.go | 98 - pkg/project/project_local.go | 53 - pkg/proxy/api_resources.go | 14 +- pkg/proxy/internal/watcher.go | 23 +- pkg/proxy/router.go | 16 + pkg/proxy/transport.go | 5 + pkg/task/controller.go | 68 - pkg/task/internal.go | 472 -- pkg/variable/helper.go | 310 +- pkg/variable/helper_test.go | 189 +- pkg/variable/internal.go | 317 +- pkg/variable/internal_test.go | 84 +- pkg/variable/variable.go | 514 +- project/inventory/config.yaml | 72 - .../precheck/artifact_check/tasks/main.yaml | 21 - .../roles/precheck/env_check/tasks/main.yaml | 114 - 205 files changed, 15792 insertions(+), 3428 deletions(-) rename {project => builtin}/Makefile (61%) create mode 100644 builtin/const.go rename {project => builtin}/fs.go (84%) create mode 100644 builtin/inventory/config.yaml rename {project => builtin}/inventory/inventory.yaml (84%) create mode 100644 builtin/playbooks/create_cluster.yaml create mode 100644 builtin/playbooks/init.yaml create mode 100644 builtin/playbooks/install.yaml create mode 100644 builtin/playbooks/post_install.yaml create mode 100644 builtin/playbooks/pre_install.yaml rename {project => builtin}/playbooks/precheck.yaml (68%) create mode 100644 builtin/roles/addons/cni/defaults/main.yaml create mode 100644 builtin/roles/addons/cni/tasks/calico.yaml create mode 100644 builtin/roles/addons/cni/tasks/cilium.yaml create mode 100644 builtin/roles/addons/cni/tasks/flannel.yaml create mode 100644 builtin/roles/addons/cni/tasks/hybridnet.yaml create mode 100644 builtin/roles/addons/cni/tasks/kubeovn.yaml create mode 100644 builtin/roles/addons/cni/tasks/main.yaml create mode 100644 builtin/roles/addons/cni/tasks/multus.yaml create mode 100644 builtin/roles/addons/cni/templates/calico/pdg.yaml create mode 100644 builtin/roles/addons/cni/templates/calico/v3.27.yaml create mode 100644 builtin/roles/addons/cni/templates/flannel/flannel.yaml create mode 100644 builtin/roles/addons/cni/templates/multus.deployment create mode 100644 
builtin/roles/addons/cni/templates/multus/multus.yaml create mode 100644 builtin/roles/addons/kata/defaults/main.yaml create mode 100644 builtin/roles/addons/kata/tasks/main.yaml create mode 100644 builtin/roles/addons/kata/templates/kata-deploy.yaml create mode 100644 builtin/roles/addons/nfd/defaults/main.yaml create mode 100644 builtin/roles/addons/nfd/tasks/main.yaml create mode 100644 builtin/roles/addons/nfd/templates/nfd-deploy.yaml create mode 100644 builtin/roles/addons/sc/defaults/main.yaml create mode 100644 builtin/roles/addons/sc/tasks/local.yaml create mode 100644 builtin/roles/addons/sc/tasks/main.yaml create mode 100644 builtin/roles/addons/sc/tasks/nfs.yaml create mode 100644 builtin/roles/addons/sc/templates/local-volume.yaml create mode 100644 builtin/roles/init/init-artifact/defaults/main.yaml create mode 100644 builtin/roles/init/init-artifact/tasks/download_by_curl.yaml create mode 100644 builtin/roles/init/init-artifact/tasks/download_by_helm.yaml create mode 100644 builtin/roles/init/init-artifact/tasks/download_by_oras.yaml create mode 100644 builtin/roles/init/init-artifact/tasks/main.yaml create mode 100644 builtin/roles/init/init-artifact/tasks/pki.yaml create mode 100644 builtin/roles/init/init-os/defaults/main.yaml create mode 100644 builtin/roles/init/init-os/tasks/init_ntpserver.yaml create mode 100644 builtin/roles/init/init-os/tasks/init_repository.yaml create mode 100644 builtin/roles/init/init-os/tasks/main.yaml create mode 100644 builtin/roles/init/init-os/templates/init-os.sh create mode 100644 builtin/roles/install/certs/defaults/main.yaml create mode 100644 builtin/roles/install/certs/files/k8s-certs-renew.service create mode 100644 builtin/roles/install/certs/files/k8s-certs-renew.timer create mode 100644 builtin/roles/install/certs/tasks/main.yaml create mode 100644 builtin/roles/install/certs/templates/renew_script.sh create mode 100644 builtin/roles/install/cri/defaults/main.yaml create mode 100644 builtin/roles/install/cri/files/containerd.service create mode 100644 builtin/roles/install/cri/files/cri_docker.service create mode 100644 builtin/roles/install/cri/files/docker.service create mode 100644 builtin/roles/install/cri/tasks/install_containerd.yaml create mode 100644 builtin/roles/install/cri/tasks/install_crictl.yaml create mode 100644 builtin/roles/install/cri/tasks/install_cridockerd.yaml create mode 100644 builtin/roles/install/cri/tasks/install_docker.yaml create mode 100644 builtin/roles/install/cri/tasks/main.yaml create mode 100644 builtin/roles/install/cri/templates/containerd.config create mode 100644 builtin/roles/install/cri/templates/crictl.config create mode 100644 builtin/roles/install/cri/templates/docker.config create mode 100644 builtin/roles/install/etcd/defaults/main.yaml create mode 100644 builtin/roles/install/etcd/files/backup.service create mode 100644 builtin/roles/install/etcd/files/etcd.service create mode 100644 builtin/roles/install/etcd/tasks/backup_etcd.yaml create mode 100644 builtin/roles/install/etcd/tasks/install_etcd.yaml create mode 100644 builtin/roles/install/etcd/tasks/main.yaml create mode 100644 builtin/roles/install/etcd/templates/backup.script create mode 100644 builtin/roles/install/etcd/templates/backup.timer create mode 100644 builtin/roles/install/etcd/templates/etcd.env create mode 100644 builtin/roles/install/image-registry/defaults/main.yaml create mode 100644 builtin/roles/install/image-registry/files/containerd.service create mode 100644 
builtin/roles/install/image-registry/files/docker.service create mode 100644 builtin/roles/install/image-registry/tasks/install_docker.yaml create mode 100644 builtin/roles/install/image-registry/tasks/install_docker_compose.yaml create mode 100644 builtin/roles/install/image-registry/tasks/install_harbor.yaml create mode 100644 builtin/roles/install/image-registry/tasks/install_keepalived.yaml create mode 100644 builtin/roles/install/image-registry/tasks/install_registry.yaml create mode 100644 builtin/roles/install/image-registry/tasks/load_images.yaml create mode 100644 builtin/roles/install/image-registry/tasks/main.yaml create mode 100644 builtin/roles/install/image-registry/templates/docker.config create mode 100644 builtin/roles/install/image-registry/templates/harbor.config create mode 100644 builtin/roles/install/image-registry/templates/harbor.service create mode 100644 builtin/roles/install/image-registry/templates/harbor_keepalive.docker-compose create mode 100644 builtin/roles/install/image-registry/templates/keepalived.config create mode 100644 builtin/roles/install/image-registry/templates/keepalived.healthcheck create mode 100644 builtin/roles/install/image-registry/templates/registry.config create mode 100644 builtin/roles/install/image-registry/templates/registry.docker-compose create mode 100644 builtin/roles/install/image-registry/templates/registry.service create mode 100644 builtin/roles/install/kubernetes/defaults/main.yaml create mode 100644 builtin/roles/install/kubernetes/files/audit/audit_policy.yaml create mode 100644 builtin/roles/install/kubernetes/files/audit/audit_webhook.yaml create mode 100644 builtin/roles/install/kubernetes/files/kubelet.service create mode 100644 builtin/roles/install/kubernetes/tasks/deploy_cluster_dns.yaml create mode 100644 builtin/roles/install/kubernetes/tasks/deploy_haproxy.yaml create mode 100644 builtin/roles/install/kubernetes/tasks/deploy_kube_vip.yaml create mode 100644 builtin/roles/install/kubernetes/tasks/init_kubernetes.yaml create mode 100644 builtin/roles/install/kubernetes/tasks/install_kube_binaries.yaml create mode 100644 builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml create mode 100644 builtin/roles/install/kubernetes/tasks/main.yaml create mode 100644 builtin/roles/install/kubernetes/templates/dns/coredns.deployment create mode 100644 builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset create mode 100644 builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg create mode 100644 builtin/roles/install/kubernetes/templates/haproxy/haproxy.yaml create mode 100644 builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta2 create mode 100644 builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3 create mode 100644 builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta2 create mode 100644 builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta3 create mode 100644 builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env create mode 100644 builtin/roles/install/kubernetes/templates/kubevip/kubevip.ARP create mode 100644 builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP create mode 100644 builtin/roles/install/nfs/defaults/main.yaml create mode 100644 builtin/roles/install/nfs/tasks/debian.yaml create mode 100644 builtin/roles/install/nfs/tasks/main.yaml create mode 100644 builtin/roles/install/nfs/tasks/rhel.yaml create mode 100644 builtin/roles/install/nfs/templates/exports create mode 100644 
builtin/roles/install/security/defaults/main.yaml create mode 100644 builtin/roles/install/security/tasks/main.yaml create mode 100644 builtin/roles/precheck/artifact_check/tasks/main.yaml create mode 100644 builtin/roles/precheck/env_check/defaults/main.yaml create mode 100644 builtin/roles/precheck/env_check/tasks/main.yaml create mode 100644 cmd/kk/app/create.go create mode 100644 cmd/kk/app/options/create.go delete mode 100644 example/config.yaml delete mode 100644 example/inventory.yaml delete mode 100644 example/pipeline.yaml create mode 100644 pkg/apis/kubekey/v1/config_types_test.go delete mode 100644 pkg/controllers/task_controller.go delete mode 100644 pkg/converter/testdata/playbooks/playbook1.yaml delete mode 100644 pkg/converter/testdata/roles/role1/tasks/main.yaml create mode 100644 pkg/converter/tmpl/helper.go create mode 100644 pkg/executor/executor.go rename pkg/{task => executor}/helper.go (89%) rename pkg/{task => executor}/helper_test.go (99%) create mode 100644 pkg/modules/fetch.go create mode 100644 pkg/modules/fetch_test.go create mode 100644 pkg/modules/gen_cert.go create mode 100644 pkg/modules/gen_cert_test.go create mode 100644 pkg/project/builtin.go create mode 100644 pkg/project/git.go create mode 100644 pkg/project/local.go delete mode 100644 pkg/project/project_git.go delete mode 100644 pkg/project/project_local.go delete mode 100644 pkg/task/controller.go delete mode 100644 pkg/task/internal.go delete mode 100644 project/inventory/config.yaml delete mode 100644 project/roles/precheck/artifact_check/tasks/main.yaml delete mode 100644 project/roles/precheck/env_check/tasks/main.yaml diff --git a/project/Makefile b/builtin/Makefile similarity index 61% rename from project/Makefile rename to builtin/Makefile index 3566cd46..a30cc158 100644 --- a/project/Makefile +++ b/builtin/Makefile @@ -3,12 +3,13 @@ create-role: ## create a role necessary file in roles @echo "Creating role $(role)..." @mkdir -p roles/$(role)/tasks @echo "---" > roles/$(role)/tasks/main.yaml + @mkdir -p roles/$(role)/defaults + @echo "" > roles/$(role)/defaults/main.yaml ifeq ($(VARIABLE_NAME),"full") @mkdir -p roles/$(role)/handlers @mkdir -p roles/$(role)/templates @mkdir -p roles/$(role)/files @mkdir -p roles/$(role)/vars - @mkdir -p roles/$(role)/defaults @mkdir -p roles/$(role)/meta @echo "---" > roles/$(role)/handlers/main.yaml @echo "---" > roles/$(role)/templates/main.yaml @@ -19,3 +20,6 @@ ifeq ($(VARIABLE_NAME),"full") endif @echo "Role $(role) created successfully" +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n\nTargets:\n"} /^[0-9A-Za-z_-]+:.*?##/ { printf " \033[36m%-45s\033[0m %s\n", $$1, $$2 } /^\$$\([0-9A-Za-z_-]+\):.*?##/ { gsub("_","-", $$1); printf " \033[36m%-45s\033[0m %s\n", tolower(substr($$1, 3, length($$1)-7)), $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/builtin/const.go b/builtin/const.go new file mode 100644 index 00000000..0f2974eb --- /dev/null +++ b/builtin/const.go @@ -0,0 +1,27 @@ +/* +Copyright 2024 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file embeds the two default files, inventory.yaml and config.yaml. + +package builtin + +import _ "embed" + +//go:embed inventory/inventory.yaml +var DefaultInventory []byte + +//go:embed inventory/config.yaml +var DefaultConfig []byte diff --git a/project/fs.go b/builtin/fs.go similarity index 84% rename from project/fs.go rename to builtin/fs.go index c4c08ae9..b76f336b 100644 --- a/project/fs.go +++ b/builtin/fs.go @@ -1,3 +1,6 @@ +//go:build builtin +// +build builtin + /* Copyright 2023 The KubeSphere Authors. @@ -14,11 +17,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package project +package builtin import ( "embed" ) -//go:embed inventory playbooks roles -var InternalPipeline embed.FS +//go:embed playbooks roles +var BuiltinPipeline embed.FS diff --git a/builtin/inventory/config.yaml b/builtin/inventory/config.yaml new file mode 100644 index 00000000..d9c1f372 --- /dev/null +++ b/builtin/inventory/config.yaml @@ -0,0 +1,48 @@ +apiVersion: kubekey.kubesphere.io/v1 +kind: Config +metadata: + name: example +spec: + # zone for kk; determines where files are downloaded from +# kkzone: cn + # work_dir is the directory where the artifact is extracted. +# work_dir: /var/lib/kubekey/ + # cni binary + cni_version: v1.2.0 + # helm binary + helm_version: v3.14.2 + # docker-compose binary + dockercompose_version: v2.24.6 + # harbor image tag + harbor_version: v2.10.1 + # registry image tag + registry_version: 2.8.3 + # keepalived image tag + keepalived_version: stable + # runc binary + runc_version: v1.1.11 + # calicoctl binary + calico_version: v3.27.2 + # etcd binary + etcd_version: v3.5.6 + # crictl binary + crictl_version: v1.29.0 + # cilium helm + cilium_version: 1.15.4 + # kubeovn helm + kubeovn_version: 0.1.0 + # hybridnet helm + hybridnet_version: 0.6.8 + # containerd binary + containerd_version: v1.7.0 + # docker binary + docker_version: 24.0.6 + # cri-dockerd binary + cridockerd_version: v0.3.10 + # the version of kubernetes to be installed. + # should be greater than or equal to kube_version_min_required. + kube_version: v1.23.15 + # nfs provisioner helm version + nfs_provisioner_version: 4.0.18 + # oras binary + oras_version: v1.1.0 diff --git a/project/inventory/inventory.yaml b/builtin/inventory/inventory.yaml similarity index 84% rename from project/inventory/inventory.yaml rename to builtin/inventory/inventory.yaml index ccf638e1..ec50113f 100644 --- a/project/inventory/inventory.yaml +++ b/builtin/inventory/inventory.yaml @@ -25,19 +25,23 @@ spec: k8s_cluster: groups: - kube_control_plane - - kube_node + - kube_worker # control_plane nodes kube_control_plane: hosts: - localhost # worker nodes - kube_node: + kube_worker: hosts: - localhost # etcd nodes when etcd_deployment_type is external etcd: hosts: - localhost - registry: + image_registry: + hosts: + - localhost + # nfs nodes, used for registry storage and kubernetes nfs storage
+ nfs: + hosts: + - localhost diff --git a/builtin/playbooks/create_cluster.yaml b/builtin/playbooks/create_cluster.yaml new file mode 100644 index 00000000..0ed280c3 --- /dev/null +++ b/builtin/playbooks/create_cluster.yaml @@ -0,0 +1,10 @@ +--- +- import_playbook: pre_install.yaml + +- import_playbook: precheck.yaml + +- import_playbook: init.yaml + +- import_playbook: install.yaml + +- import_playbook: post_install.yaml diff --git a/builtin/playbooks/init.yaml b/builtin/playbooks/init.yaml new file mode 100644 index 00000000..7d8d0398 --- /dev/null +++ b/builtin/playbooks/init.yaml @@ -0,0 +1,12 @@ +--- +- hosts: + - localhost + roles: + - init/init-artifact + +- hosts: + - etcd + - k8s_cluster + - image_registry + roles: + - init/init-os diff --git a/builtin/playbooks/install.yaml b/builtin/playbooks/install.yaml new file mode 100644 index 00000000..82db84f6 --- /dev/null +++ b/builtin/playbooks/install.yaml @@ -0,0 +1,39 @@ +--- +- hosts: + - nfs + gather_facts: true + roles: + - install/nfs + +- hosts: + - etcd + gather_facts: true + roles: + - install/etcd + +- hosts: + - image_registry + gather_facts: true + roles: + - install/image-registry + +- hosts: + - k8s_cluster + gather_facts: true + roles: + - install/cri + - install/kubernetes + +- hosts: + - kube_control_plane + roles: + - role: install/certs + when: renew_certs.enabled|default_if_none:false + +- hosts: + - k8s_cluster|random + roles: + - addons/cni + - addons/kata + - addons/nfd + - addons/sc diff --git a/builtin/playbooks/post_install.yaml b/builtin/playbooks/post_install.yaml new file mode 100644 index 00000000..e7789c81 --- /dev/null +++ b/builtin/playbooks/post_install.yaml @@ -0,0 +1,19 @@ +--- +- name: Execute post install scripts + hosts: + - all + tasks: + - name: Copy post install scripts to remote + ignore_errors: yes + copy: + src: "{{ work_dir }}/scripts/post_install_{{ inventory_name }}.sh" + dest: "/etc/kubekey/scripts/post_install_{{ inventory_name }}.sh" + - name: Execute post install scripts + command: | + for file in /etc/kubekey/scripts/post_install_*.sh; do + if [ -f $file ]; then + # execute file + chmod +x $file + $file + fi + done diff --git a/builtin/playbooks/pre_install.yaml b/builtin/playbooks/pre_install.yaml new file mode 100644 index 00000000..b9d951bb --- /dev/null +++ b/builtin/playbooks/pre_install.yaml @@ -0,0 +1,19 @@ +--- +- name: Execute pre install scripts + hosts: + - all + tasks: + - name: Copy pre install scripts to remote + ignore_errors: yes + copy: + src: "{{ work_dir }}/scripts/pre_install_{{ inventory_name }}.sh" + dest: "/etc/kubekey/scripts/pre_install_{{ inventory_name }}.sh" + - name: Execute pre install scripts + command: | + for file in /etc/kubekey/scripts/pre_install_*.sh; do + if [ -f $file ]; then + # execute file + chmod +x $file + $file + fi + done diff --git a/project/playbooks/precheck.yaml b/builtin/playbooks/precheck.yaml similarity index 68% rename from project/playbooks/precheck.yaml rename to builtin/playbooks/precheck.yaml index b608e93f..879b09c7 100644 --- a/project/playbooks/precheck.yaml +++ b/builtin/playbooks/precheck.yaml @@ -1,13 +1,15 @@ --- -- hosts: - - k8s_cluster - - etcd - gather_facts: true - roles: - - precheck/env_check - - hosts: - localhost roles: - role: precheck/artifact_check - when: artifact_file | defined + when: artifact.artifact_file | defined + +- hosts: + - k8s_cluster + - etcd + - image_registry + - nfs + gather_facts: true + roles: + - precheck/env_check
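[Editor's note] The `builtin` package introduced above exposes its defaults through Go's `embed` facility: `DefaultInventory`/`DefaultConfig` come from builtin/const.go, and the playbook/role tree is embedded as `BuiltinPipeline` in builtin/fs.go (compiled in only with the `builtin` build tag). A minimal sketch of how a caller might read one of the embedded playbooks; the import path is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/kubesphere/kubekey/builtin" // hypothetical import path
)

func main() {
	// DefaultInventory and DefaultConfig are plain []byte blobs
	// embedded from builtin/inventory/.
	fmt.Printf("default inventory: %d bytes\n", len(builtin.DefaultInventory))

	// BuiltinPipeline is an embed.FS rooted at the builtin package,
	// so embedded playbooks are addressed by their relative path.
	// Requires building with: go build -tags builtin
	data, err := builtin.BuiltinPipeline.ReadFile("playbooks/create_cluster.yaml")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
```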
diff --git a/builtin/roles/addons/cni/defaults/main.yaml b/builtin/roles/addons/cni/defaults/main.yaml new file mode 100644 index 00000000..9ad0fa74 --- /dev/null +++ b/builtin/roles/addons/cni/defaults/main.yaml @@ -0,0 +1,49 @@ +cni: + kube_proxy: "{{ kubernetes.kube_proxy.enabled|default_if_none:true }}" + # the apiVersion of the policy group differs across kubernetes versions (policy/v1 since v1.21). https://kube-api.ninja + api_version_policy: "{% if (kube_version|version:'>=v1.21.0') %}policy/v1{% else %}policy/v1beta1{% endif %}" + kube_pods_v4_cidr: "{{ kubernetes.networking.pod_cidr|default_if_none:'10.233.64.0/18'|split:','|first }}" + kube_pods_v6_cidr: "{{ kubernetes.networking.pod_cidr|default_if_none:'10.233.64.0/18'|split:','|last }}" + node_cidr_mask_size: "{{ kubernetes.controller_manager.kube_network_node_prefix|default_if_none:24 }}" + kube_svc_cidr: "{{ kubernetes.networking.service_cidr|default_if_none:'10.233.0.0/18' }}" + multus: + enabled: false + image: kubesphere/multus-cni:v3.8 + calico: + # defaults to true when the cluster has more than 50 nodes. + typha: "{% if (groups['k8s_cluster']|length > 50) %}true{% else %}false{% endif %}" + veth_mtu: 0 + ipip_mode: Always + vxlan_mode: Never + # set true to enable outgoing NAT for the IPPool + ipv4pool_nat_outgoing: true + # set true to make this the default IPPool + default_ip_pool: true + # image + cni_image: "calico/cni:{{ calico_version }}" + node_image: "calico/node:{{ calico_version }}" + kube_controller_image: "calico/kube-controllers:{{ calico_version }}" + typha_image: "calico/typha:{{ calico_version }}" + replicas: 1 + node_selector: {} + flannel: + # https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md + backend: vxlan + cni_plugin_image: docker.io/flannel/flannel-cni-plugin:v1.4.0-flannel1 + flannel_image: "docker.io/flannel/flannel:{{ flannel_version }}" + cilium: + operator_image: cilium/operator-generic:1.15.3 + cilium_image: cilium/cilium:1.15.3 + k8s_endpoint: "{% if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ groups['kube_control_plane']|first }}{% endif %}" + k8s_port: "{{ kubernetes.apiserver.port|default_if_none:6443 }}" + kubeovn: + replica: 1 + registry: docker.io/kubeovn + hybridnet: + registryURL: docker.io +# hybridnet_image: hybridnetdev/hybridnet +# hybridnet_tag: v0.8.8 diff --git a/builtin/roles/addons/cni/tasks/calico.yaml b/builtin/roles/addons/cni/tasks/calico.yaml new file mode 100644 index 00000000..12e4e59f --- /dev/null +++ b/builtin/roles/addons/cni/tasks/calico.yaml @@ -0,0 +1,9 @@ +--- +- name: Generate calico manifest + template: + src: "calico/{{ calico_version|split:'.'|slice:':2'|join:'.' }}.yaml" + dest: "/etc/kubernetes/cni/calico-{{ calico_version }}.yaml" + +- name: Apply calico + command: | + /usr/local/bin/kubectl apply -f /etc/kubernetes/cni/calico-{{ calico_version }}.yaml --force
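[Editor's note] The `src` expression above selects the manifest template by the calico minor series: `split:'.'|slice:':2'|join:'.'` maps a patch release such as `v3.27.2` to `v3.27`, which is why the template file is named templates/calico/v3.27.yaml. The same derivation in plain Go, as a quick illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// manifestSeries mirrors the template filter chain
//   calico_version|split:'.'|slice:':2'|join:'.'
// keeping only the first two dotted components of the version.
func manifestSeries(version string) string {
	parts := strings.Split(version, ".")
	if len(parts) < 2 {
		return version
	}
	return strings.Join(parts[:2], ".")
}

func main() {
	fmt.Println(manifestSeries("v3.27.2")) // v3.27
}
```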
diff --git a/builtin/roles/addons/cni/tasks/cilium.yaml b/builtin/roles/addons/cni/tasks/cilium.yaml new file mode 100644 index 00000000..efaed117 --- /dev/null +++ b/builtin/roles/addons/cni/tasks/cilium.yaml @@ -0,0 +1,21 @@ +--- +- name: Sync cilium helm chart to remote + copy: + src: "{{ work_dir }}/kubekey/cni/cilium-{{ cilium_version }}.tgz" + dest: "/etc/kubernetes/cni/cilium-{{ cilium_version }}.tgz" + +# https://docs.cilium.io/en/stable/installation/k8s-install-helm/ +- name: Install cilium + command: | + helm install cilium /etc/kubernetes/cni/cilium-{{ cilium_version }}.tgz --namespace kube-system \ + --set operator.image.override={{ cni.cilium.operator_image }} \ + --set operator.replicas={{ cni.cilium.operator_replicas }} \ + --set image.override={{ cni.cilium.cilium_image }} \ + --set ipv6.enabled={% if (cni.ipv6_support=="true") %}true{% else %}false{% endif %} \ + --set ipv4NativeRoutingCIDR={{ cni.kube_pods_v4_cidr }} \ + {% if (cni.ipv6_support=="true") %} + --set ipv6NativeRoutingCIDR={{ cni.kube_pods_v6_cidr }} \ + {% endif %} + {% if (cni.kube_proxy=="false") %} + --set kubeProxyReplacement=strict --set k8sServiceHost={{ cni.cilium.k8s_endpoint }} --set k8sServicePort={{ cni.cilium.k8s_port }} + {% endif %} diff --git a/builtin/roles/addons/cni/tasks/flannel.yaml b/builtin/roles/addons/cni/tasks/flannel.yaml new file mode 100644 index 00000000..7832f4e2 --- /dev/null +++ b/builtin/roles/addons/cni/tasks/flannel.yaml @@ -0,0 +1,10 @@ +--- +# https://github.com/flannel-io/flannel/blob/master/Documentation/kubernetes.md +- name: Generate flannel manifest + template: + src: "flannel/flannel.yaml" + dest: "/etc/kubernetes/cni/flannel-{{ flannel_version }}.yaml" + +- name: Apply flannel + command: | + /usr/local/bin/kubectl apply -f /etc/kubernetes/cni/flannel-{{ flannel_version }}.yaml diff --git a/builtin/roles/addons/cni/tasks/hybridnet.yaml b/builtin/roles/addons/cni/tasks/hybridnet.yaml new file mode 100644 index 00000000..2280d5cf --- /dev/null +++ b/builtin/roles/addons/cni/tasks/hybridnet.yaml @@ -0,0 +1,17 @@ +--- +- name: Sync hybridnet helm chart to remote + copy: + src: "{{ work_dir }}/kubekey/cni/hybridnet-{{ hybridnet_version }}.tgz" + dest: "/etc/kubernetes/cni/hybridnet-{{ hybridnet_version }}.tgz" + +# https://artifacthub.io/packages/helm/hybridnet/hybridnet +- name: Install hybridnet + command: | + helm install hybridnet /etc/kubernetes/cni/hybridnet-{{ hybridnet_version }}.tgz --namespace kube-system \ + {% if cni.hybridnet.hybridnet_image %} + --set images.hybridnet.image={{ cni.hybridnet.hybridnet_image }} \ + {% endif %} + {% if cni.hybridnet.hybridnet_tag %} + --set images.hybridnet.tag={{ cni.hybridnet.hybridnet_tag }} \ + {% endif %} + --set images.registryURL={{ cni.hybridnet.registryURL }} diff --git a/builtin/roles/addons/cni/tasks/kubeovn.yaml b/builtin/roles/addons/cni/tasks/kubeovn.yaml new file mode 100644 index 00000000..2fc6ba8a --- /dev/null +++ b/builtin/roles/addons/cni/tasks/kubeovn.yaml @@ -0,0 +1,24 @@ +--- +- name: Add kubeovn label to node + command: | + kubectl label node -lbeta.kubernetes.io/os=linux kubernetes.io/os=linux --overwrite + kubectl label node -lnode-role.kubernetes.io/control-plane kube-ovn/role=master --overwrite + +# kubeovn-0.1.0.tgz is the helm chart version, not the helm appVersion +- name: Sync kubeovn helm chart to remote + copy: + src: "{{ work_dir }}/kubekey/cni/kubeovn-{{ kubeovn_version }}.tgz" + dest: "/etc/kubernetes/cni/kubeovn-{{ kubeovn_version }}.tgz" + +# https://kubeovn.github.io/docs/stable/start/one-step-install/#helm-chart +- name: Install kubeovn + command: | + helm install kubeovn /etc/kubernetes/cni/kubeovn-{{ kubeovn_version }}.tgz --set replicaCount={{ cni.kubeovn.replica }} \ + --set MASTER_NODES={% for h in groups['kube_control_plane'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %} \ + --set global.registry.address={{ cni.kubeovn.registry }} \ + --set ipv4.POD_CIDR={{ cni.kube_pods_v4_cidr }} --set ipv4.SVC_CIDR={{ cni.kube_svc_cidr }} \ + {% if (cni.ipv6_support=="true") %} + --set networking.NET_STACK=dual_stack \ + --set dual_stack.POD_CIDR={{ cni.kube_pods_v4_cidr }},{{ cni.kube_pods_v6_cidr }} \ + --set dual_stack.SVC_CIDR={{ cni.kube_svc_cidr }} + {% endif %}
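[Editor's note] These task files use a Django-style template syntax (filters like `default_if_none`, loop metadata like `forloop.Last`). Assuming a pongo2-like engine — which this syntax suggests, though the project's actual `pkg/converter/tmpl` wiring is not shown in this hunk — the `MASTER_NODES` expression above renders a comma-separated, quoted list of control-plane IPs:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2/v6"
)

func main() {
	// Mirrors the shape of the MASTER_NODES expression in the kubeovn
	// task: quote each control-plane IP and join them with commas.
	tpl := pongo2.Must(pongo2.FromString(
		`{% for h in hosts %}"{{ h }}"{% if not forloop.Last %},{% endif %}{% endfor %}`))
	out, err := tpl.Execute(pongo2.Context{"hosts": []string{"10.0.0.1", "10.0.0.2"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // "10.0.0.1","10.0.0.2"
}
```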
diff --git a/builtin/roles/addons/cni/tasks/main.yaml b/builtin/roles/addons/cni/tasks/main.yaml new file mode 100644 index 00000000..2d7eb84c --- /dev/null +++ b/builtin/roles/addons/cni/tasks/main.yaml @@ -0,0 +1,18 @@ +--- +- include_tasks: calico.yaml + when: cni.kube_network_plugin == "calico" + +- include_tasks: flannel.yaml + when: cni.kube_network_plugin == "flannel" + +- include_tasks: cilium.yaml + when: cni.kube_network_plugin == "cilium" + +- include_tasks: kubeovn.yaml + when: cni.kube_network_plugin == "kubeovn" + +- include_tasks: hybridnet.yaml + when: cni.kube_network_plugin == "hybridnet" + +- include_tasks: multus.yaml + when: cni.multus.enabled diff --git a/builtin/roles/addons/cni/tasks/multus.yaml b/builtin/roles/addons/cni/tasks/multus.yaml new file mode 100644 index 00000000..b91f30b6 --- /dev/null +++ b/builtin/roles/addons/cni/tasks/multus.yaml @@ -0,0 +1,9 @@ +--- +- name: Generate multus yaml + template: + src: multus/multus.yaml + dest: /etc/kubernetes/cni/multus.yaml + +- name: Apply multus + command: | + kubectl apply -f /etc/kubernetes/cni/multus.yaml diff --git a/builtin/roles/addons/cni/templates/calico/pdg.yaml b/builtin/roles/addons/cni/templates/calico/pdg.yaml new file mode 100644 index 00000000..e35cebc9 --- /dev/null +++ b/builtin/roles/addons/cni/templates/calico/pdg.yaml @@ -0,0 +1,35 @@ +--- +# Source: calico/templates/calico-kube-controllers.yaml +# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict + +apiVersion: {{ cni.api_version_policy }} +kind: PodDisruptionBudget +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + +{% if (cni.calico.typha=="true") %} +--- +# Source: calico/templates/calico-typha.yaml +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict + +apiVersion: {{ cni.api_version_policy }} +kind: PodDisruptionBudget +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-typha +{% endif %} diff --git a/builtin/roles/addons/cni/templates/calico/v3.27.yaml b/builtin/roles/addons/cni/templates/calico/v3.27.yaml new file mode 100644 index 00000000..7f7928c8 --- /dev/null +++ b/builtin/roles/addons/cni/templates/calico/v3.27.yaml @@ -0,0 +1,5339 @@ +--- +# Source: calico/templates/calico-kube-controllers.yaml +apiVersion:
v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-node.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system +--- +# Source: calico/templates/calico-node.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-cni-plugin + namespace: kube-system +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # You must set a non-zero value for Typha replicas below. + typha_service_name: {% if (cni.calico.typha=="true") %}"calico-typha"{% else %}"none"{% endif %} + # Configure the backend to use. + calico_backend: "bird" + + # Configure the MTU to use for workload interfaces and tunnels. + # By default, MTU is auto-detected, and explicitly setting this field should not be required. + # You can override auto-detection by providing a non-zero value. + veth_mtu: "{{ cni.calico.veth_mtu }}" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. + properties: + asNumber: + description: 'ASNumber is the default AS number used by a node. [Default: + 64512]' + format: int32 + type: integer + bindMode: + description: BindMode indicates whether to listen for BGP connections + on all addresses (None) or only on the node's canonical IP address + Node.Spec.BGP.IPvXAddress (NodeIP). 
Default behaviour is to listen + for BGP connections on all addresses. + type: string + communities: + description: Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + ignoredInterfaces: + description: IgnoredInterfaces indicates the network interfaces that + needs to be excluded when reading device routes. + items: + type: string + type: array + listenPort: + description: ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]' + type: string + nodeMeshMaxRestartTime: + description: Time to allow for software restart for node-to-mesh peerings. When + specified, this is configured as the graceful restart timeout. When + not specified, the BIRD default of 120s is used. This field can + only be set on the default BGPConfiguration instance and requires + that NodeMesh is enabled + type: string + nodeMeshPassword: + description: Optional BGP password for full node-to-mesh peerings. + This field can only be set on the default BGPConfiguration instance + and requires that NodeMesh is enabled + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + nodeToNodeMeshEnabled: + description: 'NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertisements contains per-prefix advertisement + configuration. + items: + description: PrefixAdvertisement configures advertisement properties + for the specified CIDR. + properties: + cidr: + description: CIDR for which properties should be advertised. + type: string + communities: + description: Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. + items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. 
If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceLoadBalancerIPs: + description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes + Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress + IPs will only be advertised if they are within one of these blocks. + items: + description: ServiceLoadBalancerIPBlock represents a single allowed + LoadBalancer IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: bgpfilters.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPFilter + listKind: BGPFilterList + plural: bgpfilters + singular: bgpfilter + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPFilterSpec contains the IPv4 and IPv6 filter rules of + the BGP Filter. + properties: + exportV4: + description: The ordered set of IPv4 BGPFilter rules acting on exporting + routes to a peer. + items: + description: BGPFilterRuleV4 defines a BGP filter rule consisting + a single IPv4 CIDR block and a filter action for this CIDR. + properties: + action: + type: string + cidr: + type: string + interface: + type: string + matchOperator: + type: string + source: + type: string + required: + - action + type: object + type: array + exportV6: + description: The ordered set of IPv6 BGPFilter rules acting on exporting + routes to a peer. + items: + description: BGPFilterRuleV6 defines a BGP filter rule consisting + a single IPv6 CIDR block and a filter action for this CIDR. + properties: + action: + type: string + cidr: + type: string + interface: + type: string + matchOperator: + type: string + source: + type: string + required: + - action + type: object + type: array + importV4: + description: The ordered set of IPv4 BGPFilter rules acting on importing + routes from a peer. 
+ items: + description: BGPFilterRuleV4 defines a BGP filter rule consisting + a single IPv4 CIDR block and a filter action for this CIDR. + properties: + action: + type: string + cidr: + type: string + interface: + type: string + matchOperator: + type: string + source: + type: string + required: + - action + type: object + type: array + importV6: + description: The ordered set of IPv6 BGPFilter rules acting on importing + routes from a peer. + items: + description: BGPFilterRuleV6 defines a BGP filter rule consisting + a single IPv6 CIDR block and a filter action for this CIDR. + properties: + action: + type: string + cidr: + type: string + interface: + type: string + matchOperator: + type: string + source: + type: string + required: + - action + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + filters: + description: The ordered set of BGPFilters applied on this BGP peer. + items: + type: string + type: array + keepOriginalNextHop: + description: Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;"(default) + in the specific branch of the Node on "bird.cfg". + type: boolean + maxRestartTime: + description: Time to allow for software restart. When specified, + this is configured as the graceful restart timeout. When not specified, + the BIRD default of 120s is used. + type: string + node: + description: The node name identifying the Calico node instance that + is targeted by this peer. If this is not set, and no nodeSelector + is specified, then this BGP peer selects all nodes in the cluster. + type: string + nodeSelector: + description: Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. + type: string + numAllowedLocalASNumbers: + description: Maximum number of local AS numbers that are allowed in + the AS path for received routes. This removes BGP loop prevention + and should only be used if absolutely necesssary. 
+ format: int32 + type: integer + password: + description: Optional BGP password for the peerings generated by this + BGPPeer resource. + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + peerIP: + description: The IP address of the peer followed by an optional port + number to peer with. If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The + remote AS number comes from the remote node's NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + reachableBy: + description: Add an exact, i.e. /32, static route toward peer IP in + order to prevent route flapping. ReachableBy contains the address + of the gateway which peer can be reached by. + type: string + sourceAddress: + description: Specifies whether and how to configure a source address + for the peerings generated by this BGPPeer resource. Default value + "UseNodeIP" means to configure the node IP as the source address. "None" + means not to configure a source address. + type: string + ttlSecurity: + description: TTLSecurity enables the generalized TTL security mechanism + (GTSM) which protects against spoofed packets by ignoring received + packets with a smaller than expected TTL value. The provided value + is the number of hops (edges) between the peers. + type: integer + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: Deleted indicates that this block affinity is being deleted. + This field is a string for compatibility with older releases that + mistakenly treat this field as a string. + type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: caliconodestatuses.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: CalicoNodeStatus + listKind: CalicoNodeStatusList + plural: caliconodestatuses + singular: caliconodestatus + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus + resource. + properties: + classes: + description: Classes declares the types of information to monitor + for this calico/node, and allows for selective status reporting + about certain subsets of information. + items: + type: string + type: array + node: + description: The node name identifies the Calico node instance for + node status. + type: string + updatePeriodSeconds: + description: UpdatePeriodSeconds is the period at which CalicoNodeStatus + should be updated. Set to 0 to disable CalicoNodeStatus refresh. + Maximum update period is one day. + format: int32 + type: integer + type: object + status: + description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. + No validation needed for status since it is updated by Calico. + properties: + agent: + description: Agent holds agent status on the node. + properties: + birdV4: + description: BIRDV4 represents the latest observed status of bird4. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. 
+ type: string
+ version:
+ description: Version of the BGP daemon
+ type: string
+ type: object
+ birdV6:
+ description: BIRDV6 represents the latest observed status of bird6.
+ properties:
+ lastBootTime:
+ description: LastBootTime holds the value of lastBootTime
+ from bird.ctl output.
+ type: string
+ lastReconfigurationTime:
+ description: LastReconfigurationTime holds the value of lastReconfigTime
+ from bird.ctl output.
+ type: string
+ routerID:
+ description: Router ID used by bird.
+ type: string
+ state:
+ description: The state of the BGP Daemon.
+ type: string
+ version:
+ description: Version of the BGP daemon
+ type: string
+ type: object
+ type: object
+ bgp:
+ description: BGP holds node BGP status.
+ properties:
+ numberEstablishedV4:
+ description: The total number of IPv4 established bgp sessions.
+ type: integer
+ numberEstablishedV6:
+ description: The total number of IPv6 established bgp sessions.
+ type: integer
+ numberNotEstablishedV4:
+ description: The total number of IPv4 non-established bgp sessions.
+ type: integer
+ numberNotEstablishedV6:
+ description: The total number of IPv6 non-established bgp sessions.
+ type: integer
+ peersV4:
+ description: PeersV4 represents IPv4 BGP peers status on the node.
+ items:
+ description: CalicoNodePeer contains the status of BGP peers
+ on the node.
+ properties:
+ peerIP:
+ description: IP address of the peer whose condition we are
+ reporting.
+ type: string
+ since:
+ description: Since the state or reason last changed.
+ type: string
+ state:
+ description: State is the BGP session state.
+ type: string
+ type:
+ description: Type indicates whether this peer is configured
+ via the node-to-node mesh, or via an explicit global or
+ per-node BGPPeer object.
+ type: string
+ type: object
+ type: array
+ peersV6:
+ description: PeersV6 represents IPv6 BGP peers status on the node.
+ items:
+ description: CalicoNodePeer contains the status of BGP peers
+ on the node.
+ properties:
+ peerIP:
+ description: IP address of the peer whose condition we are
+ reporting.
+ type: string
+ since:
+ description: Since the state or reason last changed.
+ type: string
+ state:
+ description: State is the BGP session state.
+ type: string
+ type:
+ description: Type indicates whether this peer is configured
+ via the node-to-node mesh, or via an explicit global or
+ per-node BGPPeer object.
+ type: string
+ type: object
+ type: array
+ required:
+ - numberEstablishedV4
+ - numberEstablishedV6
+ - numberNotEstablishedV4
+ - numberNotEstablishedV6
+ type: object
+ lastUpdated:
+ description: LastUpdated is a timestamp representing the server time
+ when CalicoNodeStatus object last updated. It is represented in
+ RFC3339 form and is in UTC.
+ format: date-time
+ nullable: true
+ type: string
+ routes:
+ description: Routes reports routes known to the Calico BGP daemon
+ on the node.
+ properties:
+ routesV4:
+ description: RoutesV4 represents IPv4 routes on the node.
+ items:
+ description: CalicoNodeRoute contains the status of BGP routes
+ on the node.
+ properties:
+ destination:
+ description: Destination of the route.
+ type: string
+ gateway:
+ description: Gateway for the destination.
+ type: string
+ interface:
+ description: Interface for the destination
+ type: string
+ learnedFrom:
+ description: LearnedFrom contains information regarding
+ where this route originated.
+ properties:
+ peerIP:
+ description: If sourceType is NodeMesh or BGPPeer, IP
+ address of the router that sent us this route.
+ type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + routesV6: + description: RoutesV6 represents IPv6 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterInformationSpec contains the values of describing + the cluster. + properties: + calicoVersion: + description: CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. 
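+          # Editor's note (sketch, not upstream content): ClusterInformation is
+          # created and maintained by Calico itself as a singleton named "default";
+          # operators read it rather than author it. For example,
+          #
+          #   kubectl get clusterinformations.crd.projectcalico.org default -o yaml
+          #
+          # is a quick way to check datastoreReady during datastore migrations.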
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: felixconfigurations.crd.projectcalico.org
+spec:
+ group: crd.projectcalico.org
+ names:
+ kind: FelixConfiguration
+ listKind: FelixConfigurationList
+ plural: felixconfigurations
+ singular: felixconfiguration
+ preserveUnknownFields: false
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: Felix Configuration contains the configuration for Felix.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: FelixConfigurationSpec contains the values of the Felix configuration.
+ properties:
+ allowIPIPPacketsFromWorkloads:
+ description: 'AllowIPIPPacketsFromWorkloads controls whether Felix
+ will add a rule to drop IPIP encapsulated traffic from workloads
+ [Default: false]'
+ type: boolean
+ allowVXLANPacketsFromWorkloads:
+ description: 'AllowVXLANPacketsFromWorkloads controls whether Felix
+ will add a rule to drop VXLAN encapsulated traffic from workloads
+ [Default: false]'
+ type: boolean
+ awsSrcDstCheck:
+ description: 'Set source-destination-check on AWS EC2 instances. Accepted
+ value must be one of "DoNothing", "Enable" or "Disable". [Default:
+ DoNothing]'
+ enum:
+ - DoNothing
+ - Enable
+ - Disable
+ type: string
+ bpfCTLBLogFilter:
+ description: 'BPFCTLBLogFilter specifies what is logged by connect
+ time load balancer when BPFLogLevel is debug. Currently has to be
+ specified as ''all'' when BPFLogFilters is set to see CTLB logs.
+ [Default: unset - means logs are emitted when BPFLogLevel is debug
+ and BPFLogFilters not set.]'
+ type: string
+ bpfConnectTimeLoadBalancing:
+ description: 'BPFConnectTimeLoadBalancing when in BPF mode, controls
+ whether Felix installs the connect-time load balancer. The connect-time
+ load balancer is required for the host to be able to reach Kubernetes
+ services and it improves the performance of pod-to-service connections. When
+ set to TCP, connect time load balancing is available only for services
+ with TCP ports. [Default: TCP]'
+ enum:
+ - TCP
+ - Enabled
+ - Disabled
+ type: string
+ bpfConnectTimeLoadBalancingEnabled:
+ description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode,
+ controls whether Felix installs the connection-time load balancer. The
+ connect-time load balancer is required for the host to be able to
+ reach Kubernetes services and it improves the performance of pod-to-service
+ connections. The only reason to disable it is for debugging purposes.
+ This will be deprecated.
Use BPFConnectTimeLoadBalancing [Default:
+ true]'
+ type: boolean
+ bpfDSROptoutCIDRs:
+ description: BPFDSROptoutCIDRs is a list of CIDRs which are excluded
+ from DSR. That is, clients in those CIDRs will access nodeports
+ as if BPFExternalServiceMode was set to Tunnel.
+ items:
+ type: string
+ type: array
+ bpfDataIfacePattern:
+ description: BPFDataIfacePattern is a regular expression that controls
+ which interfaces Felix should attach BPF programs to in order to
+ catch traffic to/from the network. This needs to match the interfaces
+ that Calico workload traffic flows over as well as any interfaces
+ that handle incoming traffic to nodeports and services from outside
+ the cluster. It should not match the workload interfaces (usually
+ named cali...).
+ type: string
+ bpfDisableGROForIfaces:
+ description: BPFDisableGROForIfaces is a regular expression that controls
+ the interfaces for which Felix should disable the Generic Receive Offload
+ [GRO] option. It should not match the workload interfaces (usually
+ named cali...).
+ type: string
+ bpfDisableUnprivileged:
+ description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled
+ sysctl to disable unprivileged use of BPF. This ensures that unprivileged
+ users cannot access Calico''s BPF maps and cannot insert their own
+ BPF programs to interfere with Calico''s. [Default: true]'
+ type: boolean
+ bpfEnabled:
+ description: 'BPFEnabled, if enabled, Felix will use the BPF dataplane.
+ [Default: false]'
+ type: boolean
+ bpfEnforceRPF:
+ description: 'BPFEnforceRPF enforces strict RPF on all host interfaces
+ with BPF programs regardless of the per-interface or global
+ setting. Possible values are Disabled, Strict or Loose. [Default:
+ Loose]'
+ pattern: ^(?i)(Disabled|Strict|Loose)?$
+ type: string
+ bpfExcludeCIDRsFromNAT:
+ description: BPFExcludeCIDRsFromNAT is a list of CIDRs that are to
+ be excluded from NAT resolution so that the host can handle them. A
+ typical use case is node local DNS cache.
+ items:
+ type: string
+ type: array
+ bpfExtToServiceConnmark:
+ description: 'BPFExtToServiceConnmark in BPF mode, controls a 32bit
+ mark that is set on connections from an external client to a local
+ service. This mark allows us to control how packets of that connection
+ are routed within the host and how routing is interpreted by the RPF
+ check. [Default: 0]'
+ type: integer
+ bpfExternalServiceMode:
+ description: 'BPFExternalServiceMode in BPF mode, controls how connections
+ from outside the cluster to services (node ports and cluster IPs)
+ are forwarded to remote workloads. If set to "Tunnel" then both
+ request and response traffic is tunneled to the remote node. If
+ set to "DSR", the request traffic is tunneled but the response traffic
+ is sent directly from the remote node. In "DSR" mode, the remote
+ node appears to use the IP of the ingress node; this requires a
+ permissive L2 network. [Default: Tunnel]'
+ pattern: ^(?i)(Tunnel|DSR)?$
+ type: string
+ bpfForceTrackPacketsFromIfaces:
+ description: 'BPFForceTrackPacketsFromIfaces in BPF mode, forces traffic
+ from these interfaces to skip Calico''s iptables NOTRACK rule, allowing
+ traffic from those interfaces to be tracked by Linux conntrack. Should
+ only be used for interfaces that are not used for the Calico fabric. For
+ example, a docker bridge device for non-Calico-networked containers.
+ [Default: docker+]' + items: + type: string + type: array + bpfHostConntrackBypass: + description: 'BPFHostConntrackBypass Controls whether to bypass Linux + conntrack in BPF mode for workloads and services. [Default: true + - bypass Linux conntrack]' + type: boolean + bpfHostNetworkedNATWithoutCTLB: + description: 'BPFHostNetworkedNATWithoutCTLB when in BPF mode, controls + whether Felix does a NAT without CTLB. This along with BPFConnectTimeLoadBalancing + determines the CTLB behavior. [Default: Enabled]' + enum: + - Enabled + - Disabled + type: string + bpfKubeProxyEndpointSlicesEnabled: + description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. + type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]' + type: boolean + bpfKubeProxyMinSyncPeriod: + description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix''s embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + bpfL3IfacePattern: + description: BPFL3IfacePattern is a regular expression that allows + to list tunnel devices like wireguard or vxlan (i.e., L3 devices) + in addition to BPFDataIfacePattern. That is, tunnel interfaces not + created by Calico, that Calico workload traffic flows over as well + as any interfaces that handle incoming traffic to nodeports and + services from outside the cluster. + type: string + bpfLogFilters: + additionalProperties: + type: string + description: "BPFLogFilters is a map of key=values where the value + is a pcap filter expression and the key is an interface name with + 'all' denoting all interfaces, 'weps' all workload endpoints and + 'heps' all host endpoints. \n When specified as an env var, it accepts + a comma-separated list of key=values. [Default: unset - means all + debug logs are emitted]" + type: object + bpfLogLevel: + description: 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' + pattern: ^(?i)(Off|Info|Debug)?$ + type: string + bpfMapSizeConntrack: + description: 'BPFMapSizeConntrack sets the size for the conntrack + map. This map must be large enough to hold an entry for each active + connection. Warning: changing the size of the conntrack map can + cause disruption.' + type: integer + bpfMapSizeIPSets: + description: BPFMapSizeIPSets sets the size for ipsets map. The IP + sets map must be large enough to hold an entry for each endpoint + matched by every selector in the source/destination matches in network + policy. Selectors such as "all()" can result in large numbers of + entries (one entry per endpoint in that case). + type: integer + bpfMapSizeIfState: + description: BPFMapSizeIfState sets the size for ifstate map. The + ifstate map must be large enough to hold an entry for each device + (host + workloads) on a host. + type: integer + bpfMapSizeNATAffinity: + type: integer + bpfMapSizeNATBackend: + description: BPFMapSizeNATBackend sets the size for nat back end map. 
+ This is the total number of endpoints. This is mostly more than + the size of the number of services. + type: integer + bpfMapSizeNATFrontend: + description: BPFMapSizeNATFrontend sets the size for nat front end + map. FrontendMap should be large enough to hold an entry for each + nodeport, external IP and each port in each service. + type: integer + bpfMapSizeRoute: + description: BPFMapSizeRoute sets the size for the routes map. The + routes map should be large enough to hold one entry per workload + and a handful of entries per host (enough to cover its own IPs and + tunnel IPs). + type: integer + bpfPSNATPorts: + anyOf: + - type: integer + - type: string + description: 'BPFPSNATPorts sets the range from which we randomly + pick a port if there is a source port collision. This should be + within the ephemeral range as defined by RFC 6056 (1024–65535) and + preferably outside the ephemeral ranges used by common operating + systems. Linux uses 32768–60999, while others mostly use the IANA + defined range 49152–65535. It is not necessarily a problem if this + range overlaps with the operating systems. Both ends of the range + are inclusive. [Default: 20000:29999]' + pattern: ^.* + x-kubernetes-int-or-string: true + bpfPolicyDebugEnabled: + description: BPFPolicyDebugEnabled when true, Felix records detailed + information about the BPF policy programs, which can be examined + with the calico-bpf command-line tool. + type: boolean + chainInsertMode: + description: 'ChainInsertMode controls whether Felix hooks the kernel''s + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. insert is the safe default + since it prevents Calico''s rules from being bypassed. If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]' + pattern: ^(?i)(insert|append)?$ + type: string + dataplaneDriver: + description: DataplaneDriver filename of the external dataplane driver + to use. Only used if UseInternalDataplaneDriver is set to false. + type: string + dataplaneWatchdogTimeout: + description: "DataplaneWatchdogTimeout is the readiness/liveness timeout + used for Felix's (internal) dataplane driver. Increase this value + if you experience spurious non-ready or non-live events when Felix + is under heavy load. Decrease the value to get felix to report non-live + or non-ready more quickly. [Default: 90s] \n Deprecated: replaced + by the generic HealthTimeoutOverrides." + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + debugSimulateDataplaneHangAfter: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + defaultEndpointToHostAction: + description: 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables "DROP" action. If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. 
Use + RETURN if you have your own rules in the iptables "INPUT" chain; + Calico will insert its rules at the top of that chain, then "RETURN" + packets to the "INPUT" chain once it has completed processing workload + endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. + [Default: Drop]' + pattern: ^(?i)(Drop|Accept|Return)?$ + type: string + deviceRouteProtocol: + description: This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: This is the IPv4 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + deviceRouteSourceAddressIPv6: + description: This is the IPv6 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all inbound host ports, use the value + none. The default value allows ssh access and DHCP. [Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow outgoing traffic from host endpoints + to irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all outbound host ports, use the value + none. The default value opens etcd''s standard ports to ensure that + Felix does not get cut off from etcd as well as allowing DHCP and + DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, + tcp:6667, udp:53, udp:67]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: FeatureDetectOverride is used to override feature detection + based on auto-detected platform capabilities. 
Values are specified + in a comma separated list with no spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" + or "false" will force the feature, empty or omitted values are auto-detected. + pattern: ^([a-zA-Z0-9-_]+=(true|false|),)*([a-zA-Z0-9-_]+=(true|false|))?$ + type: string + featureGates: + description: FeatureGates is used to enable or disable tech-preview + Calico features. Values are specified in a comma separated list + with no spaces, example; "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false". + This is used to enable features that are not fully production ready. + pattern: ^([a-zA-Z0-9-_]+=([^=]+),)*([a-zA-Z0-9-_]+=([^=]+))?$ + type: string + floatingIPs: + description: FloatingIPs configures whether or not Felix will program + non-OpenStack floating IP addresses. (OpenStack-derived floating + IPs are always programmed, regardless of this setting.) + enum: + - Enabled + - Disabled + type: string + genericXDPEnabled: + description: 'GenericXDPEnabled enables Generic XDP so network cards + that don''t support XDP offload or driver modes can use XDP. This + is not recommended since it doesn''t provide better performance + than iptables. [Default: false]' + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + healthTimeoutOverrides: + description: HealthTimeoutOverrides allows the internal watchdog timeouts + of individual subcomponents to be overridden. This is useful for + working around "false positive" liveness timeouts that can occur + in particularly stressful workloads or if CPU is constrained. For + a list of active subcomponents, see Felix's logs. + items: + properties: + name: + type: string + timeout: + type: string + required: + - name + - timeout + type: object + type: array + interfaceExclude: + description: 'InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. The + default value ensures that Felix ignores Kubernetes'' IPVS dummy + interface, which is used internally by kube-proxy. If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with ''/''. For example having values ''/^kube/,veth1'' + will exclude all interfaces that begin with ''kube'' and also the + interface ''veth1''. [Default: kube-ipvs0]' + type: string + interfacePrefix: + description: 'InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. For example our Kubernetes and Docker + integrations set the ''cali'' value, and our OpenStack integration + sets the ''tap'' value. [Default: cali]' + type: string + interfaceRefreshInterval: + description: InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + ipipEnabled: + description: 'IPIPEnabled overrides whether Felix should configure + an IPIP interface on the host. Optional as Felix determines this + based on the existing IP pools. [Default: nil (unset)]' + type: boolean + ipipMTU: + description: 'IPIPMTU is the MTU to set on the tunnel device. 
See
+ Configuring MTU [Default: 1440]'
+ type: integer
+ ipsetsRefreshInterval:
+ description: 'IpsetsRefreshInterval is the period at which Felix re-checks
+ all iptables state to ensure that no other process has accidentally
+ broken Calico''s rules. Set to 0 to disable iptables refresh. [Default:
+ 90s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ iptablesBackend:
+ description: IptablesBackend specifies which backend of iptables will
+ be used. The default is Auto.
+ pattern: ^(?i)(Auto|FelixConfiguration|FelixConfigurationList|Legacy|NFT)?$
+ type: string
+ iptablesFilterAllowAction:
+ pattern: ^(?i)(Accept|Return)?$
+ type: string
+ iptablesFilterDenyAction:
+ description: IptablesFilterDenyAction controls what happens to traffic
+ that is denied by network policy. By default Calico blocks traffic
+ with an iptables "DROP" action. If you want to use "REJECT" action
+ instead you can configure it here.
+ pattern: ^(?i)(Drop|Reject)?$
+ type: string
+ iptablesLockFilePath:
+ description: 'IptablesLockFilePath is the location of the iptables
+ lock file. You may need to change this if the lock file is not in
+ its standard location (for example if you have mapped it into Felix''s
+ container at a different path). [Default: /run/xtables.lock]'
+ type: string
+ iptablesLockProbeInterval:
+ description: 'IptablesLockProbeInterval is the time that Felix will
+ wait between attempts to acquire the iptables lock if it is not
+ available. Lower values make Felix more responsive when the lock
+ is contended, but use more CPU. [Default: 50ms]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ iptablesLockTimeout:
+ description: 'IptablesLockTimeout is the time that Felix will wait
+ for the iptables lock, or 0, to disable. To use this feature, Felix
+ must share the iptables lock file with all other processes that
+ also take the lock. When running Felix inside a container, this
+ requires the /run directory of the host to be mounted into the calico/node
+ or calico/felix container. [Default: 0s disabled]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ iptablesMangleAllowAction:
+ pattern: ^(?i)(Accept|Return)?$
+ type: string
+ iptablesMarkMask:
+ description: 'IptablesMarkMask is the mask that Felix selects its
+ IPTables Mark bits from. Should be a 32 bit hexadecimal number with
+ at least 8 bits set, none of which clash with any other mark bits
+ in use on the system. [Default: 0xff000000]'
+ format: int32
+ type: integer
+ iptablesNATOutgoingInterfaceFilter:
+ type: string
+ iptablesPostWriteCheckInterval:
+ description: 'IptablesPostWriteCheckInterval is the period after Felix
+ has done a write to the dataplane that it schedules an extra read
+ back in order to check the write was not clobbered by another process.
+ This should only occur if another application on the system doesn''t
+ respect the iptables lock. [Default: 1s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ iptablesRefreshInterval:
+ description: 'IptablesRefreshInterval is the period at which Felix
+ re-checks the IP sets in the dataplane to ensure that no other process
+ has accidentally broken Calico''s rules. Set to 0 to disable IP
+ sets refresh. Note: the default for this value is lower than the
+ other refresh intervals as a workaround for a Linux kernel bug that
+ was fixed in kernel version 4.11. If you are using v4.11 or greater
+ you may want to set this to a higher value to reduce Felix CPU
+ usage.
[Default: 10s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + ipv6Support: + description: IPv6Support controls whether Felix enables support for + IPv6 (if supported by the in-use dataplane). + type: boolean + kubeNodePortRanges: + description: 'KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767].' + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logDebugFilenameRegex: + description: LogDebugFilenameRegex controls which source code files + have their Debug log output included in the logs. Only logs from + files with names that match the given regular expression are included. The + filter only applies to Debug level logs. + type: string + logFilePath: + description: 'LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]' + type: string + logPrefix: + description: 'LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]' + type: string + logSeverityFile: + description: 'LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ + type: string + logSeveritySys: + description: 'LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: 'MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]' + type: string + metadataPort: + description: 'MetadataPort is the port of the metadata server. This, + combined with global.MetadataAddr (if not ''None''), is used to + set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775].' + type: integer + mtuIfacePattern: + description: MTUIfacePattern is a regular expression that controls + which interfaces Felix should scan in order to calculate the host's + MTU. This should not match workload interfaces (usually named cali...). + type: string + natOutgoingAddress: + description: NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. 
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ netlinkTimeout:
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ openstackRegion:
+ description: 'OpenstackRegion is the name of the region that a particular
+ Felix belongs to. In a multi-region Calico/OpenStack deployment,
+ this must be configured somehow for each Felix (here in the datamodel,
+ or in felix.cfg or the environment on each compute node), and must
+ match the [calico] openstack_region value configured in neutron.conf
+ on each node. [Default: Empty]'
+ type: string
+ policySyncPathPrefix:
+ description: 'PolicySyncPathPrefix is used by Felix to communicate
+ policy changes to external services, like Application layer policy.
+ [Default: Empty]'
+ type: string
+ prometheusGoMetricsEnabled:
+ description: 'PrometheusGoMetricsEnabled disables Go runtime metrics
+ collection, which the Prometheus client does by default, when set
+ to false. This reduces the number of metrics reported, reducing
+ Prometheus load. [Default: true]'
+ type: boolean
+ prometheusMetricsEnabled:
+ description: 'PrometheusMetricsEnabled enables the Prometheus metrics
+ server in Felix if set to true. [Default: false]'
+ type: boolean
+ prometheusMetricsHost:
+ description: 'PrometheusMetricsHost is the host that the Prometheus
+ metrics server should bind to. [Default: empty]'
+ type: string
+ prometheusMetricsPort:
+ description: 'PrometheusMetricsPort is the TCP port that the Prometheus
+ metrics server should bind to. [Default: 9091]'
+ type: integer
+ prometheusProcessMetricsEnabled:
+ description: 'PrometheusProcessMetricsEnabled disables process metrics
+ collection, which the Prometheus client does by default, when set
+ to false. This reduces the number of metrics reported, reducing
+ Prometheus load. [Default: true]'
+ type: boolean
+ prometheusWireGuardMetricsEnabled:
+ description: 'PrometheusWireGuardMetricsEnabled disables wireguard
+ metrics collection, which the Prometheus client does by default,
+ when set to false. This reduces the number of metrics reported,
+ reducing Prometheus load. [Default: true]'
+ type: boolean
+ removeExternalRoutes:
+ description: Whether or not to remove device routes that have not
+ been programmed by Felix. Disabling this will allow external applications
+ to also add device routes. This is enabled by default which means
+ we will remove externally added routes.
+ type: boolean
+ reportingInterval:
+ description: 'ReportingInterval is the interval at which Felix reports
+ its status into the datastore or 0 to disable. Must be non-zero
+ in OpenStack deployments. [Default: 30s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ reportingTTL:
+ description: 'ReportingTTL is the time-to-live setting for process-wide
+ status reports. [Default: 90s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ routeRefreshInterval:
+ description: 'RouteRefreshInterval is the period at which Felix re-checks
+ the routes in the dataplane to ensure that no other process has
+ accidentally broken Calico''s rules. Set to 0 to disable route refresh.
+ [Default: 90s]'
+ pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$
+ type: string
+ routeSource:
+ description: 'RouteSource configures where Felix gets its routing
+ information. - WorkloadIPs: use workload endpoints to construct
+ routes. - CalicoIPAM: the default - use IPAM data to construct routes.'
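+          # Editor's sketch (not part of the upstream manifest): Felix settings are
+          # usually tuned through a single cluster-wide resource named "default".
+          # A minimal example using only fields from this schema:
+          #
+          #   apiVersion: crd.projectcalico.org/v1
+          #   kind: FelixConfiguration
+          #   metadata:
+          #     name: default
+          #   spec:
+          #     bpfEnabled: false
+          #     logSeverityScreen: Info
+          #     reportingInterval: 30s
+          #     routeSource: CalicoIPAM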
+ pattern: ^(?i)(WorkloadIPs|CalicoIPAM)?$ + type: string + routeSyncDisabled: + description: RouteSyncDisabled will disable all operations performed + on the route table. Set to true to run in network-policy mode only. + type: boolean + routeTableRange: + description: Deprecated in favor of RouteTableRanges. Calico programs + additional Linux route tables for various purposes. RouteTableRange + specifies the indices of the route tables that Calico should use. + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + routeTableRanges: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRanges specifies a set of table index ranges + that Calico should use. Deprecates`RouteTableRange`, overrides `RouteTableRange`. + items: + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + type: array + serviceLoopPrevention: + description: 'When service IP advertisement is enabled, prevent routing + loops to service IPs that are not in use, by dropping or rejecting + packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", + in which case such routing loops continue to be allowed. [Default: + Drop]' + pattern: ^(?i)(Drop|Reject|Disabled)?$ + type: string + sidecarAccelerationEnabled: + description: 'SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]' + type: boolean + usageReportingEnabled: + description: 'UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. [Default: + true]' + type: boolean + usageReportingInitialDelay: + description: 'UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + usageReportingInterval: + description: 'UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + useInternalDataplaneDriver: + description: UseInternalDataplaneDriver, if true, Felix will use its + internal dataplane programming logic. If false, it will launch + an external dataplane driver and communicate with it over protobuf. + type: boolean + vxlanEnabled: + description: 'VXLANEnabled overrides whether Felix should create the + VXLAN tunnel device for IPv4 VXLAN networking. Optional as Felix + determines this based on the existing IP pools. [Default: nil (unset)]' + type: boolean + vxlanMTU: + description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel + device. See Configuring MTU [Default: 1410]' + type: integer + vxlanMTUV6: + description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel + device. See Configuring MTU [Default: 1390]' + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + windowsManageFirewallRules: + description: 'WindowsManageFirewallRules configures whether or not + Felix will program Windows Firewall rules. (to allow inbound access + to its own metrics ports) [Default: Disabled]' + enum: + - Enabled + - Disabled + type: string + wireguardEnabled: + description: 'WireguardEnabled controls whether Wireguard is enabled + for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network). 
+ [Default: false]' + type: boolean + wireguardEnabledV6: + description: 'WireguardEnabledV6 controls whether Wireguard is enabled + for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network). + [Default: false]' + type: boolean + wireguardHostEncryptionEnabled: + description: 'WireguardHostEncryptionEnabled controls whether Wireguard + host-to-host encryption is enabled. [Default: false]' + type: boolean + wireguardInterfaceName: + description: 'WireguardInterfaceName specifies the name to use for + the IPv4 Wireguard interface. [Default: wireguard.cali]' + type: string + wireguardInterfaceNameV6: + description: 'WireguardInterfaceNameV6 specifies the name to use for + the IPv6 Wireguard interface. [Default: wg-v6.cali]' + type: string + wireguardKeepAlive: + description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive + option. Set 0 to disable. [Default: 0]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + wireguardListeningPort: + description: 'WireguardListeningPort controls the listening port used + by IPv4 Wireguard. [Default: 51820]' + type: integer + wireguardListeningPortV6: + description: 'WireguardListeningPortV6 controls the listening port + used by IPv6 Wireguard. [Default: 51821]' + type: integer + wireguardMTU: + description: 'WireguardMTU controls the MTU on the IPv4 Wireguard + interface. See Configuring MTU [Default: 1440]' + type: integer + wireguardMTUV6: + description: 'WireguardMTUV6 controls the MTU on the IPv6 Wireguard + interface. See Configuring MTU [Default: 1420]' + type: integer + wireguardRoutingRulePriority: + description: 'WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]' + type: integer + workloadSourceSpoofing: + description: WorkloadSourceSpoofing controls whether pods can use + the allowedSourcePrefixes annotation to send traffic with a source + IP address that is not theirs. This is disabled by default. When + set to "Any", pods can request any prefix. + pattern: ^(?i)(Disabled|Any)?$ + type: string + xdpEnabled: + description: 'XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]' + type: boolean + xdpRefreshInterval: + description: 'XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico''s BPF maps or attached programs. Set to 0 to disable XDP + refresh. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy + on forward traffic. + type: boolean + doNotTrack: + description: DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. + type: boolean + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. 
+ type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. 
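+              # Editor's sketch (not part of the upstream manifest): a small
+              # GlobalNetworkPolicy exercising the rule schema above; the policy
+              # name and selectors are hypothetical. Rules are evaluated in order
+              # and the first matching rule's action applies.
+              #
+              #   apiVersion: crd.projectcalico.org/v1
+              #   kind: GlobalNetworkPolicy
+              #   metadata:
+              #     name: allow-dns-egress
+              #   spec:
+              #     selector: all()
+              #     egress:
+              #       - action: Allow
+              #         protocol: UDP
+              #         destination:
+              #           selector: k8s-app == 'kube-dns'
+              #           ports: [53]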
+ items:
+ type: string
+ type: array
+ paths:
+ description: 'Paths is an optional field that restricts
+ the rule to apply to HTTP requests that use one of the
+ listed HTTP Paths. Multiple paths are OR''d together.
+ e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
+ ONLY specify either an `exact` or a `prefix` match. The
+ validator will check for it.'
+ items:
+ description: 'HTTPPath specifies an HTTP path to match.
+ It may be either of the form: exact: <path>: which matches
+ the path exactly or prefix: <path-prefix>: which matches
+ the path prefix'
+ properties:
+ exact:
+ type: string
+ prefix:
+ type: string
+ type: object
+ type: array
+ type: object
+ icmp:
+ description: ICMP is an optional field that restricts the rule
+ to apply to a specific type and code of ICMP traffic. This
+ should only be specified if the Protocol field is set to "ICMP"
+ or "ICMPv6".
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ ipVersion:
+ description: IPVersion is an optional field that restricts the
+ rule to only match a specific IP version.
+ type: integer
+ metadata:
+ description: Metadata contains additional information for this
+ rule
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations is a set of key value pairs that
+ give extra information about the rule
+ type: object
+ type: object
+ notICMP:
+ description: NotICMP is the negated version of the ICMP field.
+ properties:
+ code:
+ description: Match on a specific ICMP code. If specified,
+ the Type value must also be specified. This is a technical
+ limitation imposed by the kernel's iptables firewall,
+ which Calico uses to enforce the rule.
+ type: integer
+ type:
+ description: Match on a specific ICMP type. For example
+ a value of 8 refers to ICMP Echo Request (i.e. pings).
+ type: integer
+ type: object
+ notProtocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: NotProtocol is the negated version of the Protocol
+ field.
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ protocol:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "Protocol is an optional field that restricts the
+ rule to only apply to traffic of a specific IP protocol. Required
+ if any of the EntityRules contain Ports (because ports only
+ apply to certain protocols). \n Must be one of these string
+ values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
+ \"UDPLite\" or an integer in the range 1-255."
+ pattern: ^.*
+ x-kubernetes-int-or-string: true
+ source:
+ description: Source contains the match criteria that apply to
+ source entity.
+ properties:
+ namespaceSelector:
+ description: "NamespaceSelector is an optional field that
+ contains a selector expression. Only traffic that originates
+ from (or terminates at) endpoints within the selected
+ namespaces will be matched. When both NamespaceSelector
+ and another selector are defined on the same rule, then
+ only workload endpoints that are matched by both selectors
+ will be selected by the rule. \n For NetworkPolicy, an
+ empty NamespaceSelector implies that the Selector is limited
+ to selecting only workload endpoints in the same namespace
+ as the NetworkPolicy.
\n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. 
If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. 
This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. 
+ e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either an `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: <path>: which matches + the path exactly or prefix: <path-prefix>: which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint.
\n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. 
If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + namespaceSelector: + description: NamespaceSelector is an optional field for an expression + used to select a pod based on namespaces. + type: string + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + performanceHints: + description: "PerformanceHints contains a list of hints to Calico's + policy engine to help process the policy more efficiently. Hints + never change the enforcement behaviour of the policy. \n Currently, + the only available hint is \"AssumeNeededOnEveryNode\". When that + hint is set on a policy, Felix will act as if the policy matches + a local endpoint even if it does not. This is useful for \"preloading\" + any large static policies that are known to be used on every node. + If the policy is _not_ used on a particular node then the work done + to preload the policy (and to maintain it) is wasted." + items: + type: string + type: array + preDNAT: + description: PreDNAT indicates to apply the rules in this policy before + any DNAT. + type: boolean + selector: + description: "The selector is an expression used to pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both.
When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. + If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. (If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as \"eth0\") are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." + type: string + node: + description: The node name identifying the Calico node instance. + type: string + ports: + description: Ports contains the endpoint's named ports, which may + be referenced in security policy rules. + items: + properties: + name: + type: string + port: + type: integer + protocol: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + required: + - name + - port + - protocol + type: object + type: array + profiles: + description: A list of identifiers of security Profile objects that + apply to this endpoint. Each profile is applied in the order that + they appear in this list. Profile rules are applied after the selector-based + security policy. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMBlock + listKind: IPAMBlockList + plural: ipamblocks + singular: ipamblock + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMBlockSpec contains the specification for an IPAMBlock + resource. + properties: + affinity: + description: Affinity of the block, if this block has one. If set, + it will be of the form "host:<hostname>". If not set, this block + is not affine to a host. + type: string + allocations: + description: Array of allocations in-use within this block. nil entries + mean the allocation is free. For non-nil entries at index i, the + index is the ordinal of the allocation within this block and the + value is the index of the associated attributes in the Attributes + array. + items: + type: integer + # TODO: This nullable is manually added in. We should update controller-gen + # to handle []*int properly itself. + nullable: true + type: array + attributes: + description: Attributes is an array of arbitrary metadata associated + with allocations in the block. To find attributes for a given allocation, + use the value of the allocation's entry in the Allocations array + as the index of the element in this array. + items: + properties: + handle_id: + type: string + secondary: + additionalProperties: + type: string + type: object + type: object + type: array + cidr: + description: The block's CIDR. + type: string + deleted: + description: Deleted is an internal boolean used to work around a limitation + in the Kubernetes API whereby deletion will not return a conflict + error if the block has been updated. It should not be set manually. + type: boolean + sequenceNumber: + default: 0 + description: We store a sequence number that is updated each time + the block is written. Each allocation will also store the sequence + number of the block at the time of its creation. When releasing + an IP, passing the sequence number associated with the allocation + allows us to protect against a race condition and ensure the IP + hasn't been released and re-allocated since the release request. + format: int64 + type: integer + sequenceNumberForAllocation: + additionalProperties: + format: int64 + type: integer + description: Map of allocated ordinal within the block to sequence + number of the block at the time of allocation. Kubernetes does not + allow numerical keys for maps, so the key is cast to a string. + type: object + strictAffinity: + description: StrictAffinity on the IPAMBlock is deprecated and no + longer used by the code. Use IPAMConfig StrictAffinity instead. + type: boolean + unallocated: + description: Unallocated is an ordered list of allocations which are + free in the block.
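+# Note: IPAMBlocks are managed by Calico IPAM and are not normally written by
+# hand. As an illustration of the indexing described above: a non-nil entry at
+# allocations[i] is an index into the attributes array, and unallocated lists
+# the ordinals that are still free. A hypothetical fragment for a block with
+# one address in use (all values below are made up):
+#
+#   spec:
+#     cidr: 10.244.0.0/26
+#     allocations: [0, null, null]    # ordinal 0 uses attributes[0]
+#     attributes:
+#       - handle_id: k8s-pod-network.example
+#         secondary:
+#           node: node-1
+#     unallocated: [1, 2]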
+ items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + maxBlocksPerHost: + description: MaxBlocksPerHost, if non-zero, is the max number of blocks + that can be affine to each host. + maximum: 2147483647 + minimum: 0 + type: integer + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMHandleSpec contains the specification for an IPAMHandle + resource. 
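+# IPAMHandles are likewise internal bookkeeping objects. A hypothetical
+# example of the fields defined below, mapping one block CIDR to the number
+# of addresses allocated under this handle (values are illustrative):
+#
+#   spec:
+#     handleID: k8s-pod-network.example
+#     block:
+#       10.244.0.0/26: 1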
+ properties: + block: + additionalProperties: + type: integer + type: object + deleted: + type: boolean + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + allowedUses: + description: AllowedUse controls what the IP pool will be used for. If + not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility + items: + type: string + type: array + blockSize: + description: The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 122 for IPv6. + type: integer + cidr: + description: The pool CIDR. + type: string + disableBGPExport: + description: 'Disable exporting routes from this IP Pool''s CIDR over + BGP. [Default: false]' + type: boolean + disabled: + description: When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + properties: + enabled: + description: When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: Contains configuration for IPIP tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + type: boolean + natOutgoing: + description: When natOutgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. 
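+# For reference, a minimal IPPool exercising the fields described in this
+# spec might look like the following sketch (the name, CIDR and modes are
+# illustrative, not recommendations):
+#
+#   apiVersion: crd.projectcalico.org/v1
+#   kind: IPPool
+#   metadata:
+#     name: example-ipv4-pool
+#   spec:
+#     cidr: 192.168.0.0/16
+#     blockSize: 26
+#     ipipMode: Always
+#     vxlanMode: Never
+#     natOutgoing: true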
+ type: boolean + nodeSelector: + description: Allows IPPool to allocate for a specific node by label + selector. + type: string + vxlanMode: + description: Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). + type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: ipreservations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPReservation + listKind: IPReservationList + plural: ipreservations + singular: ipreservation + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPReservationSpec contains the specification for an IPReservation + resource. + properties: + reservedCIDRs: + description: ReservedCIDRs is a list of CIDRs and/or IP addresses + that Calico IPAM will exclude from new allocations. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. 
+ properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the controller + to determine if an IP address has been leaked. Set to 0 + to disable IP garbage collection. [Default: 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: 9094]' + type: integer + required: + - controllers + type: object + status: + description: KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. 
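+# A minimal KubeControllersConfiguration covering the spec fields above might
+# look like this sketch (the singleton resource is conventionally named
+# "default"; the periods and levels shown are illustrative):
+#
+#   apiVersion: crd.projectcalico.org/v1
+#   kind: KubeControllersConfiguration
+#   metadata:
+#     name: default
+#   spec:
+#     logSeverityScreen: Info
+#     healthChecks: Enabled
+#     etcdV3CompactionPeriod: 10m
+#     controllers:
+#       node:
+#         reconcilerPeriod: 5m
+#         syncLabels: Enabled
+#         hostEndpoint:
+#           autoCreate: Disabled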
+ properties: + environmentVars: + additionalProperties: + type: string + description: EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. + type: object + runningConfig: + description: RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the + controller to determine if an IP address has been leaked. + Set to 0 to disable IP garbage collection. [Default: + 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which + logs are sent to the stdout. 
[Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: + 9094]' + type: integer + required: + - controllers + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + singular: networkpolicy + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. 
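+# Tying the Rule fields together, a sketch of an egress rule that allows TCP
+# traffic to a subnet on selected ports (all values are hypothetical):
+#
+#   egress:
+#     - action: Allow
+#       protocol: TCP
+#       destination:
+#         nets: ["10.0.0.0/8"]
+#         ports: [443, "8080:8090"]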
+ items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. 
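+# e.g. a destination match on a named Service rather than a selector
+# (service name and namespace are placeholders):
+#
+#   destination:
+#     services:
+#       name: frontend
+#       namespace: web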
+ type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either an `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: <path>: which matches + the path exactly or prefix: <path-prefix>: which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity.
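+# A sketch of the HTTP match criteria described above, restricting a rule to
+# particular methods and paths (the paths shown are placeholders):
+#
+#   http:
+#     methods: ["GET"]
+#     paths:
+#       - exact: /health
+#       - prefix: /api/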
+ properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. 
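+# e.g. matching traffic from pods running as particular service accounts;
+# when both fields are given they are AND'ed (values are placeholders):
+#
+#   source:
+#     serviceAccounts:
+#       names: ["api-client"]
+#       selector: team == "backend"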
+ properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. 
+ items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. 
If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either an `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: <path>: which matches + the path exactly or prefix: <path-prefix>: which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. 
Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. 
+ items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + performanceHints: + description: "PerformanceHints contains a list of hints to Calico's + policy engine to help process the policy more efficiently. Hints + never change the enforcement behaviour of the policy. \n Currently, + the only available hint is \"AssumeNeededOnEveryNode\". When that + hint is set on a policy, Felix will act as if the policy matches + a local endpoint even if it does not. This is useful for \"preloading\" + any large static policies that are known to be used on every node. + If the policy is _not_ used on a particular node then the work done + to preload the policy (and to maintain it) is wasted." + items: + type: string + type: array + selector: + description: "The selector is an expression used to pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! 
has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress are present in the policy. The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. + \n When the policy is read back again, Types will always be one + of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are watched to check for existence as part of IPAM controller. + - apiGroups: [""] + resources: + - pods + verbs: + - get + - list + - watch + # IPAM resources are manipulated in response to node and block updates, as well as periodic triggers. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - ipreservations + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch + # Pools are watched to maintain a mapping of blocks to IP pools. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - watch + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - list + - create + - update + - watch + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch +--- +# Source: calico/templates/calico-node-rbac.yaml +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-cni-plugin + verbs: + - create + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + # EndpointSlices are used for Service-based network policy rule + # enforcement. + - apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - watch + - list + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - bgpfilters + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico must update some CRDs. 
+ - apiGroups: [ "crd.projectcalico.org" ] + resources: + - caliconodestatuses + verbs: + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + # The CNI plugin and calico/node need to be able to create a default + # IPAMConfiguration + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + - create + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get +--- +# Source: calico/templates/calico-node-rbac.yaml +# CNI cluster role +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-cni-plugin +rules: + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + - clusterinformations + - ippools + - ipreservations + - ipamconfigs + verbs: + - get + - list + - create + - update + - delete +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: + - kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-node-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: + - kind: ServiceAccount + name: calico-node + namespace: kube-system +--- +# Source: calico/templates/calico-node-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-cni-plugin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-cni-plugin +subjects: + - kind: ServiceAccount + name: calico-cni-plugin + namespace: kube-system + +{% if (cni.calico.typha=="true") %} +--- +# Source: calico/templates/calico-typha.yaml +# This manifest creates a Service, which will be backed by Calico's Typha daemon. +# Typha sits in between Felix and the API server, reducing Calico's load on the API server. 
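+# A minimal sketch of the inventory values this block keys off (the variable
+# names are the ones this template already uses; the exact shape of the
+# defaults is an assumption, see the cni role defaults):
+#
+#   cni:
+#     calico:
+#       typha: "true"   # gates this Service and the Typha Deployment below
+#       replicas: 3     # reused by the Typha Deployment; the guidance below
+#                       # is one replica per 100-200 nodes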
+ +apiVersion: v1 +kind: Service +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: calico-typha + selector: + k8s-app: calico-typha + +{% endif %} +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the calico-node container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure calico-node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: {{ cni.calico.cni_image }} + imagePullPolicy: IfNotPresent + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: {{ cni.calico.cni_image }} + imagePullPolicy: IfNotPresent + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Prevents the container from sleeping forever. 
+ - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # This init container mounts the necessary filesystems needed by the BPF data plane + # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed + # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. + - name: "mount-bpffs" + image: {{ cni.calico.node_image }} + imagePullPolicy: IfNotPresent + command: ["calico-node", "-init", "-best-effort"] + volumeMounts: + - mountPath: /sys/fs + name: sys-fs + # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + - mountPath: /var/run/calico + name: var-run-calico + # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary, + # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly. + - mountPath: /nodeproc + name: nodeproc + readOnly: true + securityContext: + privileged: true + containers: + # Runs calico-node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: {{ cni.calico.node_image }} + imagePullPolicy: IfNotPresent + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + {% if (cni.calico.typha=="true") %} + # Typha support: controlled by the ConfigMap. + - name: FELIX_TYPHAK8SSERVICENAME + valueFrom: + configMapKeyRef: + name: calico-config + key: typha_service_name + {% endif %} + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Auto-detect the BGP IP address. + - name: NODEIP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: IP_AUTODETECTION_METHOD + value: "can-reach=$(NODEIP)" + - name: IP + value: "autodetect" + {% if (cni.ipv6_support=="true") %} + - name: IP6 + value: "autodetect" + {% endif %} + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "{{ cni.calico.ipip_mode }}" + # Enable or Disable VXLAN on the default IP pool. + - name: CALICO_IPV4POOL_VXLAN + value: "{{ cni.calico.vxlan_mode }}" + {% if (cni.calico.ipv4pool_nat_outgoing) %} + - name: CALICO_IPV4POOL_NAT_OUTGOING + value: "true" + {% else %} + - name: CALICO_IPV4POOL_NAT_OUTGOING + value: "false" + {% endif %} + {% if (cni.ipv6_support=="true") %} + # Enable or Disable VXLAN on the default IPv6 IP pool. + - name: CALICO_IPV6POOL_VXLAN + value: "Always" + - name: CALICO_IPV6POOL_NAT_OUTGOING + value: "true" + {% else %} + # Enable or Disable VXLAN on the default IPv6 IP pool. 
+ - name: CALICO_IPV6POOL_VXLAN + value: "Never" + - name: CALICO_IPV6POOL_NAT_OUTGOING + value: "false" + {% endif %} + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the Wireguard tunnel device. + - name: FELIX_WIREGUARDMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + {% if cni.calico.default_ip_pool %} + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. + - name: CALICO_IPV4POOL_CIDR + value: "{{ cni.kube_pods_v4_cidr }}" + - name: CALICO_IPV4POOL_BLOCK_SIZE + value: "{{ cni.node_cidr_mask_size }}" + {% if (cni.ipv6_support=="true") %} + - name: CALICO_IPV6POOL_CIDR + value: "{{ cni.kube_pods_v6_cidr }}" + - name: CALICO_IPV6POOL_BLOCK_SIZE + value: "120" + {% endif %} + {% else %} + - name: NO_DEFAULT_POOLS + value: "true" + - name: CALICO_IPV4POOL_CIDR + value: "" + {% if (cni.ipv6_support=="true") %} + - name: CALICO_IPV6POOL_CIDR + value: "" + {% endif %} + {% endif %} + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. + {% if (cni.ipv6_support=="true") %} + - name: FELIX_IPV6SUPPORT + value: "true" + {% else %} + - name: FELIX_IPV6SUPPORT + value: "false" + {% endif %} + - name: FELIX_HEALTHENABLED + value: "true" + - name: FELIX_DEVICEROUTESOURCEADDRESS + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + securityContext: + privileged: true + resources: + requests: + cpu: 250m + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + timeoutSeconds: 10 + volumeMounts: + # For maintaining CNI plugin API credentials. + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the + # parent directory. + - name: bpffs + mountPath: /sys/fs/bpf + - name: cni-log-dir + mountPath: /var/log/calico/cni + readOnly: true + volumes: + # Used by calico-node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: sys-fs + hostPath: + path: /sys/fs/ + type: DirectoryOrCreate + - name: bpffs + hostPath: + path: /sys/fs/bpf + type: Directory + # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs. 
+ - name: nodeproc + hostPath: + path: /proc + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used to access CNI logs. + - name: cni-log-dir + hostPath: + path: /var/log/calico/cni + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent +--- +# Source: calico/templates/calico-kube-controllers.yaml +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. + replicas: {{ cni.calico.replicas }} + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + spec: + nodeSelector: + kubernetes.io/os: linux +{{ cni.calico.node_selector|to_yaml:8|safe }} + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: + - calico-kube-controllers + topologyKey: kubernetes.io/hostname + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: {{ cni.calico.kube_controller_image }} + imagePullPolicy: IfNotPresent + env: + # Choose which controllers to run. + - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 + +{% if (cni.calico.typha=="true") %} +--- +# Source: calico/templates/calico-typha.yaml +# This manifest creates a Deployment of Typha to back the above service. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the + # typha_service_name variable in the calico-config ConfigMap above. + # + # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential + # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In + # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. + replicas: {{ cni.calico.replicas }} + revisionHistoryLimit: 2 + selector: + matchLabels: + k8s-app: calico-typha + strategy: + rollingUpdate: + # 100% surge allows a complete up-level set of typha instances to start and become ready, + # which in turn allows all the back-level typha instances to start shutting down. 
This + # means that connections tend to bounce directly from a back-level instance to an up-level + # instance. + maxSurge: 100% + # In case the cluster is unable to schedule extra surge instances, allow at most one instance + # to shut down to make room. You can set this to 0 if you're sure there'll always be enough room to + # schedule extra typha instances during an upgrade (because setting it to 0 blocks shutdown until + # up-level typha instances are online and ready). + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + k8s-app: calico-typha + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' + spec: + nodeSelector: + kubernetes.io/os: linux +{{ cni.calico.node_selector|to_yaml:8|safe }} + hostNetwork: true + # Typha supports graceful shut down, disconnecting clients slowly during the grace period. + # The TYPHA_SHUTDOWNTIMEOUTSECS env var should be kept in sync with this value. + terminationGracePeriodSeconds: 300 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: + - calico-typha + topologyKey: kubernetes.io/hostname + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + # Make sure Typha can get scheduled on any nodes. + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + # Since Calico can't network a pod until Typha is up, we need to run Typha itself + # as a host-networked pod. + serviceAccountName: calico-node + priorityClassName: system-cluster-critical + # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 + securityContext: + fsGroup: 65534 + containers: + - image: {{ cni.calico.typha_image }} + imagePullPolicy: IfNotPresent + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Enable "info" logging by default. Can be set to "debug" to increase verbosity. + - name: TYPHA_LOGSEVERITYSCREEN + value: "info" + # Disable logging to file and syslog since those don't make sense in Kubernetes. + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + # Monitor the Kubernetes API to find the number of running instances and rebalance + # connections. + - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_HEALTHENABLED + value: "true" + # Set this to the same value as terminationGracePeriodSeconds; it tells Typha how much time + # it has to shut down. + - name: TYPHA_SHUTDOWNTIMEOUTSECS + value: "300" + # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, + # this opens a port on the host, which may need to be secured. 
+ #- name: TYPHA_PROMETHEUSMETRICSENABLED + # value: "true" + #- name: TYPHA_PROMETHEUSMETRICSPORT + # value: "9093" + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + timeoutSeconds: 10 + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 + timeoutSeconds: 10 +{% endif %} diff --git a/builtin/roles/addons/cni/templates/flannel/flannel.yaml b/builtin/roles/addons/cni/templates/flannel/flannel.yaml new file mode 100644 index 00000000..1657b78c --- /dev/null +++ b/builtin/roles/addons/cni/templates/flannel/flannel.yaml @@ -0,0 +1,213 @@ +--- +kind: Namespace +apiVersion: v1 +metadata: + name: kube-flannel + labels: + k8s-app: flannel + pod-security.kubernetes.io/enforce: privileged +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: flannel + name: flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: flannel + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: + - kind: ServiceAccount + name: flannel + namespace: kube-flannel +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: flannel + name: flannel + namespace: kube-flannel +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-flannel + labels: + tier: node + k8s-app: flannel + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "{{ cni.kube_pods_v4_cidr }}", +{% if (cni.ipv6_support=="true") %} + "EnableIPv6": true, + "IPv6Network":"{{ cni.kube_pods_v6_cidr }}", +{% endif %} + "EnableNFTables": {{ cni.kube_proxy }}, + "Backend": { + "Type": "{{ cni.flannel.backend }}" + } + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds + namespace: kube-flannel + labels: + tier: node + app: flannel + k8s-app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + hostNetwork: true + priorityClassName: system-node-critical + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni-plugin + image: {{ cni.flannel.cni_plugin_image }} + command: + - cp + args: + - -f + - /flannel + - /opt/cni/bin/flannel + volumeMounts: + - name: cni-plugin + mountPath: /opt/cni/bin + - name: install-cni + image: {{ cni.flannel.flannel_image }} + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: {{ cni.flannel.flannel_image 
}} + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: EVENT_QUEUE_DEPTH + value: "5000" + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: xtables-lock + mountPath: /run/xtables.lock + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni-plugin + hostPath: + path: /opt/cni/bin + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate diff --git a/builtin/roles/addons/cni/templates/multus.deployment b/builtin/roles/addons/cni/templates/multus.deployment new file mode 100644 index 00000000..9ebfc8df --- /dev/null +++ b/builtin/roles/addons/cni/templates/multus.deployment @@ -0,0 +1,206 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: network-attachment-definitions.k8s.cni.cncf.io +spec: + group: k8s.cni.cncf.io + scope: Namespaced + names: + plural: network-attachment-definitions + singular: network-attachment-definition + kind: NetworkAttachmentDefinition + shortNames: + - net-attach-def + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing + Working Group to express the intent for attaching pods to one or more logical or physical + networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the + latest internal value, and may reject unrecognized values. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment' + type: object + properties: + config: + description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration' + type: string +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +rules: + - apiGroups: ["k8s.cni.cncf.io"] + resources: + - '*' + verbs: + - '*' + - apiGroups: + - "" + resources: + - pods + - pods/status + verbs: + - get + - update + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multus +subjects: +- kind: ServiceAccount + name: multus + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multus + namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: multus-cni-config + namespace: kube-system + labels: + tier: node + app: multus +data: + # NOTE: If you'd prefer to manually apply a configuration file, you may create one here. + # In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod + # change the "args" line below from + # - "--multus-conf-file=auto" + # to: + # "--multus-conf-file=/tmp/multus-conf/70-multus.conf" + # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the + # /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet. 
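+  # For illustration, the static-file wiring would look roughly like this
+  # (values taken from the NOTE above; the DaemonSet below already mounts the
+  # multus-cfg ConfigMap volume at /tmp/multus-conf):
+  #
+  #   containers:
+  #     - name: kube-multus
+  #       args:
+  #         - "--multus-conf-file=/tmp/multus-conf/70-multus.conf"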
+ cni-conf.json: | + { + "name": "multus-cni-network", + "type": "multus", + "capabilities": { + "portMappings": true + }, + "delegates": [ + { + "cniVersion": "0.3.1", + "name": "default-cni-network", + "plugins": [ + { + "type": "flannel", + "name": "flannel.1", + "delegate": { + "isDefaultGateway": true, + "hairpinMode": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + ], + "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig" + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-multus-ds + namespace: kube-system + labels: + tier: node + app: multus + name: multus +spec: + selector: + matchLabels: + name: multus + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + tier: node + app: multus + name: multus + spec: + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: multus + containers: + - name: kube-multus + image: {{ cni.multus.image }} + command: ["/entrypoint.sh"] + args: + - "--multus-conf-file=auto" + - "--cni-version=0.3.1" + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: cnibin + mountPath: /host/opt/cni/bin + - name: multus-cfg + mountPath: /tmp/multus-conf + terminationGracePeriodSeconds: 10 + volumes: + - name: cni + hostPath: + path: /etc/cni/net.d + - name: cnibin + hostPath: + path: /opt/cni/bin + - name: multus-cfg + configMap: + name: multus-cni-config + items: + - key: cni-conf.json + path: 70-multus.conf diff --git a/builtin/roles/addons/cni/templates/multus/multus.yaml b/builtin/roles/addons/cni/templates/multus/multus.yaml new file mode 100644 index 00000000..34175f5d --- /dev/null +++ b/builtin/roles/addons/cni/templates/multus/multus.yaml @@ -0,0 +1,206 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: network-attachment-definitions.k8s.cni.cncf.io +spec: + group: k8s.cni.cncf.io + scope: Namespaced + names: + plural: network-attachment-definitions + singular: network-attachment-definition + kind: NetworkAttachmentDefinition + shortNames: + - net-attach-def + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing + Working Group to express the intent for attaching pods to one or more logical or physical + networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the + latest internal value, and may reject unrecognized values. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment' + type: object + properties: + config: + description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration' + type: string +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +rules: + - apiGroups: ["k8s.cni.cncf.io"] + resources: + - '*' + verbs: + - '*' + - apiGroups: + - "" + resources: + - pods + - pods/status + verbs: + - get + - update + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multus +subjects: + - kind: ServiceAccount + name: multus + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multus + namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: multus-cni-config + namespace: kube-system + labels: + tier: node + app: multus +data: + # NOTE: If you'd prefer to manually apply a configuration file, you may create one here. + # In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod + # change the "args" line below from + # - "--multus-conf-file=auto" + # to: + # "--multus-conf-file=/tmp/multus-conf/70-multus.conf" + # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the + # /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet. 
+ cni-conf.json: | + { + "name": "multus-cni-network", + "type": "multus", + "capabilities": { + "portMappings": true + }, + "delegates": [ + { + "cniVersion": "0.3.1", + "name": "default-cni-network", + "plugins": [ + { + "type": "flannel", + "name": "flannel.1", + "delegate": { + "isDefaultGateway": true, + "hairpinMode": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + ], + "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig" + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-multus-ds + namespace: kube-system + labels: + tier: node + app: multus + name: multus +spec: + selector: + matchLabels: + name: multus + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + tier: node + app: multus + name: multus + spec: + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: multus + containers: + - name: kube-multus + image: {{ cni.multus.image }} + command: ["/entrypoint.sh"] + args: + - "--multus-conf-file=auto" + - "--cni-version=0.3.1" + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: cnibin + mountPath: /host/opt/cni/bin + - name: multus-cfg + mountPath: /tmp/multus-conf + terminationGracePeriodSeconds: 10 + volumes: + - name: cni + hostPath: + path: /etc/cni/net.d + - name: cnibin + hostPath: + path: /opt/cni/bin + - name: multus-cfg + configMap: + name: multus-cni-config + items: + - key: cni-conf.json + path: 70-multus.conf diff --git a/builtin/roles/addons/kata/defaults/main.yaml b/builtin/roles/addons/kata/defaults/main.yaml new file mode 100644 index 00000000..7fd7bdbc --- /dev/null +++ b/builtin/roles/addons/kata/defaults/main.yaml @@ -0,0 +1,3 @@ +kata: + enabled: false + image: kubesphere/kata-deploy:stable diff --git a/builtin/roles/addons/kata/tasks/main.yaml b/builtin/roles/addons/kata/tasks/main.yaml new file mode 100644 index 00000000..f5e662d4 --- /dev/null +++ b/builtin/roles/addons/kata/tasks/main.yaml @@ -0,0 +1,11 @@ +--- +- name: Generate kata deploy file + template: + src: "kata-deploy.yaml" + dest: "/etc/kubernetes/addons/kata-deploy.yaml" + when: kata.enabled + +- name: Deploy kata + command: | + kubectl apply -f /etc/kubernetes/addons/kata-deploy.yaml + when: kata.enabled diff --git a/builtin/roles/addons/kata/templates/kata-deploy.yaml b/builtin/roles/addons/kata/templates/kata-deploy.yaml new file mode 100644 index 00000000..08bbde33 --- /dev/null +++ b/builtin/roles/addons/kata/templates/kata-deploy.yaml @@ -0,0 +1,127 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kata-label-node + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: node-labeler +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kata-label-node-rb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: node-labeler +subjects: + - kind: ServiceAccount + name: kata-label-node + namespace: kube-system +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kata-deploy + namespace: kube-system +spec: + selector: + matchLabels: + name: kata-deploy + template: + metadata: + labels: + name: kata-deploy + spec: + serviceAccountName: kata-label-node + containers: + - name: kube-kata + image: {{ 
kata.image }} + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: ["bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh cleanup"] + command: [ "bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh install" ] + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + securityContext: + privileged: false + volumeMounts: + - name: crio-conf + mountPath: /etc/crio/ + - name: containerd-conf + mountPath: /etc/containerd/ + - name: kata-artifacts + mountPath: /opt/kata/ + - name: dbus + mountPath: /var/run/dbus + - name: systemd + mountPath: /run/systemd + - name: local-bin + mountPath: /usr/local/bin/ + volumes: + - name: crio-conf + hostPath: + path: /etc/crio/ + - name: containerd-conf + hostPath: + path: /etc/containerd/ + - name: kata-artifacts + hostPath: + path: /opt/kata/ + type: DirectoryOrCreate + - name: dbus + hostPath: + path: /var/run/dbus + - name: systemd + hostPath: + path: /run/systemd + - name: local-bin + hostPath: + path: /usr/local/bin/ + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1beta1 +metadata: + name: kata-qemu +handler: kata-qemu +overhead: + podFixed: + memory: "160Mi" + cpu: "250m" +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1beta1 +metadata: + name: kata-clh +handler: kata-clh +overhead: + podFixed: + memory: "130Mi" + cpu: "250m" +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1beta1 +metadata: + name: kata-fc +handler: kata-fc +overhead: + podFixed: + memory: "130Mi" + cpu: "250m" diff --git a/builtin/roles/addons/nfd/defaults/main.yaml b/builtin/roles/addons/nfd/defaults/main.yaml new file mode 100644 index 00000000..6dcf469d --- /dev/null +++ b/builtin/roles/addons/nfd/defaults/main.yaml @@ -0,0 +1,3 @@ +nfd: + enabled: false + image: kubesphere/node-feature-discovery:v0.10.0 diff --git a/builtin/roles/addons/nfd/tasks/main.yaml b/builtin/roles/addons/nfd/tasks/main.yaml new file mode 100644 index 00000000..6b6a7a40 --- /dev/null +++ b/builtin/roles/addons/nfd/tasks/main.yaml @@ -0,0 +1,11 @@ +--- +- name: Generate nfd deploy file + template: + src: "nfd-deploy.yaml" + dest: "/etc/kubernetes/addons/nfd-deploy.yaml" + when: nfd.enabled + +- name: Deploy nfd + command: | + kubectl apply -f /etc/kubernetes/addons/nfd-deploy.yaml + when: nfd.enabled diff --git a/builtin/roles/addons/nfd/templates/nfd-deploy.yaml b/builtin/roles/addons/nfd/templates/nfd-deploy.yaml new file mode 100644 index 00000000..f47d20e5 --- /dev/null +++ b/builtin/roles/addons/nfd/templates/nfd-deploy.yaml @@ -0,0 +1,621 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: node-feature-discovery +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + name: nodefeaturerules.nfd.k8s-sigs.io +spec: + group: nfd.k8s-sigs.io + names: + kind: NodeFeatureRule + listKind: NodeFeatureRuleList + plural: nodefeaturerules + singular: nodefeaturerule + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: NodeFeatureRule resource specifies a configuration for feature-based customization of node objects, such as node labeling. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NodeFeatureRuleSpec describes a NodeFeatureRule. + properties: + rules: + description: Rules is a list of node customization rules. + items: + description: Rule defines a rule for node customization such as labeling. + properties: + labels: + additionalProperties: + type: string + description: Labels to create if the rule matches. + type: object + labelsTemplate: + description: LabelsTemplate specifies a template to expand for dynamically generating multiple labels. Data (after template expansion) must be keys with an optional value ([=]) separated by newlines. + type: string + matchAny: + description: MatchAny specifies a list of matchers one of which must match. + items: + description: MatchAnyElem specifies one sub-matcher of MatchAny. + properties: + matchFeatures: + description: MatchFeatures specifies a set of matcher terms all of which must match. + items: + description: FeatureMatcherTerm defines requirements against one feature set. All requirements (specified as MatchExpressions) are evaluated against each element in the feature set. + properties: + feature: + type: string + matchExpressions: + additionalProperties: + description: "MatchExpression specifies an expression to evaluate against a set of input values. It contains an operator that is applied when matching the input and an array of values that the operator evaluates the input against. \n NB: CreateMatchExpression or MustCreateMatchExpression() should be used for creating new instances. NB: Validate() must be called if Op or Value fields are modified or if a new instance is created from scratch without using the helper functions." + properties: + op: + description: Op is the operator to be applied. + enum: + - In + - NotIn + - InRegexp + - Exists + - DoesNotExist + - Gt + - Lt + - GtLt + - IsTrue + - IsFalse + type: string + value: + description: Value is the list of values that the operand evaluates the input against. Value should be empty if the operator is Exists, DoesNotExist, IsTrue or IsFalse. Value should contain exactly one element if the operator is Gt or Lt and exactly two elements if the operator is GtLt. In other cases Value should contain at least one element. + items: + type: string + type: array + required: + - op + type: object + description: MatchExpressionSet contains a set of MatchExpressions, each of which is evaluated against a set of input values. + type: object + required: + - feature + - matchExpressions + type: object + type: array + required: + - matchFeatures + type: object + type: array + matchFeatures: + description: MatchFeatures specifies a set of matcher terms all of which must match. + items: + description: FeatureMatcherTerm defines requirements against one feature set. All requirements (specified as MatchExpressions) are evaluated against each element in the feature set. + properties: + feature: + type: string + matchExpressions: + additionalProperties: + description: "MatchExpression specifies an expression to evaluate against a set of input values. 
It contains an operator that is applied when matching the input and an array of values that the operator evaluates the input against. \n NB: CreateMatchExpression or MustCreateMatchExpression() should be used for creating new instances. NB: Validate() must be called if Op or Value fields are modified or if a new instance is created from scratch without using the helper functions." + properties: + op: + description: Op is the operator to be applied. + enum: + - In + - NotIn + - InRegexp + - Exists + - DoesNotExist + - Gt + - Lt + - GtLt + - IsTrue + - IsFalse + type: string + value: + description: Value is the list of values that the operand evaluates the input against. Value should be empty if the operator is Exists, DoesNotExist, IsTrue or IsFalse. Value should contain exactly one element if the operator is Gt or Lt and exactly two elements if the operator is GtLt. In other cases Value should contain at least one element. + items: + type: string + type: array + required: + - op + type: object + description: MatchExpressionSet contains a set of MatchExpressions, each of which is evaluated against a set of input values. + type: object + required: + - feature + - matchExpressions + type: object + type: array + name: + description: Name of the rule. + type: string + vars: + additionalProperties: + type: string + description: Vars is the variables to store if the rule matches. Variables do not directly inflict any changes in the node object. However, they can be referenced from other rules enabling more complex rule hierarchies, without exposing intermediary output values as labels. + type: object + varsTemplate: + description: VarsTemplate specifies a template to expand for dynamically generating multiple variables. Data (after template expansion) must be keys with an optional value ([=]) separated by newlines. 
+ type: string + required: + - name + type: object + type: array + required: + - rules + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfd-master + namespace: node-feature-discovery +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: nfd-master +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - patch + - update + - list + - apiGroups: + - topology.node.k8s.io + resources: + - noderesourcetopologies + verbs: + - create + - get + - update + - apiGroups: + - nfd.k8s-sigs.io + resources: + - nodefeaturerules + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: nfd-master +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nfd-master +subjects: + - kind: ServiceAccount + name: nfd-master + namespace: node-feature-discovery +--- +apiVersion: v1 +data: + nfd-worker.conf: | + #core: + # labelWhiteList: + # noPublish: false + # sleepInterval: 60s + # featureSources: [all] + # labelSources: [all] + # klog: + # addDirHeader: false + # alsologtostderr: false + # logBacktraceAt: + # logtostderr: true + # skipHeaders: false + # stderrthreshold: 2 + # v: 0 + # vmodule: + ## NOTE: the following options are not dynamically run-time configurable + ## and require a nfd-worker restart to take effect after being changed + # logDir: + # logFile: + # logFileMaxSize: 1800 + # skipLogHeaders: false + #sources: + # cpu: + # cpuid: + ## NOTE: whitelist has priority over blacklist + # attributeBlacklist: + # - "BMI1" + # - "BMI2" + # - "CLMUL" + # - "CMOV" + # - "CX16" + # - "ERMS" + # - "F16C" + # - "HTT" + # - "LZCNT" + # - "MMX" + # - "MMXEXT" + # - "NX" + # - "POPCNT" + # - "RDRAND" + # - "RDSEED" + # - "RDTSCP" + # - "SGX" + # - "SSE" + # - "SSE2" + # - "SSE3" + # - "SSE4" + # - "SSE42" + # - "SSSE3" + # attributeWhitelist: + # kernel: + # kconfigFile: "/path/to/kconfig" + # configOpts: + # - "NO_HZ" + # - "X86" + # - "DMI" + # pci: + # deviceClassWhitelist: + # - "0200" + # - "03" + # - "12" + # deviceLabelFields: + # - "class" + # - "vendor" + # - "device" + # - "subsystem_vendor" + # - "subsystem_device" + # usb: + # deviceClassWhitelist: + # - "0e" + # - "ef" + # - "fe" + # - "ff" + # deviceLabelFields: + # - "class" + # - "vendor" + # - "device" + # custom: + # # The following feature demonstrates the capabilities of the matchFeatures + # - name: "my custom rule" + # labels: + # my-ng-feature: "true" + # # matchFeatures implements a logical AND over all matcher terms in the + # # list (i.e. 
all of the terms, or per-feature matchers, must match)
+    #      matchFeatures:
+    #        - feature: cpu.cpuid
+    #          matchExpressions:
+    #            AVX512F: {op: Exists}
+    #        - feature: cpu.cstate
+    #          matchExpressions:
+    #            enabled: {op: IsTrue}
+    #        - feature: cpu.pstate
+    #          matchExpressions:
+    #            no_turbo: {op: IsFalse}
+    #            scaling_governor: {op: In, value: ["performance"]}
+    #        - feature: cpu.rdt
+    #          matchExpressions:
+    #            RDTL3CA: {op: Exists}
+    #        - feature: cpu.sst
+    #          matchExpressions:
+    #            bf.enabled: {op: IsTrue}
+    #        - feature: cpu.topology
+    #          matchExpressions:
+    #            hardware_multithreading: {op: IsFalse}
+    #
+    #        - feature: kernel.config
+    #          matchExpressions:
+    #            X86: {op: Exists}
+    #            LSM: {op: InRegexp, value: ["apparmor"]}
+    #        - feature: kernel.loadedmodule
+    #          matchExpressions:
+    #            e1000e: {op: Exists}
+    #        - feature: kernel.selinux
+    #          matchExpressions:
+    #            enabled: {op: IsFalse}
+    #        - feature: kernel.version
+    #          matchExpressions:
+    #            major: {op: In, value: ["5"]}
+    #            minor: {op: Gt, value: ["10"]}
+    #
+    #        - feature: storage.block
+    #          matchExpressions:
+    #            rotational: {op: In, value: ["0"]}
+    #            dax: {op: In, value: ["0"]}
+    #
+    #        - feature: network.device
+    #          matchExpressions:
+    #            operstate: {op: In, value: ["up"]}
+    #            speed: {op: Gt, value: ["100"]}
+    #
+    #        - feature: memory.numa
+    #          matchExpressions:
+    #            node_count: {op: Gt, value: ["2"]}
+    #        - feature: memory.nv
+    #          matchExpressions:
+    #            devtype: {op: In, value: ["nd_dax"]}
+    #            mode: {op: In, value: ["memory"]}
+    #
+    #        - feature: system.osrelease
+    #          matchExpressions:
+    #            ID: {op: In, value: ["fedora", "centos"]}
+    #        - feature: system.name
+    #          matchExpressions:
+    #            nodename: {op: InRegexp, value: ["^worker-X"]}
+    #
+    #        - feature: local.label
+    #          matchExpressions:
+    #            custom-feature-knob: {op: Gt, value: ["100"]}
+    #
+    #    # The following feature demonstrates the capabilities of the matchAny
+    #    - name: "my matchAny rule"
+    #      labels:
+    #        my-ng-feature-2: "my-value"
+    #      # matchAny implements a logical OR over all elements (sub-matchers) in
+    #      # the list (i.e.
at least one feature matcher must match)
+    #      matchAny:
+    #        - matchFeatures:
+    #            - feature: kernel.loadedmodule
+    #              matchExpressions:
+    #                driver-module-X: {op: Exists}
+    #            - feature: pci.device
+    #              matchExpressions:
+    #                vendor: {op: In, value: ["8086"]}
+    #                class: {op: In, value: ["0200"]}
+    #        - matchFeatures:
+    #            - feature: kernel.loadedmodule
+    #              matchExpressions:
+    #                driver-module-Y: {op: Exists}
+    #            - feature: usb.device
+    #              matchExpressions:
+    #                vendor: {op: In, value: ["8086"]}
+    #                class: {op: In, value: ["02"]}
+    #
+    #    # The following features demonstrate label templating capabilities
+    #    - name: "my template rule"
+    #      labelsTemplate: |
+    #      matchFeatures:
+    #        - feature: system.osrelease
+    #          matchExpressions:
+    #            ID: {op: InRegexp, value: ["^open.*"]}
+    #            VERSION_ID.major: {op: In, value: ["13", "15"]}
+    #
+    #    - name: "my template rule 2"
+    #      matchFeatures:
+    #        - feature: pci.device
+    #          matchExpressions:
+    #            class: {op: InRegexp, value: ["^06"]}
+    #            vendor: ["8086"]
+    #        - feature: cpu.cpuid
+    #          matchExpressions:
+    #            AVX: {op: Exists}
+    #
+    #    # The following examples demonstrate vars field and back-referencing
+    #    # previous labels and vars
+    #    - name: "my dummy kernel rule"
+    #      labels:
+    #        "my.kernel.feature": "true"
+    #      matchFeatures:
+    #        - feature: kernel.version
+    #          matchExpressions:
+    #            major: {op: Gt, value: ["2"]}
+    #
+    #    - name: "my dummy rule with no labels"
+    #      vars:
+    #        "my.dummy.var": "1"
+    #      matchFeatures:
+    #        - feature: cpu.cpuid
+    #          matchExpressions: {}
+    #
+    #    - name: "my rule using backrefs"
+    #      labels:
+    #        "my.backref.feature": "true"
+    #      matchFeatures:
+    #        - feature: rule.matched
+    #          matchExpressions:
+    #            my.kernel.feature: {op: IsTrue}
+    #            my.dummy.var: {op: Gt, value: ["0"]}
+    #
+kind: ConfigMap
+metadata:
+  name: nfd-worker-conf
+  namespace: node-feature-discovery
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: nfd-master
+  namespace: node-feature-discovery
+spec:
+  ports:
+    - port: 8080
+      protocol: TCP
+  selector:
+    app: nfd-master
+  type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: nfd
+  name: nfd-master
+  namespace: node-feature-discovery
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nfd-master
+  template:
+    metadata:
+      labels:
+        app: nfd-master
+    spec:
+      affinity:
+        nodeAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - preference:
+                matchExpressions:
+                  - key: node-role.kubernetes.io/master
+                    operator: In
+                    values:
+                      - ""
+              weight: 1
+            - preference:
+                matchExpressions:
+                  - key: node-role.kubernetes.io/control-plane
+                    operator: In
+                    values:
+                      - ""
+              weight: 1
+      containers:
+        - args: []
+          command:
+            - nfd-master
+          env:
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          image: {{ nfd.image }}
+          imagePullPolicy: IfNotPresent
+          livenessProbe:
+            exec:
+              command:
+                - /usr/bin/grpc_health_probe
+                - -addr=:8080
+            initialDelaySeconds: 10
+            periodSeconds: 10
+          name: nfd-master
+          readinessProbe:
+            exec:
+              command:
+                - /usr/bin/grpc_health_probe
+                - -addr=:8080
+            failureThreshold: 10
+            initialDelaySeconds: 5
+            periodSeconds: 10
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - ALL
+            readOnlyRootFilesystem: true
+            runAsNonRoot: true
+          volumeMounts: []
+      serviceAccount: nfd-master
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Equal
+          value: ""
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/control-plane
+          operator: Equal
+          value: ""
+      volumes: []
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    app: nfd
+  name: nfd-worker
+  namespace: node-feature-discovery
+spec:
+  selector:
+    matchLabels:
+      app: nfd-worker
+  template:
+    metadata:
+      labels:
+        app: nfd-worker
+    spec:
+      containers:
+        - args:
+            - -server=nfd-master:8080
+          command:
+            - nfd-worker
+          env:
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          image: {{ nfd.image }}
+          imagePullPolicy: IfNotPresent
+          name: nfd-worker
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - ALL
+            readOnlyRootFilesystem: true
+            runAsNonRoot: true
+          volumeMounts:
+            - mountPath: /host-boot
+              name: host-boot
+              readOnly: true
+            - mountPath: /host-etc/os-release
+              name: host-os-release
+              readOnly: true
+            - mountPath: /host-sys
+              name: host-sys
+              readOnly: true
+            - mountPath: /host-usr/lib
+              name: host-usr-lib
+              readOnly: true
+            - mountPath: /etc/kubernetes/node-feature-discovery/source.d/
+              name: source-d
+              readOnly: true
+            - mountPath: /etc/kubernetes/node-feature-discovery/features.d/
+              name: features-d
+              readOnly: true
+            - mountPath: /etc/kubernetes/node-feature-discovery
+              name: nfd-worker-conf
+              readOnly: true
+      dnsPolicy: ClusterFirstWithHostNet
+      volumes:
+        - hostPath:
+            path: /boot
+          name: host-boot
+        - hostPath:
+            path: /etc/os-release
+          name: host-os-release
+        - hostPath:
+            path: /sys
+          name: host-sys
+        - hostPath:
+            path: /usr/lib
+          name: host-usr-lib
+        - hostPath:
+            path: /etc/kubernetes/node-feature-discovery/source.d/
+          name: source-d
+        - hostPath:
+            path: /etc/kubernetes/node-feature-discovery/features.d/
+          name: features-d
+        - configMap:
+            name: nfd-worker-conf
+          name: nfd-worker-conf
diff --git a/builtin/roles/addons/sc/defaults/main.yaml b/builtin/roles/addons/sc/defaults/main.yaml
new file mode 100644
index 00000000..e0a3eaf2
--- /dev/null
+++ b/builtin/roles/addons/sc/defaults/main.yaml
@@ -0,0 +1,12 @@
+sc:
+  local:
+    enabled: true
+    default: true
+    provisioner_image: openebs/provisioner-localpv:3.3.0
+    linux_utils_image: openebs/linux-utils:3.3.0
+    path: /var/openebs/local
+  nfs: # each k8s_cluster node must have nfs-utils installed
+    enabled: false
+    default: false
+    server: "{{ groups['nfs']|first }}"
+    path: /share/kubernetes
diff --git a/builtin/roles/addons/sc/tasks/local.yaml b/builtin/roles/addons/sc/tasks/local.yaml
new file mode 100644
index 00000000..d535322e
--- /dev/null
+++ b/builtin/roles/addons/sc/tasks/local.yaml
@@ -0,0 +1,9 @@
+---
+- name: Generate local manifest
+  template:
+    src: "local-volume.yaml"
+    dest: "/etc/kubernetes/addons/local-volume.yaml"
+
+- name: Deploy local
+  command: |
+    /usr/local/bin/kubectl apply -f /etc/kubernetes/addons/local-volume.yaml
diff --git a/builtin/roles/addons/sc/tasks/main.yaml b/builtin/roles/addons/sc/tasks/main.yaml
new file mode 100644
index 00000000..18b3d58e
--- /dev/null
+++ b/builtin/roles/addons/sc/tasks/main.yaml
@@ -0,0 +1,6 @@
+---
+- include_tasks: local.yaml
+  when: sc.local.enabled
+
+- include_tasks: nfs.yaml
+  when: sc.nfs.enabled
diff --git a/builtin/roles/addons/sc/tasks/nfs.yaml b/builtin/roles/addons/sc/tasks/nfs.yaml
new file mode 100644
index 00000000..a578d2b9
--- /dev/null
+++ b/builtin/roles/addons/sc/tasks/nfs.yaml
@@ -0,0 +1,11 @@
+---
+- name: Sync nfs provisioner helm to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/sc/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz"
+    dest: "/etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz"
+
+- name: Deploy nfs provisioner
+  command: |
+    helm install nfs-subdir-external-provisioner \
+      /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz --namespace kube-system \
+      --set nfs.server={{ sc.nfs.server }} --set nfs.path={{ sc.nfs.path }} \
+      --set storageClass.defaultClass={% if (sc.nfs.default) %}true{% else %}false{% endif %}
diff --git a/builtin/roles/addons/sc/templates/local-volume.yaml b/builtin/roles/addons/sc/templates/local-volume.yaml
new file mode 100644
index 00000000..53ed3ade
--- /dev/null
+++ b/builtin/roles/addons/sc/templates/local-volume.yaml
@@ -0,0 +1,150 @@
+---
+# Sample storage classes for OpenEBS Local PV
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: local
+  annotations:
+    storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce"]'
+    storageclass.beta.kubernetes.io/is-default-class: {% if (sc.local.default) %}"true"{% else %}"false"{% endif %}
+    openebs.io/cas-type: local
+    cas.openebs.io/config: |
+      - name: StorageType
+        value: "hostpath"
+      - name: BasePath
+        value: "{{ sc.local.path }}"
+provisioner: openebs.io/local
+volumeBindingMode: WaitForFirstConsumer
+reclaimPolicy: Delete
+---
+# Create Maya Service Account
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: openebs-maya-operator
+  namespace: kube-system
+---
+# Define Role that allows operations on K8s pods/deployments
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: openebs-maya-operator
+rules:
+  - apiGroups: ["*"]
+    resources: ["nodes", "nodes/proxy"]
+    verbs: ["*"]
+  - apiGroups: ["*"]
+    resources: ["namespaces", "services", "pods", "pods/exec", "deployments", "deployments/finalizers", "replicationcontrollers", "replicasets", "events", "endpoints", "configmaps", "secrets", "jobs", "cronjobs"]
+    verbs: ["*"]
+  - apiGroups: ["*"]
+    resources: ["statefulsets", "daemonsets"]
+    verbs: ["*"]
+  - apiGroups: ["*"]
+    resources: ["resourcequotas", "limitranges"]
+    verbs: ["list", "watch"]
+  - apiGroups: ["*"]
+    resources: ["ingresses", "horizontalpodautoscalers", "verticalpodautoscalers", "poddisruptionbudgets", "certificatesigningrequests"]
+    verbs: ["list", "watch"]
+  - apiGroups: ["*"]
+    resources: ["storageclasses", "persistentvolumeclaims", "persistentvolumes"]
+    verbs: ["*"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ["get", "list", "create", "update", "delete", "patch"]
+  - apiGroups: ["openebs.io"]
+    resources: ["*"]
+    verbs: ["*"]
+  - nonResourceURLs: ["/metrics"]
+    verbs: ["get"]
+---
+# Bind the Service Account with the Role Privileges.
+# TODO: Check if default account also needs to be there +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openebs-maya-operator +subjects: + - kind: ServiceAccount + name: openebs-maya-operator + namespace: kube-system +roleRef: + kind: ClusterRole + name: openebs-maya-operator + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: openebs-localpv-provisioner + namespace: kube-system + labels: + name: openebs-localpv-provisioner + openebs.io/component-name: openebs-localpv-provisioner + openebs.io/version: 3.3.0 +spec: + selector: + matchLabels: + name: openebs-localpv-provisioner + openebs.io/component-name: openebs-localpv-provisioner + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + name: openebs-localpv-provisioner + openebs.io/component-name: openebs-localpv-provisioner + openebs.io/version: 3.3.0 + spec: + serviceAccountName: openebs-maya-operator + containers: + - name: openebs-provisioner-hostpath + imagePullPolicy: IfNotPresent + image: {{ sc.local.provisioner_image }} + env: + # OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s + # based on this address. This is ignored if empty. + # This is supported for openebs provisioner version 0.5.2 onwards + #- name: OPENEBS_IO_K8S_MASTER + # value: "http://10.128.0.12:8080" + # OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s + # based on this config. This is ignored if empty. + # This is supported for openebs provisioner version 0.5.2 onwards + #- name: OPENEBS_IO_KUBE_CONFIG + # value: "/home/ubuntu/.kube/config" + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: OPENEBS_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as + # environment variable + - name: OPENEBS_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: OPENEBS_IO_ENABLE_ANALYTICS + value: "true" + - name: OPENEBS_IO_INSTALLER_TYPE + value: "openebs-operator-lite" + - name: OPENEBS_IO_HELPER_IMAGE + value: "{{ sc.local.linux_utils_image }}" + # LEADER_ELECTION_ENABLED is used to enable/disable leader election. By default + # leader election is enabled. + #- name: LEADER_ELECTION_ENABLED + # value: "true" + # OPENEBS_IO_IMAGE_PULL_SECRETS environment variable is used to pass the image pull secrets + # to the helper pod launched by local-pv hostpath provisioner + #- name: OPENEBS_IO_IMAGE_PULL_SECRETS + # value: "" + livenessProbe: + exec: + command: + - sh + - -c + - test $(pgrep -c "^provisioner-loc.*") = 1 + initialDelaySeconds: 30 + periodSeconds: 60 diff --git a/builtin/roles/init/init-artifact/defaults/main.yaml b/builtin/roles/init/init-artifact/defaults/main.yaml new file mode 100644 index 00000000..3a51abee --- /dev/null +++ b/builtin/roles/init/init-artifact/defaults/main.yaml @@ -0,0 +1,101 @@ +artifact: + arch: [ "amd64" ] + # offline artifact package for kk. +# artifact_file: /tmp/kubekey.tar.gz + # the md5_file of artifact_file. 
+#  artifact_md5: /tmp/artifact.md5
+  # how to generate cert files. Supported values: IfNotPresent, Always
+  gen_cert_policy: IfNotPresent
+  artifact_url:
+    etcd:
+      amd64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz{% else %}https://github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz{% endif %}
+      arm64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-arm64.tar.gz{% else %}https://github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-arm64.tar.gz{% endif %}
+    kubeadm:
+      amd64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/amd64/kubeadm{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/kubeadm{% endif %}
+      arm64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/arm64/kubeadm{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/arm64/kubeadm{% endif %}
+    kubelet:
+      amd64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/amd64/kubelet{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/kubelet{% endif %}
+      arm64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/arm64/kubelet{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/arm64/kubelet{% endif %}
+    kubectl:
+      amd64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/amd64/kubectl{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/kubectl{% endif %}
+      arm64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/arm64/kubectl{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/arm64/kubectl{% endif %}
+    cni:
+      amd64: |
+        {% if (kkzone == 'cn') %}https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-amd64-{{ cni_version }}.tgz{% else %}https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-amd64-{{ cni_version }}.tgz{% endif %}
+      arm64: |
+        {% if (kkzone == 'cn') %}https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-arm64-{{ cni_version }}.tgz{% else %}https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-arm64-{{ cni_version }}.tgz{% endif %}
+    helm:
+      amd64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-helm.pek3b.qingstor.com/helm-{{ helm_version }}-linux-amd64.tar.gz{% else %}https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz{% endif %}
+      arm64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-helm.pek3b.qingstor.com/helm-{{ helm_version }}-linux-arm64.tar.gz{% else %}https://get.helm.sh/helm-{{ helm_version }}-linux-arm64.tar.gz{% endif %}
+    crictl:
+      amd64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ crictl_version }}/crictl-{{
crictl_version }}-linux-amd64.tar.gz{% else %}https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-amd64.tar.gz{% endif %} + arm64: | + {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-arm64.tar.gz{% else %}https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-arm64.tar.gz{% endif %} + docker: + amd64: | + {% if (kkzone == 'cn') %}https://mirrors.aliyun.com/docker-ce/linux/static/stable/x86_64/docker-{{ docker_version }}.tgz{% else %}https://download.docker.com/linux/static/stable/x86_64/docker-{{ docker_version }}.tgz{% endif %} + arm64: | + {% if (kkzone == 'cn') %}https://mirrors.aliyun.com/docker-ce/linux/static/stable/aarch64/docker-{{ docker_version }}.tgz{% else %}https://download.docker.com/linux/static/stable/aarch64/docker-{{ docker_version }}.tgz{% endif %} + cridockerd: + amd64: | + {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.amd64.tgz{% else %}https://github.com/Mirantis/cri-dockerd/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.amd64.tgz{% endif %} + arm64: | + {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.arm64.tgz{% else %}https://github.com/Mirantis/cri-dockerd/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.arm64.tgz{% endif %} + containerd: + amd64: | + {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-amd64.tar.gz{% else %}https://github.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-amd64.tar.gz{% endif %} + arm64: | + {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-arm64.tar.gz{% else %}https://github.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-arm64.tar.gz{% endif %} + runc: + amd64: | + {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.amd64{% else %}https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.amd64{% endif %} + arm64: | + {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.arm64{% else %}https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.arm64{% endif %} + calicoctl: + amd64: | + {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-amd64{% else %}https://github.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-amd64{% endif %} + arm64: | + {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-arm64{% else 
%}https://github.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-arm64{% endif %}
+    dockercompose:
+      amd64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-x86_64{% else %}https://github.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-x86_64{% endif %}
+      arm64: |
+        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-aarch64{% else %}https://github.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-aarch64{% endif %}
+#    registry:
+#      amd64: |
+#        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-amd64.tgz{% else %}https://github.com/kubesphere/kubekey/releases/download/v2.0.0-alpha.1/registry-{{ registry_version }}-linux-amd64.tgz{% endif %}
#      arm64: |
#        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-arm64.tgz{% else %}https://github.com/kubesphere/kubekey/releases/download/v2.0.0-alpha.1/registry-{{ registry_version }}-linux-arm64.tgz{% endif %}
+    harbor:
+      amd64: |
+        {% if (kkzone == 'cn') %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz{% else %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz{% endif %}
+#      arm64: |
+#        {% if (kkzone == 'cn') %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-{{ harbor_version }}-linux-arm64.tgz{% else %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-{{ harbor_version }}-linux-arm64.tgz{% endif %}
+#    keepalived:
+#      amd64: |
+#        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-amd64.tgz{% else %}https://github.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-amd64.tgz{% endif %}
+#      arm64: |
+#        {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-arm64.tgz{% else %}https://github.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-arm64.tgz{% endif %}
+    oras:
+      amd64: |
+        https://github.com/oras-project/oras/releases/download/{{ oras_version }}/oras_{{ oras_version|slice:'1:' }}_linux_amd64.tar.gz
+      arm64: |
+        https://github.com/oras-project/oras/releases/download/{{ oras_version }}/oras_{{ oras_version|slice:'1:' }}_linux_arm64.tar.gz
+    cilium: https://helm.cilium.io/cilium-{{ cilium_version }}.tgz
+    kubeovn: https://kubeovn.github.io/kube-ovn/kube-ovn-{{ kubeovn_version }}.tgz
+    hybridnet: https://github.com/alibaba/hybridnet/releases/download/helm-chart-{{ hybridnet_version }}/hybridnet-{{ hybridnet_version }}.tgz
+    nfs_provisioner: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/releases/download/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz
+  images:
+    auth: []
+    list: []
diff --git
a/builtin/roles/init/init-artifact/tasks/download_by_curl.yaml b/builtin/roles/init/init-artifact/tasks/download_by_curl.yaml new file mode 100644 index 00000000..8c475b29 --- /dev/null +++ b/builtin/roles/init/init-artifact/tasks/download_by_curl.yaml @@ -0,0 +1,284 @@ +--- +- name: Check binaries for etcd + command: | + artifact_name={{ artifact.artifact_url.etcd[item]|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/etcd/{{ etcd_version }}/{{ item }} + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.etcd[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.etcd[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - etcd_version | defined && etcd_version != "" + +- name: Check binaries for kube + command: | + kube_path={{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ item }} + if [ ! -f $kube_path/kubelet ]; then + mkdir -p $kube_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.kubelet[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $kube_path/kubelet {{ artifact.artifact_url.kubelet[item] }} + fi + if [ ! -f $kube_path/kubeadm ]; then + mkdir -p $kube_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.kubeadm[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $kube_path/kubeadm {{ artifact.artifact_url.kubeadm[item] }} + fi + if [ ! -f $kube_path/kubectl ]; then + mkdir -p $kube_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.kubectl[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $kube_path/kubectl {{ artifact.artifact_url.kubectl[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - kube_version | defined && kube_version != "" + +- name: Check binaries for cni + command: | + artifact_name={{ artifact.artifact_url.cni[item]|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/cni/{{ cni_version }}/{{ item }} + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.cni[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.cni[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - cni_version | defined && cni_version != "" + +- name: Check binaries for helm + command: | + artifact_name={{ artifact.artifact_url.helm[item]|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/helm/{{ helm_version }}/{{ item }} + if [ ! 
-f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.helm[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.helm[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - helm_version | defined && helm_version != "" + +- name: Check binaries for crictl + command: | + artifact_name={{ artifact.artifact_url.crictl[item]|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/crictl/{{ crictl_version }}/{{ item }} + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.crictl[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.crictl[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - crictl_version | defined && crictl_version != "" + +- name: Check binaries for docker + command: | + artifact_name={{ artifact.artifact_url.docker[item]|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/docker/{{ docker_version }}/{{ item }} + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.docker[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.docker[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - docker_version | defined && docker_version != "" + +- name: Check binaries for cridockerd + command: | + artifact_name={{ artifact.artifact_url.cridockerd[item]|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/cri-dockerd/{{ cridockerd_version }}/{{ item }} + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.cridockerd[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.cridockerd[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - cridockerd_version | defined && cridockerd_version != "" + +- name: Check binaries for containerd + command: | + artifact_name={{ artifact.artifact_url.containerd[item]|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/containerd/{{ containerd_version }}/{{ item }} + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.containerd[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.containerd[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - containerd_version | defined && containerd_version != "" + +- name: Check binaries for runc + command: | + artifact_name={{ artifact.artifact_url.runc[item]|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/runc/{{ runc_version }}/{{ item }} + if [ ! 
-f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.runc[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.runc[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - runc_version | defined && runc_version != "" + +- name: Check binaries for calicoctl + command: | + artifact_name={{ artifact.artifact_url.calicoctl[item]|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/cni/{{ calico_version }}/{{ item }} + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.calicoctl[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.calicoctl[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - calico_version | defined && calico_version != "" + +- name: Check binaries for registry + command: | + artifact_name={{ artifact.artifact_url.registry[item]|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/image-registry/registry/{{ registry_version }}/{{ item }} + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.registry[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.registry[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - registry_version | defined && registry_version != "" + +- name: Check binaries for docker-compose + command: | + compose_name=docker-compose + compose_path={{ work_dir }}/kubekey/image-registry/docker-compose/{{ dockercompose_version }}/{{ item }} + if [ ! -f $compose_path/$compose_name ]; then + mkdir -p $compose_path + # download online + curl -L -o $compose_path/$compose_name {{ artifact.artifact_url.dockercompose[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - dockercompose_version | defined && dockercompose_version != "" + +- name: Check binaries for harbor + command: | + harbor_name={{ artifact.artifact_url.harbor[item]|split:"/"|last }} + harbor_path={{ work_dir }}/kubekey/image-registry/harbor/{{ harbor_version }}/{{ item }} + if [ ! -f $harbor_path/$harbor_name ]; then + mkdir -p $harbor_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.harbor[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $harbor_path/$harbor_name {{ artifact.artifact_url.harbor[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - harbor_version | defined && harbor_version != "" + +- name: Check binaries for keepalived + command: | + artifact_name={{ artifact.artifact_url.keepalived[item]|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/image-registry/keepalived/{{ keepalived_version }}/{{ item }} + if [ ! 
-f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.keepalived[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.keepalived[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - keepalived_version | defined && keepalived_version != "" + +- name: Check binaries for oras + command: | + artifact_name={{ artifact.artifact_url.oras[item]|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/oras/{{ oras_version }}/{{ item }} + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.oras[item] }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.oras[item] }} + fi + loop: "{{ artifact.arch }}" + when: + - oras_version | defined && oras_version != "" diff --git a/builtin/roles/init/init-artifact/tasks/download_by_helm.yaml b/builtin/roles/init/init-artifact/tasks/download_by_helm.yaml new file mode 100644 index 00000000..bc02b835 --- /dev/null +++ b/builtin/roles/init/init-artifact/tasks/download_by_helm.yaml @@ -0,0 +1,44 @@ +--- +- name: Check binaries for cilium + command: | + artifact_name={{ artifact.artifact_url.cilium|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/cni + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + cd $artifact_path && helm pull {{ artifact.artifact_url.cilium }} + fi + when: cilium_version | defined + +- name: Check binaries for kubeovn + command: | + artifact_name={{ artifact.artifact_url.kubeovn|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/cni + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + cd $artifact_path && helm pull {{ artifact.artifact_url.kubeovn }} + fi + when: kubeovn_version | defined + +- name: Check binaries for hybridnet + command: | + artifact_name={{ artifact.artifact_url.hybridnet|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/cni + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + cd $artifact_path && helm pull {{ artifact.artifact_url.hybridnet }} + fi + when: hybridnet_version | defined + +- name: Check binaries for nfs_provisioner + command: | + artifact_name={{ artifact.artifact_url.nfs_provisioner|split:"/"|last }} + artifact_path={{ work_dir }}/kubekey/sc + if [ ! 
-f $artifact_path/$artifact_name ]; then
+      mkdir -p $artifact_path
+      # download online
+      cd $artifact_path && helm pull {{ artifact.artifact_url.nfs_provisioner }}
+    fi
+  when: nfs_provisioner_version | defined
diff --git a/builtin/roles/init/init-artifact/tasks/download_by_oras.yaml b/builtin/roles/init/init-artifact/tasks/download_by_oras.yaml
new file mode 100644
index 00000000..c9516029
--- /dev/null
+++ b/builtin/roles/init/init-artifact/tasks/download_by_oras.yaml
@@ -0,0 +1,10 @@
+---
+- name: Log in to oras registry
+  command: |
+    oras login {{ item.url }} -u {{ item.user }} -p {{ item.password }}
+  loop: "{{ artifact.images.auth }}"
+
+- name: Copy images to local
+  command: |
+    oras cp --to-oci-layout {{ item }} {{ work_dir }}/kubekey/images/{{ item|split:"/"|join:"="|safe }}
+  loop: "{{ artifact.images.list }}"
diff --git a/builtin/roles/init/init-artifact/tasks/main.yaml b/builtin/roles/init/init-artifact/tasks/main.yaml
new file mode 100644
index 00000000..710aa89f
--- /dev/null
+++ b/builtin/roles/init/init-artifact/tasks/main.yaml
@@ -0,0 +1,28 @@
+---
+- name: Create work_dir
+  command: |
+    if [ ! -d "{{ work_dir }}" ]; then
+      mkdir -p {{ work_dir }}
+    fi
+
+- name: Extract artifact to work_dir
+  command: |
+    if [ -f "{{ artifact.artifact_file }}" ]; then
+      tar -zxvf {{ artifact.artifact_file }} -C {{ work_dir }}
+    fi
+  when: artifact.artifact_file | defined
+
+- name: Download binaries
+  block:
+    # binaries downloaded by curl
+    - include_tasks: download_by_curl.yaml
+    # helm charts downloaded by helm
+    - include_tasks: download_by_helm.yaml
+    # image packages downloaded by oras
+    - include_tasks: download_by_oras.yaml
+
+- include_tasks: pki.yaml
+
+- name: Chown work_dir to the sudo user
+  command: |
+    chown -R ${SUDO_UID}:${SUDO_GID} {{ work_dir }}
diff --git a/builtin/roles/init/init-artifact/tasks/pki.yaml b/builtin/roles/init/init-artifact/tasks/pki.yaml
new file mode 100644
index 00000000..d4605814
--- /dev/null
+++ b/builtin/roles/init/init-artifact/tasks/pki.yaml
@@ -0,0 +1,34 @@
+---
+- name: Generate root ca file
+  gen_cert:
+    cn: root
+    date: 87600h
+    policy: "{{ artifact.gen_cert_policy }}"
+    out_key: "{{ work_dir }}/kubekey/pki/root.key"
+    out_cert: "{{ work_dir }}/kubekey/pki/root.crt"
+
+- name: Generate etcd cert file
+  gen_cert:
+    root_key: "{{ work_dir }}/kubekey/pki/root.key"
+    root_cert: "{{ work_dir }}/kubekey/pki/root.crt"
+    cn: etcd
+    sans: |
+      [{% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %}]
+    date: 87600h
+    policy: "{{ artifact.gen_cert_policy }}"
+    out_key: "{{ work_dir }}/kubekey/pki/etcd.key"
+    out_cert: "{{ work_dir }}/kubekey/pki/etcd.crt"
+  when: groups['etcd']|length > 0
+
+- name: Generate registry image cert file
+  gen_cert:
+    root_key: "{{ work_dir }}/kubekey/pki/root.key"
+    root_cert: "{{ work_dir }}/kubekey/pki/root.crt"
+    cn: image_registry
+    sans: |
+      [{% for h in groups['image_registry'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %}]
+    date: 87600h
+    policy: "{{ artifact.gen_cert_policy }}"
+    out_key: "{{ work_dir }}/kubekey/pki/image_registry.key"
+    out_cert: "{{ work_dir }}/kubekey/pki/image_registry.crt"
+  when: groups['image_registry']|length > 0
diff --git a/builtin/roles/init/init-os/defaults/main.yaml b/builtin/roles/init/init-os/defaults/main.yaml
new file mode 100644
index 00000000..40e0ec2f
--- /dev/null
+++ b/builtin/roles/init/init-os/defaults/main.yaml
@@ -0,0 +1,2 @@
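+# ntp_servers entries may be plain NTP server addresses or inventory host names;
+# init_ntpserver.yaml resolves a name that matches an inventory_name to that
+# host's internal_ipv4 before writing "server ... iburst" into the chrony config.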
+ntp_servers: [ "cn.pool.ntp.org" ]
+timezone: Asia/Shanghai
diff --git a/builtin/roles/init/init-os/tasks/init_ntpserver.yaml b/builtin/roles/init/init-os/tasks/init_ntpserver.yaml
new file mode 100644
index 00000000..462b4e66
--- /dev/null
+++ b/builtin/roles/init/init-os/tasks/init_ntpserver.yaml
@@ -0,0 +1,42 @@
+---
+- name: Configure ntp server
+  command: |
+    chronyConfigFile="/etc/chrony.conf"
+    if [ {{ os.release.ID }} = "ubuntu" ] || [ {{ os.release.ID_LIKE }} = "debian" ]; then
+      chronyConfigFile="/etc/chrony/chrony.conf"
+    fi
+    # clear old server
+    sed -i '/^server/d' $chronyConfigFile
+    # disable pool
+    sed -i 's/^pool /#pool /g' $chronyConfigFile
+    # delete allow
+    sed -i '/^allow/d' $chronyConfigFile
+    # allow client
+    echo "allow 0.0.0.0/0" >> $chronyConfigFile
+    # delete local
+    sed -i '/^local/d' $chronyConfigFile
+    # add local
+    echo "local stratum 10" >> $chronyConfigFile
+    # add server
+    {% for server in ntp_servers %}
+    {% for _,v in inventory_hosts %}
+    {% if (v.inventory_name == server) %}{% set server = v.internal_ipv4 %}{% endif %}
+    {% endfor %}
+    grep -q '^server {{ server }} iburst' $chronyConfigFile || sed -i '1a server {{ server }} iburst' $chronyConfigFile
+    {% endfor %}
+
+- name: Set timezone
+  command: |
+    timedatectl set-timezone {{ timezone }}
+    timedatectl set-ntp true
+  when: timezone | defined
+
+- name: Restart ntp server
+  command: |
+    chronyService="chronyd.service"
+    if [ {{ os.release.ID }} = "ubuntu" ] || [ {{ os.release.ID_LIKE }} = "debian" ]; then
+      chronyService="chrony.service"
+    fi
+    systemctl restart $chronyService
+  when:
+    - ntp_servers | defined or timezone | defined
diff --git a/builtin/roles/init/init-os/tasks/init_repository.yaml b/builtin/roles/init/init-os/tasks/init_repository.yaml
new file mode 100644
index 00000000..2f70037c
--- /dev/null
+++ b/builtin/roles/init/init-os/tasks/init_repository.yaml
@@ -0,0 +1,75 @@
+---
+- name: Sync repository
+  block:
+    - name: Sync repository file
+      ignore_errors: true
+      copy:
+        src: "{{ work_dir }}/kubekey/repository/{{ os.release.ID_LIKE }}-{{ os.release.VERSION_ID|safe }}-{{ binary_type.stdout }}.iso"
+        dest: "/tmp/kubekey/repository.iso"
+    - name: Mount iso file
+      command: |
+        if [ -f "/tmp/kubekey/repository.iso" ]; then
+          mkdir -p /tmp/kubekey/iso
+          mount -t iso9660 -o loop /tmp/kubekey/repository.iso /tmp/kubekey/iso
+        fi
+  rescue:
+    - name: Unmount iso file
+      command: |
+        if [ -f "/tmp/kubekey/repository.iso" ]; then
+          umount /tmp/kubekey/iso
+        fi
+
+- name: Init repository
+  block:
+    - name: Init debian repository
+      command: |
+        if [ -f "/tmp/kubekey/repository.iso" ]; then
+          # backup
+          mv /etc/apt/sources.list /etc/apt/sources.list.kubekey.bak
+          mv /etc/apt/sources.list.d /etc/apt/sources.list.d.kubekey.bak
+          mkdir -p /etc/apt/sources.list.d
+          # add repository
+          rm -rf /etc/apt/sources.list.d/*
+          echo 'deb [trusted=yes] file:///tmp/kubekey/iso /' > /etc/apt/sources.list.d/kubekey.list
+          # update repository
+          apt-get update
+          # install
+          apt install -y socat conntrack ipset ebtables chrony ipvsadm
+          # reset repository
+          rm -rf /etc/apt/sources.list.d
+          mv /etc/apt/sources.list.kubekey.bak /etc/apt/sources.list
+          mv /etc/apt/sources.list.d.kubekey.bak /etc/apt/sources.list.d
+        else
+          apt install -y socat conntrack ipset ebtables chrony ipvsadm
+        fi
+      when: os.release.ID_LIKE == "debian"
+    - name: Init rhel repository
+      command: |
+        if [ -f "/tmp/kubekey/repository.iso" ]; then
+          # backup
+          mv /etc/yum.repos.d /etc/yum.repos.d.kubekey.bak
+          mkdir -p /etc/yum.repos.d
+          # add repository
+          rm -rf /etc/yum.repos.d/*
+          cat << EOF > /etc/yum.repos.d/CentOS-local.repo
<< EOF > /etc/yum.repos.d/CentOS-local.repo + [base-local] + name=rpms-local + + baseurl=file://%s + + enabled=1 + + gpgcheck=0 + + EOF + # update repository + yum clean all && yum makecache + # install + yum install -y openssl socat conntrack ipset ebtables chrony ipvsadm + # reset repository + rm -rf /etc/yum.repos.d + mv /etc/yum.repos.d.kubekey.bak /etc/yum.repos.d + else + # install + yum install -y openssl socat conntrack ipset ebtables chrony ipvsadm + fi + when: os.release.ID_LIKE == "rhel fedora" diff --git a/builtin/roles/init/init-os/tasks/main.yaml b/builtin/roles/init/init-os/tasks/main.yaml new file mode 100644 index 00000000..b76eab3d --- /dev/null +++ b/builtin/roles/init/init-os/tasks/main.yaml @@ -0,0 +1,25 @@ +--- +- include_tasks: init_repository.yaml + +- include_tasks: init_ntpserver.yaml + +- name: Reset tmp dir + command: | + if [ -d /tmp/kubekey ]; then + rm -rf /tmp/kubekey + fi + mkdir -m 777 -p /tmp/kubekey + +- name: Set hostname + command: | + hostnamectl set-hostname {{ inventory_name }} && sed -i '/^127.0.1.1/s/.*/127.0.1.1 {{ inventory_name }}/g' /etc/hosts + +- name: Sync init os to remote + template: + src: init-os.sh + dest: /etc/kubekey/scripts/init-os.sh + mode: 0755 + +- name: Execute init os script + command: | + chmod +x /etc/kubekey/scripts/init-os.sh && /etc/kubekey/scripts/init-os.sh diff --git a/builtin/roles/init/init-os/templates/init-os.sh b/builtin/roles/init/init-os/templates/init-os.sh new file mode 100644 index 00000000..09b543a2 --- /dev/null +++ b/builtin/roles/init/init-os/templates/init-os.sh @@ -0,0 +1,193 @@ +#!/usr/bin/env bash + +# Copyright 2020 The KubeSphere Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
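+
+# This script is rendered per host by the init-os role and is meant to be
+# idempotent: values are appended, normalized with sed, then de-duplicated,
+# so re-running it converges to the same state. A quick post-run sanity
+# check (the path is the role's default dest; adjust if customized):
+#   bash -n /etc/kubekey/scripts/init-os.sh && sysctl -p >/dev/null && echo ok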
+
+swapoff -a
+sed -i '/^[^#]*swap*/s/^/\#/g' /etc/fstab
+
+# See https://github.com/kubernetes/website/issues/14457
+if [ -f /etc/selinux/config ]; then
+  sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
+fi
+# for ubuntu: sudo apt install selinux-utils
+# for centos: yum install selinux-policy
+if command -v setenforce &> /dev/null
+then
+  setenforce 0
+  getenforce
+fi
+
+echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
+echo 'net.bridge.bridge-nf-call-arptables = 1' >> /etc/sysctl.conf
+echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
+echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.ip_local_reserved_ports = 30000-32767' >> /etc/sysctl.conf
+echo 'net.core.netdev_max_backlog = 65535' >> /etc/sysctl.conf
+echo 'net.core.rmem_max = 33554432' >> /etc/sysctl.conf
+echo 'net.core.wmem_max = 33554432' >> /etc/sysctl.conf
+echo 'net.core.somaxconn = 32768' >> /etc/sysctl.conf
+echo 'net.ipv4.tcp_max_syn_backlog = 1048576' >> /etc/sysctl.conf
+echo 'net.ipv4.neigh.default.gc_thresh1 = 512' >> /etc/sysctl.conf
+echo 'net.ipv4.neigh.default.gc_thresh2 = 2048' >> /etc/sysctl.conf
+echo 'net.ipv4.neigh.default.gc_thresh3 = 4096' >> /etc/sysctl.conf
+echo 'net.ipv4.tcp_retries2 = 15' >> /etc/sysctl.conf
+echo 'net.ipv4.tcp_max_tw_buckets = 1048576' >> /etc/sysctl.conf
+echo 'net.ipv4.tcp_max_orphans = 65535' >> /etc/sysctl.conf
+echo 'net.ipv4.udp_rmem_min = 131072' >> /etc/sysctl.conf
+echo 'net.ipv4.udp_wmem_min = 131072' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.all.rp_filter = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.default.rp_filter = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.all.arp_accept = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.default.arp_accept = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.all.arp_ignore = 1' >> /etc/sysctl.conf
+echo 'net.ipv4.conf.default.arp_ignore = 1' >> /etc/sysctl.conf
+echo 'vm.max_map_count = 262144' >> /etc/sysctl.conf
+echo 'vm.swappiness = 0' >> /etc/sysctl.conf
+echo 'vm.overcommit_memory = 1' >> /etc/sysctl.conf
+echo 'fs.inotify.max_user_instances = 524288' >> /etc/sysctl.conf
+echo 'fs.inotify.max_user_watches = 524288' >> /etc/sysctl.conf
+echo 'fs.pipe-max-size = 4194304' >> /etc/sysctl.conf
+echo 'fs.aio-max-nr = 262144' >> /etc/sysctl.conf
+echo 'kernel.pid_max = 65535' >> /etc/sysctl.conf
+echo 'kernel.watchdog_thresh = 5' >> /etc/sysctl.conf
+echo 'kernel.hung_task_timeout_secs = 5' >> /etc/sysctl.conf
+
+# add for ipv6
+echo 'net.ipv6.conf.all.disable_ipv6 = 0' >> /etc/sysctl.conf
+echo 'net.ipv6.conf.default.disable_ipv6 = 0' >> /etc/sysctl.conf
+echo 'net.ipv6.conf.lo.disable_ipv6 = 0' >> /etc/sysctl.conf
+echo 'net.ipv6.conf.all.forwarding = 1' >> /etc/sysctl.conf
+
+# See https://help.aliyun.com/document_detail/118806.html#uicontrol-e50-ddj-w0y
+# Normalize any pre-existing entries to the values echoed above.
+sed -r -i "s@#{0,}?net.ipv4.tcp_tw_recycle ?= ?(0|1|2)@net.ipv4.tcp_tw_recycle = 0@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.tcp_tw_reuse ?= ?(0|1)@net.ipv4.tcp_tw_reuse = 0@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.conf.all.rp_filter ?= ?(0|1|2)@net.ipv4.conf.all.rp_filter = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.conf.default.rp_filter ?= ?(0|1|2)@net.ipv4.conf.default.rp_filter = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.ip_forward ?= ?(0|1)@net.ipv4.ip_forward = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-arptables ?= ?(0|1)@net.bridge.bridge-nf-call-arptables = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-ip6tables ?= ?(0|1)@net.bridge.bridge-nf-call-ip6tables = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-iptables ?= ?(0|1)@net.bridge.bridge-nf-call-iptables = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.ip_local_reserved_ports ?= ?([0-9]{1,}-{0,1},{0,1}){1,}@net.ipv4.ip_local_reserved_ports = 30000-32767@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?vm.max_map_count ?= ?([0-9]{1,})@vm.max_map_count = 262144@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?vm.swappiness ?= ?([0-9]{1,})@vm.swappiness = 0@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?fs.inotify.max_user_instances ?= ?([0-9]{1,})@fs.inotify.max_user_instances = 524288@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?kernel.pid_max ?= ?([0-9]{1,})@kernel.pid_max = 65535@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?vm.overcommit_memory ?= ?(0|1|2)@vm.overcommit_memory = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?fs.inotify.max_user_watches ?= ?([0-9]{1,})@fs.inotify.max_user_watches = 524288@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?fs.pipe-max-size ?= ?([0-9]{1,})@fs.pipe-max-size = 4194304@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.core.netdev_max_backlog ?= ?([0-9]{1,})@net.core.netdev_max_backlog = 65535@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.core.rmem_max ?= ?([0-9]{1,})@net.core.rmem_max = 33554432@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.core.wmem_max ?= ?([0-9]{1,})@net.core.wmem_max = 33554432@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.tcp_max_syn_backlog ?= ?([0-9]{1,})@net.ipv4.tcp_max_syn_backlog = 1048576@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.neigh.default.gc_thresh1 ?= ?([0-9]{1,})@net.ipv4.neigh.default.gc_thresh1 = 512@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.neigh.default.gc_thresh2 ?= ?([0-9]{1,})@net.ipv4.neigh.default.gc_thresh2 = 2048@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.neigh.default.gc_thresh3 ?= ?([0-9]{1,})@net.ipv4.neigh.default.gc_thresh3 = 4096@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.core.somaxconn ?= ?([0-9]{1,})@net.core.somaxconn = 32768@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.conf.eth0.arp_accept ?= ?(0|1)@net.ipv4.conf.eth0.arp_accept = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?fs.aio-max-nr ?= ?([0-9]{1,})@fs.aio-max-nr = 262144@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.tcp_retries2 ?= ?([0-9]{1,})@net.ipv4.tcp_retries2 = 15@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.tcp_max_tw_buckets ?= ?([0-9]{1,})@net.ipv4.tcp_max_tw_buckets = 1048576@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.tcp_max_orphans ?= ?([0-9]{1,})@net.ipv4.tcp_max_orphans = 65535@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.udp_rmem_min ?= ?([0-9]{1,})@net.ipv4.udp_rmem_min = 131072@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.udp_wmem_min ?= ?([0-9]{1,})@net.ipv4.udp_wmem_min = 131072@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.conf.all.arp_ignore ?= ?(0|1|2)@net.ipv4.conf.all.arp_ignore = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?net.ipv4.conf.default.arp_ignore ?= ?(0|1|2)@net.ipv4.conf.default.arp_ignore = 1@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?kernel.watchdog_thresh ?= ?([0-9]{1,})@kernel.watchdog_thresh = 5@g" /etc/sysctl.conf
+sed -r -i "s@#{0,}?kernel.hung_task_timeout_secs ?= ?([0-9]{1,})@kernel.hung_task_timeout_secs = 5@g" /etc/sysctl.conf
+
+# drop duplicate lines, keeping the first occurrence
+tmpfile="$$.tmp"
+awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/sysctl.conf
+mv $tmpfile /etc/sysctl.conf
+
+# ulimit
+echo "* soft nofile 1048576" >> /etc/security/limits.conf
+echo "* hard nofile 1048576" >> /etc/security/limits.conf
+echo "* soft nproc 65536" >> /etc/security/limits.conf
+echo "* hard nproc 65536" >> /etc/security/limits.conf
+echo "* soft memlock unlimited" >> /etc/security/limits.conf
+echo "* hard memlock unlimited" >> /etc/security/limits.conf
+
+sed -r -i "s@#{0,}?\* soft nofile ?([0-9]{1,})@\* soft nofile 1048576@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* hard nofile ?([0-9]{1,})@\* hard nofile 1048576@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* soft nproc ?([0-9]{1,})@\* soft nproc 65536@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* hard nproc ?([0-9]{1,})@\* hard nproc 65536@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* soft memlock ?([0-9]{1,}([TGKM]B){0,1}|unlimited)@\* soft memlock unlimited@g" /etc/security/limits.conf
+sed -r -i "s@#{0,}?\* hard memlock ?([0-9]{1,}([TGKM]B){0,1}|unlimited)@\* hard memlock unlimited@g" /etc/security/limits.conf
+
+tmpfile="$$.tmp"
+awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/security/limits.conf
+mv $tmpfile /etc/security/limits.conf
+
+systemctl stop firewalld 1>/dev/null 2>/dev/null
+systemctl disable firewalld 1>/dev/null 2>/dev/null
+systemctl stop ufw 1>/dev/null 2>/dev/null
+systemctl disable ufw 1>/dev/null 2>/dev/null
+
+modinfo br_netfilter > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+  modprobe br_netfilter
+  mkdir -p /etc/modules-load.d
+  echo 'br_netfilter' > /etc/modules-load.d/kubekey-br_netfilter.conf
+fi
+
+modinfo overlay > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+  modprobe overlay
+  echo 'overlay' >> /etc/modules-load.d/kubekey-br_netfilter.conf
+fi
+
+modprobe ip_vs
+modprobe ip_vs_rr
+modprobe ip_vs_wrr
+modprobe ip_vs_sh
+
+cat > /etc/modules-load.d/kube_proxy-ipvs.conf << EOF
+ip_vs
+ip_vs_rr
+ip_vs_wrr
+ip_vs_sh
+EOF
+
+# append (not overwrite) the conntrack module so the ip_vs entries above survive
+modprobe nf_conntrack_ipv4 1>/dev/null 2>/dev/null
+if [ $? -eq 0 ]; then
+  echo 'nf_conntrack_ipv4' >> /etc/modules-load.d/kube_proxy-ipvs.conf
+else
+  modprobe nf_conntrack
+  echo 'nf_conntrack' >> /etc/modules-load.d/kube_proxy-ipvs.conf
+fi
+sysctl -p
+
+sed -i ':a;$!{N;ba};s@# kubekey hosts BEGIN.*# kubekey hosts END@@' /etc/hosts
+sed -i '/^$/N;/\n$/N;//D' /etc/hosts
+
+cat >>/etc/hosts<<EOF
+# kubekey hosts BEGIN
+{% for _,v in inventory_hosts %}{{ v.internal_ipv4 }} {{ v.inventory_name }}
+{% endfor %}# kubekey hosts END
+EOF
+
+echo 3 > /proc/sys/vm/drop_caches
+
+# Make sure the iptables utility doesn't use the nftables backend.
+update-alternatives --set iptables /usr/sbin/iptables-legacy >/dev/null 2>&1 || true
+update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy >/dev/null 2>&1 || true
+update-alternatives --set arptables /usr/sbin/arptables-legacy >/dev/null 2>&1 || true
+update-alternatives --set ebtables /usr/sbin/ebtables-legacy >/dev/null 2>&1 || true
+
+
diff --git a/builtin/roles/install/certs/defaults/main.yaml b/builtin/roles/install/certs/defaults/main.yaml
new file mode 100644
index 00000000..c6a5a641
--- /dev/null
+++ b/builtin/roles/install/certs/defaults/main.yaml
@@ -0,0 +1,4 @@
+renew_certs:
+  enabled: false
+  is_docker: "{% if (cri.container_manager == 'docker') %}true{% else %}false{% endif %}"
+  is_kubeadm_alpha: "{% if (kube_version|version:'>/dev/null >>/dev/tcp/127.0.0.1/6443; do sleep 1; done
+echo "## Expiration after renewal ##"
+${kubeadmCerts} check-expiration
diff --git a/builtin/roles/install/cri/defaults/main.yaml b/builtin/roles/install/cri/defaults/main.yaml
new file mode 100644
index 00000000..e94eae55
--- /dev/null
+++ b/builtin/roles/install/cri/defaults/main.yaml
@@ -0,0 +1,12 @@
+cri:
+  # support: systemd, cgroupfs
+  cgroup_driver: systemd
+  sandbox_image: "k8s.gcr.io/pause:3.5"
+  # support: containerd,docker,crio
+  container_manager: docker
+  # the endpoint of containerd
+  cri_socket: "{% if (cri.container_manager=='containerd') %}unix:///var/run/containerd/containerd.sock{% endif %}"
+#  containerd:
+#    data_root: /var/lib/containerd
+  docker:
+    data_root: /var/lib/docker
diff --git a/builtin/roles/install/cri/files/containerd.service b/builtin/roles/install/cri/files/containerd.service
new file mode 100644
index 00000000..5f67110a
--- /dev/null
+++ b/builtin/roles/install/cri/files/containerd.service
@@ -0,0 +1,26 @@
+[Unit]
+Description=containerd container runtime
+Documentation=https://containerd.io
+After=network.target local-fs.target
+
+[Service]
+ExecStartPre=-/sbin/modprobe overlay
+ExecStart=/usr/local/bin/containerd
+
+Type=notify
+Delegate=yes
+KillMode=process
+Restart=always
+RestartSec=5
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNPROC=infinity
+LimitCORE=infinity
+LimitNOFILE=1048576
+# Comment TasksMax if your systemd version does not supports it.
+# Only systemd 226 and above support this version.
+TasksMax=infinity
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target
diff --git a/builtin/roles/install/cri/files/cri_docker.service b/builtin/roles/install/cri/files/cri_docker.service
new file mode 100644
index 00000000..8de02ba1
--- /dev/null
+++ b/builtin/roles/install/cri/files/cri_docker.service
@@ -0,0 +1,36 @@
+[Unit]
+Description=CRI Interface for Docker Application Container Engine
+Documentation=https://docs.mirantis.com
+
+[Service]
+Type=notify
+ExecStart=/usr/local/bin/cri-dockerd --pod-infra-container-image {{ cri.sandbox_image }}
+ExecReload=/bin/kill -s HUP $MAINPID
+TimeoutSec=0
+RestartSec=2
+Restart=always
+
+# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
+# Both the old, and new location are accepted by systemd 229 and up, so using the old location
+# to make them work for either version of systemd.
+StartLimitBurst=3
+
+# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
+# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
+# this option work for either version of systemd.
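+# (On systemd >= 230 the equivalent spelling is StartLimitIntervalSec=60; the
+# legacy name below keeps one unit file working on both old and new versions.)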
+StartLimitInterval=60s + +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity + +# Comment TasksMax if your systemd version does not support it. +# Only systemd 226 and above support this option. +TasksMax=infinity +Delegate=yes +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/builtin/roles/install/cri/files/docker.service b/builtin/roles/install/cri/files/docker.service new file mode 100644 index 00000000..929b8a43 --- /dev/null +++ b/builtin/roles/install/cri/files/docker.service @@ -0,0 +1,47 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +# After=network-online.target firewalld.service containerd.service +# Wants=network-online.target +# Requires=docker.socket containerd.service + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/local/bin/dockerd --containerd=/run/containerd/containerd.sock +ExecReload=/bin/kill -s HUP $MAINPID +TimeoutSec=0 +RestartSec=2 +Restart=always + +# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. +# Both the old, and new location are accepted by systemd 229 and up, so using the old location +# to make them work for either version of systemd. +StartLimitBurst=3 + +# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. +# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make +# this option work for either version of systemd. +StartLimitInterval=60s + +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity + +# Comment TasksMax if your systemd version does not support it. +# Only systemd 226 and above support this option. 
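+# (If unsure, "systemctl --version" reports the running systemd version.)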
+TasksMax=infinity
+
+# set delegate yes so that systemd does not reset the cgroups of docker containers
+Delegate=yes
+
+# kill only the docker process, not all processes in the cgroup
+KillMode=process
+OOMScoreAdjust=-500
+
+[Install]
+WantedBy=multi-user.target
diff --git a/builtin/roles/install/cri/tasks/install_containerd.yaml b/builtin/roles/install/cri/tasks/install_containerd.yaml
new file mode 100644
index 00000000..86af736c
--- /dev/null
+++ b/builtin/roles/install/cri/tasks/install_containerd.yaml
@@ -0,0 +1,45 @@
+---
+- name: Check if runc is installed
+  ignore_errors: true
+  command: runc --version
+  register: runc_install_version
+
+- name: Sync runc binary to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/runc/{{ runc_version }}/{{ binary_type.stdout }}/runc.{{ binary_type.stdout }}"
+    dest: "/usr/local/bin/runc"
+    mode: 0755
+  when: runc_install_version.stderr != ""
+
+- name: Check if containerd is installed
+  ignore_errors: true
+  command: containerd --version
+  register: containerd_install_version
+
+- name: Sync containerd binary to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/containerd/{{ containerd_version }}/{{ binary_type.stdout }}/containerd-{{ containerd_version|slice:'1:' }}-linux-{{ binary_type.stdout }}.tar.gz"
+    dest: "/tmp/kubekey/containerd-{{ containerd_version|slice:'1:' }}-linux-{{ binary_type.stdout }}.tar.gz"
+  when: containerd_install_version.stderr != ""
+
+- name: Unpackage containerd binary
+  command: |
+    tar -xvf /tmp/kubekey/containerd-{{ containerd_version|slice:'1:' }}-linux-{{ binary_type.stdout }}.tar.gz --strip-components=1 -C /usr/local/bin/
+  when: containerd_install_version.stderr != ""
+
+- name: Generate containerd config file
+  template:
+    src: containerd.config
+    dest: /etc/containerd/config.toml
+  when: containerd_install_version.stderr != ""
+
+- name: Generate containerd service file
+  copy:
+    src: containerd.service
+    dest: /etc/systemd/system/containerd.service
+  when: containerd_install_version.stderr != ""
+
+- name: Start containerd
+  command: |
+    systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
+  when: containerd_install_version.stderr != ""
diff --git a/builtin/roles/install/cri/tasks/install_crictl.yaml b/builtin/roles/install/cri/tasks/install_crictl.yaml
new file mode 100644
index 00000000..111003db
--- /dev/null
+++ b/builtin/roles/install/cri/tasks/install_crictl.yaml
@@ -0,0 +1,21 @@
+---
+- name: Check if crictl is installed
+  ignore_errors: true
+  command: crictl --version
+  register: crictl_install_version
+
+- name: Sync crictl binary to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/crictl/{{ crictl_version }}/{{ binary_type.stdout }}/crictl-{{ crictl_version }}-linux-{{ binary_type.stdout }}.tar.gz"
+    dest: "/tmp/kubekey/crictl-{{ crictl_version }}-linux-{{ binary_type.stdout }}.tar.gz"
+  when: crictl_install_version.stderr != ""
+
+- name: Unpackage crictl binary
+  command: |
+    tar -xvf /tmp/kubekey/crictl-{{ crictl_version }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin/
+  when: crictl_install_version.stderr != ""
+
+- name: Generate crictl config file
+  template:
+    src: crictl.config
+    dest: /etc/crictl.yaml
diff --git a/builtin/roles/install/cri/tasks/install_cridockerd.yaml b/builtin/roles/install/cri/tasks/install_cridockerd.yaml
new file mode 100644
index 00000000..57e9671c
--- /dev/null
+++ b/builtin/roles/install/cri/tasks/install_cridockerd.yaml
@@ -0,0 +1,33 @@
+---
+- name: Check if cri-dockerd is installed
+  ignore_errors: true
+  command: cri-dockerd --version
+  register: cridockerd_install_version
+
+- name: Sync cri-dockerd binary to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/cri-dockerd/{{ cridockerd_version }}/{{ binary_type.stdout }}/cri-dockerd-{{ cridockerd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
+    dest: "/tmp/kubekey/cri-dockerd-{{ cridockerd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
+  when: cridockerd_install_version.stderr != ""
+
+- name: Generate cri-dockerd config file
+  template:
+    src: cri-dockerd.config
+    dest: /etc/cri-dockerd.yaml
+  when: cridockerd_install_version.stderr != ""
+
+- name: Unpackage cri-dockerd binary
+  command: |
+    tar -xvf /tmp/kubekey/cri-dockerd-{{ cridockerd_version }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin/
+  when: cridockerd_install_version.stderr != ""
+
+- name: Generate cri-dockerd service file
+  template:
+    src: cri_docker.service
+    dest: /etc/systemd/system/cri-dockerd.service
+  when: cridockerd_install_version.stderr != ""
+
+- name: Start cri-dockerd service
+  command: |
+    systemctl daemon-reload && systemctl start cri-dockerd.service && systemctl enable cri-dockerd.service
+  when: cridockerd_install_version.stderr != ""
diff --git a/builtin/roles/install/cri/tasks/install_docker.yaml b/builtin/roles/install/cri/tasks/install_docker.yaml
new file mode 100644
index 00000000..5ff46608
--- /dev/null
+++ b/builtin/roles/install/cri/tasks/install_docker.yaml
@@ -0,0 +1,40 @@
+---
+- name: Check if docker is installed
+  ignore_errors: true
+  command: docker --version
+  register: docker_install_version
+
+- name: Sync docker binary to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/docker/{{ docker_version }}/{{ binary_type.stdout }}/docker-{{ docker_version }}.tgz"
+    dest: "/tmp/kubekey/docker-{{ docker_version }}.tgz"
+  when: docker_install_version.stderr != ""
+
+- name: Unpackage docker binary
+  command: |
+    tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ docker_version }}.tgz --wildcards 'docker/*'
+  when: docker_install_version.stderr != ""
+
+- name: Generate docker config file
+  template:
+    src: docker.config
+    dest: /etc/docker/daemon.json
+  when: docker_install_version.stderr != ""
+
+- name: Generate docker service file
+  copy:
+    src: docker.service
+    dest: /etc/systemd/system/docker.service
+  when: docker_install_version.stderr != ""
+
+- name: Generate containerd service file
+  copy:
+    src: containerd.service
+    dest: /etc/systemd/system/containerd.service
+  when: docker_install_version.stderr != ""
+
+- name: Start docker service
+  command: |
+    systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
+    systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
+  when: docker_install_version.stderr != ""
diff --git a/builtin/roles/install/cri/tasks/main.yaml b/builtin/roles/install/cri/tasks/main.yaml
new file mode 100644
index 00000000..f372fb66
--- /dev/null
+++ b/builtin/roles/install/cri/tasks/main.yaml
@@ -0,0 +1,19 @@
+---
+# install crictl
+- include_tasks: install_crictl.yaml
+
+# install docker
+- include_tasks: install_docker.yaml
+  when: cri.container_manager == "docker"
+
+# install containerd
+- include_tasks: install_containerd.yaml
+  when: cri.container_manager == "containerd"
+
+# install cri-dockerd (required with docker since kubernetes v1.24)
+- include_tasks: install_cridockerd.yaml
+  when:
+    - cri.container_manager == "docker"
+    - kube_version|version:'>=v1.24.0'
+
+
diff --git a/builtin/roles/install/cri/templates/containerd.config b/builtin/roles/install/cri/templates/containerd.config
new file mode 100644
index 00000000..9f40e623
--- /dev/null
+++ b/builtin/roles/install/cri/templates/containerd.config
@@ -0,0 +1,76 @@
+version = 2
+
+root = "{{ cri.containerd.data_root|default_if_none:'/var/lib/containerd' }}"
+state = "/run/containerd"
+
+[grpc]
+  address = "/run/containerd/containerd.sock"
+  uid = 0
+  gid = 0
+  max_recv_message_size = 16777216
+  max_send_message_size = 16777216
+
+[ttrpc]
+  address = ""
+  uid = 0
+  gid = 0
+
+[debug]
+  address = ""
+  uid = 0
+  gid = 0
+  level = ""
+
+[metrics]
+  address = ""
+  grpc_histogram = false
+
+[cgroup]
+  path = ""
+
+[timeouts]
+  "io.containerd.timeout.shim.cleanup" = "5s"
+  "io.containerd.timeout.shim.load" = "5s"
+  "io.containerd.timeout.shim.shutdown" = "3s"
+  "io.containerd.timeout.task.state" = "2s"
+
+[plugins]
+  [plugins."io.containerd.grpc.v1.cri"]
+    sandbox_image = "{{ cri.sandbox_image }}"
+    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
+      runtime_type = "io.containerd.runc.v2"
+      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+        SystemdCgroup = {% if (cri.cgroup_driver=="systemd") %}true{% else %}false{% endif %}
+    [plugins."io.containerd.grpc.v1.cri".cni]
+      bin_dir = "/opt/cni/bin"
+      conf_dir = "/etc/cni/net.d"
+      max_conf_num = 1
+      conf_template = ""
+    [plugins."io.containerd.grpc.v1.cri".registry]
+      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
+        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+          {% if (registry.mirrors|defined) %}
+          endpoint = {{ registry.mirrors|to_json|safe }}
+          {% else %}
+          endpoint = ["https://registry-1.docker.io"]
+          {% endif %}
+        {% for ir in registry.insecure_registries %}
+        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ ir }}"]
+          endpoint = ["http://{{ ir }}"]
+        {% endfor %}
+
+    {% if (registry.auths|length > 0) %}
+    [plugins."io.containerd.grpc.v1.cri".registry.configs]
+      {% for ir in registry.auths %}
+      [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ ir.repo }}".auth]
+        username = "{{ ir.username }}"
+        password = "{{ ir.password }}"
+      {% if (ir.ca_file|defined) %}
+      [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ ir.repo }}".tls]
+        ca_file = "{{ ir.ca_file }}"
+        cert_file = "{{ ir.crt_file }}"
+        key_file = "{{ ir.key_file }}"
+        insecure_skip_verify = {{ ir.skip_ssl }}
+      {% endif %}
+      {% endfor %}
+    {% endif %}
diff --git a/builtin/roles/install/cri/templates/crictl.config b/builtin/roles/install/cri/templates/crictl.config
new file mode 100644
index 00000000..10891f63
--- /dev/null
+++ b/builtin/roles/install/cri/templates/crictl.config
@@ -0,0 +1,5 @@
+runtime-endpoint: {{ cri.container_runtime_endpoint }}
+image-endpoint: {{ cri.container_runtime_endpoint }}
+timeout: 5
+debug: false
+pull-image-on-create: false
diff --git a/builtin/roles/install/cri/templates/docker.config b/builtin/roles/install/cri/templates/docker.config
new file mode 100644
index 00000000..5540fd1c
--- /dev/null
+++ b/builtin/roles/install/cri/templates/docker.config
@@ -0,0 +1,19 @@
+{
+  "log-opts": {
+    "max-size": "5m",
+    "max-file": "3"
+  },
+  {% if (cri.docker.data_root|defined) %}
+  "data-root": "{{ cri.docker.data_root }}",
+  {% endif %}
+  {% if (registry.mirrors|defined) %}
+  "registry-mirrors": {{ registry.mirrors|to_json|safe }},
+  {% endif %}
+  {% if (registry.insecure_registries|defined) %}
+  "insecure-registries": {{ registry.insecure_registries|to_json|safe }},
+  {% endif %}
+  {% if (cri.docker.bridge_ip|defined) %}
+  "bip": "{{ cri.docker.bridge_ip }}",
}}", + {% endif %} + "exec-opts": ["native.cgroupdriver={{ cri.cgroup_driver }}"] +} diff --git a/builtin/roles/install/etcd/defaults/main.yaml b/builtin/roles/install/etcd/defaults/main.yaml new file mode 100644 index 00000000..bae4b727 --- /dev/null +++ b/builtin/roles/install/etcd/defaults/main.yaml @@ -0,0 +1,24 @@ +etcd: + # endpoints: ["https://172.1.1.1:2379"] + # etcd binary + state: new +# env config + env: + election_timeout: 5000 + heartbeat_interval: 250 + compaction_retention: 8 + snapshot_count: 10000 + data_dir: /var/lib/etcd +# metrics: basic +# quota_backend_bytes: 100 +# max_request_bytes: 100 +# max_snapshots: 100 +# max_wals: 5 +# log_level: info +# unsupported_arch: arm64 +# backup config + backup: + backup_dir: /var/lib/etcd-backup + keep_backup_number: 5 +# etcd_backup_script: /usr/local/bin/kube-scripts/backup-etcd.sh + on_calendar: "*-*-* *:00/30:00" diff --git a/builtin/roles/install/etcd/files/backup.service b/builtin/roles/install/etcd/files/backup.service new file mode 100644 index 00000000..a03f4226 --- /dev/null +++ b/builtin/roles/install/etcd/files/backup.service @@ -0,0 +1,5 @@ +[Unit] +Description=Backup ETCD +[Service] +Type=oneshot +ExecStart=/usr/local/bin/kube-scripts/backup_etcd.sh diff --git a/builtin/roles/install/etcd/files/etcd.service b/builtin/roles/install/etcd/files/etcd.service new file mode 100644 index 00000000..d26a6958 --- /dev/null +++ b/builtin/roles/install/etcd/files/etcd.service @@ -0,0 +1,16 @@ +[Unit] +Description=etcd +After=network.target + +[Service] +User=root +Type=notify +EnvironmentFile=/etc/etcd.env +ExecStart=/usr/local/bin/etcd +NotifyAccess=all +RestartSec=10s +LimitNOFILE=40000 +Restart=always + +[Install] +WantedBy=multi-user.target diff --git a/builtin/roles/install/etcd/tasks/backup_etcd.yaml b/builtin/roles/install/etcd/tasks/backup_etcd.yaml new file mode 100644 index 00000000..ddbc4e69 --- /dev/null +++ b/builtin/roles/install/etcd/tasks/backup_etcd.yaml @@ -0,0 +1,29 @@ +--- +- name: Generate default backup etcd script + template: + src: "backup.sh" + dest: "/usr/local/bin/kube-scripts/backup-etcd.sh" + mode: 777 + when: + - ! 
etcd.backup.etcd_backup_script|defined + +- name: Sync custom backup etcd script + template: + src: "{{ etcd.backup.etcd_backup_script }}" + dest: "/usr/local/bin/kube-scripts/backup-etcd.sh" + mode: 777 + when: etcd.backup.etcd_backup_script|defined + +- name: Generate backup etcd service + copy: + src: "backup.service" + dest: "/etc/systemd/system/backup-etcd.service" + +- name: Generate backup etcd timer + template: + src: "backup.timer" + dest: "/etc/systemd/system/backup-etcd.timer" + +- name: Enable etcd timer + command: | + systemctl daemon-reload && systemctl enable --now backup-etcd.timer diff --git a/builtin/roles/install/etcd/tasks/install_etcd.yaml b/builtin/roles/install/etcd/tasks/install_etcd.yaml new file mode 100644 index 00000000..f25ab7cf --- /dev/null +++ b/builtin/roles/install/etcd/tasks/install_etcd.yaml @@ -0,0 +1,38 @@ +--- +- name: Sync etcd binary to node + copy: + src: "{{ work_dir }}/kubekey/etcd/{{ etcd_version }}/{{ binary_type.stdout }}/etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}.tar.gz" + dest: "/tmp/kubekey/etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}.tar.gz" + +- name: Extract etcd binary + command: | + tar --strip-components=1 -C /usr/local/bin/ -xvf /tmp/kubekey/etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}.tar.gz \ + --wildcards etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}/etcd* + +- name: Sync ca file to remote + copy: + src: "{{ work_dir }}/kubekey/pki/root.crt" + dest: "/etc/ssl/etcd/ssl/ca.crt" + +- name: Sync etcd cert file to remote + copy: + src: "{{ work_dir }}/kubekey/pki/etcd.crt" + dest: "/etc/ssl/etcd/ssl/server.crt" + +- name: Sync etcd key file to remote + copy: + src: "{{ work_dir }}/kubekey/pki/etcd.key" + dest: "/etc/ssl/etcd/ssl/server.key" + +- name: Generate etcd env file + template: + src: "etcd.env" + dest: "/etc/etcd.env" + +- name: Generate etcd systemd service file + copy: + src: "etcd.service" + dest: "/etc/systemd/system/etcd.service" + +- name: Start etcd service + command: systemctl daemon-reload && systemctl start etcd && systemctl enable etcd diff --git a/builtin/roles/install/etcd/tasks/main.yaml b/builtin/roles/install/etcd/tasks/main.yaml new file mode 100644 index 00000000..b33fa286 --- /dev/null +++ b/builtin/roles/install/etcd/tasks/main.yaml @@ -0,0 +1,26 @@ +--- +- name: Check if etcd is installed + ignore_errors: true + command: etcd --version + run_once: true + register: etcd_install_version + +- name: Init etcd + when: etcd_install_version.stderr != "" + block: + - name: Add etcd user + command: | + useradd -M -c 'Etcd user' -s /sbin/nologin -r etcd || : + - name: Create etcd directories + command: | + if [ ! 
-d "{{ item }}" ]; then + mkdir -p {{ item }} && chown -R etcd {{ item }} + fi + loop: + - "/var/lib/etcd" + +- include_tasks: install_etcd.yaml + when: etcd_install_version.stderr != "" + +- include_tasks: backup_etcd.yaml + when: etcd_install_version.stderr != "" diff --git a/builtin/roles/install/etcd/templates/backup.script b/builtin/roles/install/etcd/templates/backup.script new file mode 100644 index 00000000..e9165227 --- /dev/null +++ b/builtin/roles/install/etcd/templates/backup.script @@ -0,0 +1,33 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +ETCDCTL_PATH='/usr/local/bin/etcdctl' +ENDPOINTS='https://{{ internal_ipv4 }}:2379' +ETCD_DATA_DIR="{{ etcd.env.data_dir }}" +BACKUP_DIR="{{ etcd.backup.backup_dir }}/etcd-$(date +%Y-%m-%d-%H-%M-%S)" +KEEPBACKUPNUMBER='{{ etcd.backup.keep_backup_number }}' +((KEEPBACKNUMBER++)) + +ETCDCTL_CERT="/etc/ssl/etcd/ssl/server.crt" +ETCDCTL_KEY="/etc/ssl/etcd/ssl/server.key" +ETCDCTL_CA_FILE="/etc/ssl/etcd/ssl/ca.crt" + +[ ! -d $BACKUP_DIR ] && mkdir -p $BACKUP_DIR + +export ETCDCTL_API=2;$ETCDCTL_PATH backup --data-dir $ETCD_DATA_DIR --backup-dir $BACKUP_DIR + +sleep 3 + +{ +export ETCDCTL_API=3;$ETCDCTL_PATH --endpoints="$ENDPOINTS" snapshot save $BACKUP_DIR/snapshot.db \ + --cacert="$ETCDCTL_CA_FILE" \ + --cert="$ETCDCTL_CERT" \ + --key="$ETCDCTL_KEY" +} > /dev/null + +sleep 3 + +cd $BACKUP_DIR/../ && ls -lt |awk '{if(NR > '$KEEPBACKUPNUMBER'){print "rm -rf "$9}}'|sh diff --git a/builtin/roles/install/etcd/templates/backup.timer b/builtin/roles/install/etcd/templates/backup.timer new file mode 100644 index 00000000..6141397d --- /dev/null +++ b/builtin/roles/install/etcd/templates/backup.timer @@ -0,0 +1,7 @@ +[Unit] +Description=Timer to backup ETCD +[Timer] +OnCalendar={{ etcd.backup.on_calendar }} +Unit=backup-etcd.service +[Install] +WantedBy=multi-user.target diff --git a/builtin/roles/install/etcd/templates/etcd.env b/builtin/roles/install/etcd/templates/etcd.env new file mode 100644 index 00000000..e817b7a9 --- /dev/null +++ b/builtin/roles/install/etcd/templates/etcd.env @@ -0,0 +1,53 @@ +ETCD_DATA_DIR={{ etcd.env.data_dir }} +ETCD_ADVERTISE_CLIENT_URLS={{ internal_ipv4|stringformat:"https://%s:2379" }} +ETCD_INITIAL_ADVERTISE_PEER_URLS={{ internal_ipv4|stringformat:"https://%s:2380" }} +ETCD_INITIAL_CLUSTER_STATE={{ etcd.state }} +ETCD_LISTEN_CLIENT_URLS={{ internal_ipv4|stringformat:"https://%s:2379" }},https://127.0.0.1:2379 +ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd +ETCD_LISTEN_PEER_URLS={{ internal_ipv4|stringformat:"https://%s:2380" }} +ETCD_NAME={{ inventory_name }} +ETCD_PROXY=off +ETCD_ENABLE_V2=true +ETCD_INITIAL_CLUSTER={% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}{{ hv.inventory_name }}={{ hv.internal_ipv4|stringformat:"https://%s:2380" }}{% if (not forloop.Last) %},{% endif %}{% endfor %} +ETCD_ELECTION_TIMEOUT={{ etcd.env.election_timeout }} +ETCD_HEARTBEAT_INTERVAL={{ etcd.env.heartbeat_interval }} +ETCD_AUTO_COMPACTION_RETENTION={{ etcd.env.compaction_retention }} +ETCD_SNAPSHOT_COUNT={{ etcd.env.snapshot_count }} +{% if (etcd.metrics|defined) %} +ETCD_METRICS={{ etcd.env.metrics }} +{% endif %} +{% if (etcd.env.quota_backend_bytes|defined) %} +ETCD_QUOTA_BACKEND_BYTES={{ etcd.env.quota_backend_bytes }} +{% endif %} +{% if (etcd.env.max_request_bytes|defined) %} +ETCD_MAX_REQUEST_BYTES={{ etcd.env.max_request_bytes }} +{% endif %} +{% if (etcd.env.max_snapshots|defined) %} +ETCD_MAX_SNAPSHOTS={{ etcd.env.max_snapshots }} +{% endif %} +{% if (etcd.env.max_wals|defined) %} +ETCD_MAX_WALS={{ 
+{% endif %}
+{% if (etcd.env.log_level|defined) %}
+ETCD_LOG_LEVEL={{ etcd.env.log_level }}
+{% endif %}
+{% if (etcd.env.unsupported_arch|defined) %}
+ETCD_UNSUPPORTED_ARCH={{ etcd.env.unsupported_arch }}
+{% endif %}
+
+# TLS settings
+ETCD_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.crt
+ETCD_CERT_FILE=/etc/ssl/etcd/ssl/server.crt
+ETCD_KEY_FILE=/etc/ssl/etcd/ssl/server.key
+ETCD_CLIENT_CERT_AUTH=true
+
+ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.crt
+ETCD_PEER_CERT_FILE=/etc/ssl/etcd/ssl/server.crt
+ETCD_PEER_KEY_FILE=/etc/ssl/etcd/ssl/server.key
+ETCD_PEER_CLIENT_CERT_AUTH=true
+
+# CLI settings
+ETCDCTL_ENDPOINTS=https://127.0.0.1:2379
+ETCDCTL_CACERT=/etc/ssl/etcd/ssl/ca.crt
+ETCDCTL_CERT=/etc/ssl/etcd/ssl/server.crt
+ETCDCTL_KEY=/etc/ssl/etcd/ssl/server.key
diff --git a/builtin/roles/install/image-registry/defaults/main.yaml b/builtin/roles/install/image-registry/defaults/main.yaml
new file mode 100644
index 00000000..2c03fafb
--- /dev/null
+++ b/builtin/roles/install/image-registry/defaults/main.yaml
@@ -0,0 +1,44 @@
+image_registry:
+  # registry type. support: harbor, registry
+  type: harbor
+  # Virtual IP address for repository high availability. The virtual IP address must be an available (unused) address.
+#  ha_vip: 192.168.122.59
+  harbor:
+    admin_password: Harbor12345
+  registry:
+    version: 2
+    config:
+      storage: nfs
+      nfs_dir: /share/registry
+    storage:
+      filesystem:
+        rootdirectory: /var/lib/registry
+#        nfs_mount: /repository/registry # if set, rootdirectory will be mounted from this NFS export.
+#      azure:
+#        accountname: accountname
+#        accountkey: base64encodedaccountkey
+#        container: containername
+#      gcs:
+#        bucket: bucketname
+#        keyfile: /path/to/keyfile
+#        credentials:
+#          type: service_account
+#          project_id: project_id_string
+#          private_key_id: private_key_id_string
+#          private_key: private_key_string
+#          client_email: client@example.com
+#          client_id: client_id_string
+#          auth_uri: http://example.com/auth_uri
+#          token_uri: http://example.com/token_uri
+#          auth_provider_x509_cert_url: http://example.com/provider_cert_url
+#          client_x509_cert_url: http://example.com/client_cert_url
+#        rootdirectory: /gcs/object/name/prefix
+#      s3:
+#        accesskey: awsaccesskey
+#        secretkey: awssecretkey
+#        region: us-west-1
+#        regionendpoint: http://myobjects.local
+#        bucket: bucketname
+#        keyid: mykeyid
+#        rootdirectory: /s3/object/name/prefix
+
diff --git a/builtin/roles/install/image-registry/files/containerd.service b/builtin/roles/install/image-registry/files/containerd.service
new file mode 100644
index 00000000..5f67110a
--- /dev/null
+++ b/builtin/roles/install/image-registry/files/containerd.service
@@ -0,0 +1,26 @@
+[Unit]
+Description=containerd container runtime
+Documentation=https://containerd.io
+After=network.target local-fs.target
+
+[Service]
+ExecStartPre=-/sbin/modprobe overlay
+ExecStart=/usr/local/bin/containerd
+
+Type=notify
+Delegate=yes
+KillMode=process
+Restart=always
+RestartSec=5
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNPROC=infinity
+LimitCORE=infinity
+LimitNOFILE=1048576
+# Comment TasksMax if your systemd version does not supports it.
+# Only systemd 226 and above support this version.
+TasksMax=infinity +OOMScoreAdjust=-999 + +[Install] +WantedBy=multi-user.target diff --git a/builtin/roles/install/image-registry/files/docker.service b/builtin/roles/install/image-registry/files/docker.service new file mode 100644 index 00000000..929b8a43 --- /dev/null +++ b/builtin/roles/install/image-registry/files/docker.service @@ -0,0 +1,47 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +# After=network-online.target firewalld.service containerd.service +# Wants=network-online.target +# Requires=docker.socket containerd.service + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/local/bin/dockerd --containerd=/run/containerd/containerd.sock +ExecReload=/bin/kill -s HUP $MAINPID +TimeoutSec=0 +RestartSec=2 +Restart=always + +# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. +# Both the old, and new location are accepted by systemd 229 and up, so using the old location +# to make them work for either version of systemd. +StartLimitBurst=3 + +# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. +# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make +# this option work for either version of systemd. +StartLimitInterval=60s + +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity + +# Comment TasksMax if your systemd version does not support it. +# Only systemd 226 and above support this option. 
+TasksMax=infinity
+
+# set delegate yes so that systemd does not reset the cgroups of docker containers
+Delegate=yes
+
+# kill only the docker process, not all processes in the cgroup
+KillMode=process
+OOMScoreAdjust=-500
+
+[Install]
+WantedBy=multi-user.target
diff --git a/builtin/roles/install/image-registry/tasks/install_docker.yaml b/builtin/roles/install/image-registry/tasks/install_docker.yaml
new file mode 100644
index 00000000..4ec27840
--- /dev/null
+++ b/builtin/roles/install/image-registry/tasks/install_docker.yaml
@@ -0,0 +1,40 @@
+---
+- name: Check if docker is installed
+  ignore_errors: true
+  command: docker --version
+  register: docker_install_version
+
+- name: Sync docker binary to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/docker/{{ docker_version }}/{{ binary_type.stdout }}/docker-{{ docker_version }}.tgz"
+    dest: "/tmp/kubekey/docker-{{ docker_version }}.tgz"
+  when: docker_install_version.stderr != ""
+
+- name: Generate docker config file
+  template:
+    src: "docker.config"
+    dest: "/etc/docker/daemon.json"
+  when: docker_install_version.stderr != ""
+
+- name: Unpackage docker binary
+  command: |
+    tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ docker_version }}.tgz --wildcards 'docker/*'
+  when: docker_install_version.stderr != ""
+
+- name: Generate docker service file
+  copy:
+    src: "docker.service"
+    dest: "/etc/systemd/system/docker.service"
+  when: docker_install_version.stderr != ""
+
+- name: Generate containerd service file
+  copy:
+    src: "containerd.service"
+    dest: "/etc/systemd/system/containerd.service"
+  when: docker_install_version.stderr != ""
+
+- name: Start docker service
+  command: |
+    systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
+    systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
+  when: docker_install_version.stderr != ""
diff --git a/builtin/roles/install/image-registry/tasks/install_docker_compose.yaml b/builtin/roles/install/image-registry/tasks/install_docker_compose.yaml
new file mode 100644
index 00000000..9e6dcee3
--- /dev/null
+++ b/builtin/roles/install/image-registry/tasks/install_docker_compose.yaml
@@ -0,0 +1,13 @@
+---
+- name: Check if docker-compose is installed
+  ignore_errors: true
+  command: docker-compose --version
+  register: dockercompose_install_version
+
+- name: Sync docker-compose to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/image-registry/docker-compose/{{ dockercompose_version }}/{{ binary_type.stdout }}/docker-compose"
+    dest: "/usr/local/bin/docker-compose"
+    mode: 0755
+  when:
+    - dockercompose_install_version.stderr != ""
diff --git a/builtin/roles/install/image-registry/tasks/install_harbor.yaml b/builtin/roles/install/image-registry/tasks/install_harbor.yaml
new file mode 100644
index 00000000..c97bb4cd
--- /dev/null
+++ b/builtin/roles/install/image-registry/tasks/install_harbor.yaml
@@ -0,0 +1,57 @@
+---
+- name: Check if image registry is installed
+  ignore_errors: true
+  command: systemctl status harbor.service
+  register: image_registry_service
+
+- name: Sync harbor package to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/image-registry/harbor/{{ harbor_version }}/{{ binary_type.stdout }}/harbor-offline-installer-{{ harbor_version }}.tgz"
+    dest: "/opt/harbor/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz"
+  when: image_registry_service.stderr != ""
+
+- name: Untar harbor package
+  command: |
+    cd /opt/harbor/{{ harbor_version }}/ && tar -zxvf harbor-offline-installer-{{ harbor_version }}.tgz
+  when: image_registry_service.stderr != ""
+
+- name: Sync image registry cert file to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
+    dest: "/opt/harbor/{{ harbor_version }}/ssl/server.crt"
+  when: image_registry_service.stderr != ""
+
+- name: Sync image registry key file to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/pki/image_registry.key"
+    dest: "/opt/harbor/{{ harbor_version }}/ssl/server.key"
+  when: image_registry_service.stderr != ""
+
+- name: Generate harbor config
+  template:
+    src: "harbor.config"
+    dest: "/opt/harbor/{{ harbor_version }}/harbor/harbor.yml"
+  when: image_registry_service.stderr != ""
+
+- name: Generate keepalived docker compose
+  template:
+    src: "harbor_keepalive.docker-compose"
+    dest: "/opt/harbor/{{ harbor_version }}/harbor/docker-compose-keepalived.yml"
+  when:
+    - image_registry.ha_vip | defined
+    - image_registry_service.stderr != ""
+
+- name: Install harbor
+  command: |
+    cd /opt/harbor/{{ harbor_version }}/harbor && /bin/bash install.sh
+  when: image_registry_service.stderr != ""
+
+- name: Register harbor service
+  template:
+    src: "harbor.service"
+    dest: "/etc/systemd/system/harbor.service"
+  when: image_registry_service.stderr != ""
+
+- name: Start harbor service
+  command: systemctl daemon-reload && systemctl start harbor.service && systemctl enable harbor.service
+  when: image_registry_service.stderr != ""
diff --git a/builtin/roles/install/image-registry/tasks/install_keepalived.yaml b/builtin/roles/install/image-registry/tasks/install_keepalived.yaml
new file mode 100644
index 00000000..7f1efefa
--- /dev/null
+++ b/builtin/roles/install/image-registry/tasks/install_keepalived.yaml
@@ -0,0 +1,19 @@
+---
+- name: Sync keepalived image to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/image-registry/keepalived/{{ keepalived_version }}/{{ binary_type.stdout }}/keepalived-{{ keepalived_version }}-linux-{{ binary_type.stdout }}.tgz"
+    dest: "/opt/keepalived/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-{{ binary_type.stdout }}.tgz"
+
+- name: Load keepalived image
+  command: |
+    docker load -i /opt/keepalived/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-{{ binary_type.stdout }}.tgz
+
+- name: Sync keepalived config to remote
+  template:
+    src: "keepalived.config"
+    dest: "/opt/keepalived/{{ keepalived_version }}/keepalived.conf"
+
+- name: Sync healthcheck shell to remote
+  template:
+    src: "keepalived.healthcheck"
+    dest: "/opt/keepalived/{{ keepalived_version }}/healthcheck.sh"
diff --git a/builtin/roles/install/image-registry/tasks/install_registry.yaml b/builtin/roles/install/image-registry/tasks/install_registry.yaml
new file mode 100644
index 00000000..d44aacb4
--- /dev/null
+++ b/builtin/roles/install/image-registry/tasks/install_registry.yaml
@@ -0,0 +1,65 @@
+---
+- name: Check if image registry is installed
+  ignore_errors: true
+  command: systemctl status registry.service
+  register: image_registry_service
+
+- name: Sync registry image to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/image-registry/registry/{{ registry_version }}/{{ binary_type.stdout }}/registry-{{ registry_version }}-linux-{{ binary_type.stdout }}.tgz"
+    dest: "/opt/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-{{ binary_type.stdout }}.tgz"
+  when: image_registry_service.stderr != ""
+
+- name: Mount NFS dir
+  command: |
+    if [ "{{ os.release.ID_LIKE }}" == 'debian' ]; then
+      apt update && apt install -y nfs-common
+    elif [ "{{ os.release.ID_LIKE }}" == 'rhel fedora' ]; then
+      yum update && yum install -y nfs-utils
+    fi
+    {% set nfsHost=groups['nfs']|first %}
+    {% set hv=inventory_hosts[nfsHost] %}
+    mount -t nfs {{ hv.internal_ipv4 }}:{{ image_registry.registry.storage.filesystem.nfs_mount }} {{ image_registry.registry.storage.filesystem.rootdirectory }}
+  when:
+    - image_registry.registry.storage.filesystem.nfs_mount | defined
+    - groups['nfs']|length == 1
+    - image_registry_service.stderr != ""
+
+- name: Load registry image
+  command: |
+    docker load -i /opt/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-{{ binary_type.stdout }}.tgz
+  when: image_registry_service.stderr != ""
+
+- name: Sync image registry cert file to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
+    dest: "/opt/registry/{{ registry_version }}/ssl/server.crt"
+  when: image_registry_service.stderr != ""
+
+- name: Sync image registry key file to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/pki/image_registry.key"
+    dest: "/opt/registry/{{ registry_version }}/ssl/server.key"
+  when: image_registry_service.stderr != ""
+
+- name: Generate registry docker compose
+  template:
+    src: "registry.docker-compose"
+    dest: "/opt/registry/{{ registry_version }}/docker-compose.yml"
+  when: image_registry_service.stderr != ""
+
+- name: Generate registry config
+  template:
+    src: "registry.config"
+    dest: "/opt/registry/{{ registry_version }}/config.yml"
+  when: image_registry_service.stderr != ""
+
+- name: Register registry service
+  template:
+    src: "registry.service"
+    dest: "/etc/systemd/system/registry.service"
+  when: image_registry_service.stderr != ""
+
+- name: Start registry service
+  command: systemctl daemon-reload && systemctl start registry.service && systemctl enable registry.service
+  when: image_registry_service.stderr != ""
diff --git a/builtin/roles/install/image-registry/tasks/load_images.yaml b/builtin/roles/install/image-registry/tasks/load_images.yaml
new file mode 100644
index 00000000..88e96eb6
--- /dev/null
+++ b/builtin/roles/install/image-registry/tasks/load_images.yaml
@@ -0,0 +1,51 @@
+---
+- name: Check if there are images to load
+  ignore_errors: true
+  command: |
+    ls {{ work_dir }}/kubekey/images/
+  register: local_images_dir
+
+- name: Sync oras to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/oras/{{ oras_version }}/{{ binary_type.stdout }}/oras_{{ oras_version|slice:'1:' }}_linux_{{ binary_type.stdout }}.tar.gz"
+    dest: "/tmp/kubekey/oras_{{ oras_version|slice:'1:' }}_linux_{{ binary_type.stdout }}.tar.gz"
+  when: local_images_dir.stderr == ""
+
+- name: Unpackage oras binary
+  command: tar -zxvf /tmp/kubekey/oras_{{ oras_version|slice:'1:' }}_linux_{{ binary_type.stdout }}.tar.gz -C /usr/local/bin oras
+  when: local_images_dir.stderr == ""
+
+- name: Sync images package to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/images/"
+    dest: "/tmp/kubekey/images/"
+  when: local_images_dir.stderr == ""
+
+- name: Sync images to registry
+  command: |
+    for dir in /tmp/kubekey/images/*; do
+      if [ ! -d "$dir" ]; then
-d "$dir" ]; then + # only deal directory + continue + fi + + IFS='=' read -ra array <<< "${dir##*/}" + if [ $(echo ${my_array[@]} | wc -w) > 3 ]; then + project=${array[1]} + dest_image=$(echo "${array[@]:2:-1}" | tr ' ' '/') + tag=${array[-1]} + else + echo "unsupported image" + exit 1 + fi + + # if project is not exist, create if + http_code=$(curl -Iks -u "admin:{{ image_registry.harbor.admin_password }}" 'https://localhost/api/v2.0/projects?project_name=${project}' | grep HTTP | awk '{print $2}') + if [ $http_code == 404 ]; then + # create project + curl -u "admin:{{ image_registry.harbor.admin_password }}" -k -X POST -H "Content-Type: application/json" "https://localhost/api/v2.0/projects" -d "{ \"project_name\": \"${project}\", \"public\": true}" + fi + + oras cp --to-username admin --to-password {{ image_registry.harbor.admin_password }} ${dir##*/} localhost/${project}/${dest_image}:${tag} + done + when: local_images_dir.stderr == "" diff --git a/builtin/roles/install/image-registry/tasks/main.yaml b/builtin/roles/install/image-registry/tasks/main.yaml new file mode 100644 index 00000000..59912645 --- /dev/null +++ b/builtin/roles/install/image-registry/tasks/main.yaml @@ -0,0 +1,16 @@ +--- +- include_tasks: install_docker.yaml + +- include_tasks: install_docker_compose.yaml + +- include_tasks: install_keepalived.yaml + when: image_registry.ha_vip | defined + +- name: Install image registry + block: + - include_tasks: install_registry.yaml + when: image_registry.type == 'registry' + - include_tasks: install_harbor.yaml + when: image_registry.type == 'harbor' + +- include_tasks: load_images.yaml diff --git a/builtin/roles/install/image-registry/templates/docker.config b/builtin/roles/install/image-registry/templates/docker.config new file mode 100644 index 00000000..8ae73211 --- /dev/null +++ b/builtin/roles/install/image-registry/templates/docker.config @@ -0,0 +1,19 @@ +{ + "log-opts": { + "max-size": "5m", + "max-file":"3" + }, + {% if (cri.docker.data_root|defined) %} + "data-root": {{ cri.docker.data_root }}, + {% endif %} + {% if (registry.mirrors|defined) %} + "registry-mirrors": {{ registry.mirrors|to_json|safe }}, + {% endif %} + {% if (registry.insecure_registries|defined) %} + "insecure-registries": {{ registry.insecure_registries|to_json|safe }}, + {% endif %} + {% if (cri.docker.bridge_ip|defined) %} + "bip": "{{ cri.docker.bridge_ip }}", + {% endif %} + "exec-opts": ["native.cgroupdriver=systemd"] +} diff --git a/builtin/roles/install/image-registry/templates/harbor.config b/builtin/roles/install/image-registry/templates/harbor.config new file mode 100644 index 00000000..5bd6275b --- /dev/null +++ b/builtin/roles/install/image-registry/templates/harbor.config @@ -0,0 +1,311 @@ +# Configuration file of Harbor + +# The IP address or hostname to access admin UI and registry service. +# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients. +hostname: {{ internal_ipv4 }} + +# http related config +http: + # port for http, default is 80. 
If https enabled, this port will redirect to https port + port: 80 + +# https related config +https: + # https port for harbor, default is 443 + port: 443 + # The path of cert and key files for nginx + certificate: /opt/harbor/{{ harbor_version }}/ssl/server.crt + private_key: /opt/harbor/{{ harbor_version }}/ssl/server.key + # enable strong ssl ciphers (default: false) + # strong_ssl_ciphers: false + +# # Uncomment following will enable tls communication between all harbor components +# internal_tls: +# # set enabled to true means internal tls is enabled +# enabled: true +# # put your cert and key files on dir +# dir: /etc/harbor/tls/internal + + +# Uncomment external_url if you want to enable external proxy +# And when it enabled the hostname will no longer used +# external_url: https://reg.mydomain.com:8433 + +# The initial password of Harbor admin +# It only works in first time to install harbor +# Remember Change the admin password from UI after launching Harbor. +harbor_admin_password: {{ image_registry.harbor.admin_password }} + +# Harbor DB configuration +database: + # The password for the root user of Harbor DB. Change this before any production use. + password: root123 + # The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained. + max_idle_conns: 100 + # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections. + # Note: the default number of connections is 1024 for postgres of harbor. + max_open_conns: 900 + # The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age. + # The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + conn_max_lifetime: 5m + # The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time. + # The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + conn_max_idle_time: 0 + +# The default data volume +data_volume: /data + +# Harbor Storage settings by default is using /data dir on local filesystem +# Uncomment storage_service setting If you want to using external storage +# storage_service: +# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore +# # of registry's containers. This is usually needed when the user hosts a internal storage with self signed certificate. +# ca_bundle: + +# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss +# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/ +# filesystem: +# maxthreads: 100 +# # set disable to true when you want to disable registry redirect +# redirect: +# disable: false + +# Trivy configuration +# +# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases. 
+# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached +# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it +# should download a newer version from the Internet or use the cached one. Currently, the database is updated every +# 12 hours and published as a new release to GitHub. +trivy: + # ignoreUnfixed The flag to display only fixed vulnerabilities + ignore_unfixed: false + # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub + # + # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues. + # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and + # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path. + skip_update: false + # + # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the + # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path + skip_java_db_update: false + # + # The offline_scan option prevents Trivy from sending API requests to identify dependencies. + # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it. + # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't + # exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode. + # It would work if all the dependencies are in local. + # This option doesn't affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment. + offline_scan: false + # + # Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`. + security_check: vuln + # + # insecure The flag to skip verifying registry certificate + insecure: false + # github_token The GitHub access token to download Trivy DB + # + # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough + # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000 + # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult + # https://docs.github.com/rest/overview/resources-in-the-rest-api#rate-limiting + # + # You can create a GitHub token by following the instructions in + # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line + # + # github_token: xxx + +jobservice: + # Maximum number of job workers in job service + max_job_workers: 10 + # The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB" + job_loggers: + - STD_OUTPUT + - FILE + # - DB + # The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`) + logger_sweeper_duration: 1 #days + +notification: + # Maximum retry count for webhook job + webhook_job_max_retry: 3 + # HTTP client timeout for webhook job + webhook_job_http_client_timeout: 3 #seconds + +# Log configurations +log: + # options are debug, info, warning, error, fatal + level: info + # configs for logs in local storage + local: + # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated. 
+    rotate_count: 50
+    # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
+    # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
+    # are all valid.
+    rotate_size: 200M
+    # The directory on your host that store log
+    location: /var/log/harbor
+
+  # Uncomment following lines to enable external syslog endpoint.
+  # external_endpoint:
+  #   # protocol used to transmit log to external endpoint, options is tcp or udp
+  #   protocol: tcp
+  #   # The host of external endpoint
+  #   host: localhost
+  #   # Port of external endpoint
+  #   port: 5140
+
+#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
+_version: 2.10.0
+
+# Uncomment external_database if using external database.
+# external_database:
+#   harbor:
+#     host: harbor_db_host
+#     port: harbor_db_port
+#     db_name: harbor_db_name
+#     username: harbor_db_username
+#     password: harbor_db_password
+#     ssl_mode: disable
+#     max_idle_conns: 2
+#     max_open_conns: 0
+
+# Uncomment redis if need to customize redis db
+# redis:
+#   # db_index 0 is for core, it's unchangeable
+#   # registry_db_index: 1
+#   # jobservice_db_index: 2
+#   # trivy_db_index: 5
+#   # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
+#   # harbor_db_index: 6
+#   # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
+#   # cache_layer_db_index: 7
+
+# Uncomment external_redis if using external Redis server
+# external_redis:
+#   # support redis, redis+sentinel
+#   # host for redis: <host_redis>:<port_redis>
+#   # host for redis+sentinel:
+#   #  <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
+#   host: redis:6379
+#   password:
+#   # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
+#   # there's a known issue when using external redis username ref:https://github.com/goharbor/harbor/issues/18892
+#   # if you care about the image pull/push performance, please refer to this https://github.com/goharbor/harbor/wiki/Harbor-FAQs#external-redis-username-password-usage
+#   # username:
+#   # sentinel_master_set must be set to support redis+sentinel
+#   #sentinel_master_set:
+#   # db_index 0 is for core, it's unchangeable
+#   registry_db_index: 1
+#   jobservice_db_index: 2
+#   trivy_db_index: 5
+#   idle_timeout_seconds: 30
+#   # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
+#   # harbor_db_index: 6
+#   # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
+#   # cache_layer_db_index: 7
+
+# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
+# uaa:
+#   ca_file: /path/to/ca
+
+# Global proxy
+# Config http proxy for components, e.g. http://my.proxy.com:3128
+# Components doesn't need to connect to each others via http proxy.
+# Remove component from `components` array if want disable proxy
+# for it.
If you want use proxy for replication, MUST enable proxy +# for core and jobservice, and set `http_proxy` and `https_proxy`. +# Add domain to the `no_proxy` field, when you want disable proxy +# for some special registry. +proxy: + http_proxy: + https_proxy: + no_proxy: + components: + - core + - jobservice + - trivy + +# metric: +# enabled: false +# port: 9090 +# path: /metrics + +# Trace related config +# only can enable one trace provider(jaeger or otel) at the same time, +# and when using jaeger as provider, can only enable it with agent mode or collector mode. +# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed +# if using jaeger agetn mode uncomment agent_host and agent_port +# trace: +# enabled: true +# # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth +# sample_rate: 1 +# # # namespace used to differenciate different harbor services +# # namespace: +# # # attributes is a key value dict contains user defined attributes used to initialize trace provider +# # attributes: +# # application: harbor +# # # jaeger should be 1.26 or newer. +# # jaeger: +# # endpoint: http://hostname:14268/api/traces +# # username: +# # password: +# # agent_host: hostname +# # # export trace data by jaeger.thrift in compact mode +# # agent_port: 6831 +# # otel: +# # endpoint: hostname:4318 +# # url_path: /v1/traces +# # compression: false +# # insecure: true +# # # timeout is in seconds +# # timeout: 10 + +# Enable purge _upload directories +upload_purging: + enabled: true + # remove files in _upload directories which exist for a period of time, default is one week. + age: 168h + # the interval of the purge operations + interval: 24h + dryrun: false + +# Cache layer configurations +# If this feature enabled, harbor will cache the resource +# `project/project_metadata/repository/artifact/manifest` in the redis +# which can especially help to improve the performance of high concurrent +# manifest pulling. +# NOTICE +# If you are deploying Harbor in HA mode, make sure that all the harbor +# instances have the same behaviour, all with caching enabled or disabled, +# otherwise it can lead to potential data inconsistency. +cache: + # not enabled by default + enabled: false + # keep cache for one day by default + expire_hours: 24 + +# Harbor core configurations +# Uncomment to enable the following harbor core related configuration items. +# core: +# # The provider for updating project quota(usage), there are 2 options, redis or db, +# # by default is implemented by db but you can switch the updation via redis which +# # can improve the performance of high concurrent pushing to the same project, +# # and reduce the database connections spike and occupies. +# # By redis will bring up some delay for quota usage updation for display, so only +# # suggest switch provider to redis if you were ran into the db connections spike aroud +# # the scenario of high concurrent pushing to same project, no improvment for other scenes. 
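Editor's note: before harbor.service starts the stack, the rendered harbor.yml above can be sanity-checked offline. A sketch, assuming the Harbor offline installer has been unpacked to /opt/harbor/<version>/harbor as the service unit below expects (the version directory is hypothetical):

```bash
cd /opt/harbor/v2.10.0/harbor   # hypothetical version directory

# 'prepare' ships with the Harbor installer; it renders the per-component
# configs from harbor.yml and fails fast on malformed settings.
./prepare

# Validate the generated compose file without starting anything (-q: quiet, exit code only).
docker-compose -f docker-compose.yml config -q
```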
+#   quota_update_provider: redis # Or db
diff --git a/builtin/roles/install/image-registry/templates/harbor.service b/builtin/roles/install/image-registry/templates/harbor.service
new file mode 100644
index 00000000..3f32b16f
--- /dev/null
+++ b/builtin/roles/install/image-registry/templates/harbor.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=harbor
+After=docker.service systemd-networkd.service systemd-resolved.service
+Requires=docker.service
+
+[Service]
+Type=simple
+ExecStart=/usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ harbor_version }}/harbor/docker-compose.yml {% if (image_registry.ha_vip | defined) %}-f /opt/harbor/{{ harbor_version }}/docker-compose-keepalived.yml {% endif %}up
+ExecStop=/usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ harbor_version }}/harbor/docker-compose.yml {% if (image_registry.ha_vip | defined) %}-f /opt/harbor/{{ harbor_version }}/docker-compose-keepalived.yml {% endif %}down
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/builtin/roles/install/image-registry/templates/harbor_keepalive.docker-compose b/builtin/roles/install/image-registry/templates/harbor_keepalive.docker-compose
new file mode 100644
index 00000000..4356e855
--- /dev/null
+++ b/builtin/roles/install/image-registry/templates/harbor_keepalive.docker-compose
@@ -0,0 +1,26 @@
+---
+version: '2.3'
+services:
+  keepalived:
+    image: osixia/keepalived:{{ keepalived_version }}
+    container_name: keepalived
+    restart: always
+    dns_search: .
+    cap_drop:
+      - ALL
+    cap_add:
+      - CHOWN
+      - DAC_OVERRIDE
+      - SETGID
+      - SETUID
+    depends_on:
+      - proxy
+    volumes:
+      - type: bind
+        source: /opt/keeplived/{{ keepalived_version }}/keepalived.conf
+        target: /container/service/keepalived/assets/keepalived.conf
+      - type: bind
+        source: /opt/keeplived/{{ keepalived_version }}/healthcheck.sh
+        target: /etc/keepalived/healthcheck.sh
+    networks:
+      - harbor
diff --git a/builtin/roles/install/image-registry/templates/keepalived.config b/builtin/roles/install/image-registry/templates/keepalived.config
new file mode 100644
index 00000000..ad7309ed
--- /dev/null
+++ b/builtin/roles/install/image-registry/templates/keepalived.config
@@ -0,0 +1,31 @@
+vrrp_script healthcheck {
+    script "/etc/keepalived/healthcheck.sh"
+    interval 10
+    fall 2
+    rise 2
+    timeout 5
+    init_fail
+}
+global_defs {
+    script_user root
+    router_id harbor-ha
+    enable_script_security
+    lvs_sync_daemon ens3 VI_1
+}
+vrrp_instance VI_1 {
+    state BACKUP
+    interface ens3
+    virtual_router_id 31
+    priority 50
+    advert_int 1
+    authentication {
+        auth_type PASS
+        auth_pass k8s-test
+    }
+    virtual_ipaddress {
+        {{ image_registry.ha_vip }}
+    }
+    track_script {
+        healthcheck
+    }
+}
diff --git a/builtin/roles/install/image-registry/templates/keepalived.healthcheck b/builtin/roles/install/image-registry/templates/keepalived.healthcheck
new file mode 100644
index 00000000..cfbf520d
--- /dev/null
+++ b/builtin/roles/install/image-registry/templates/keepalived.healthcheck
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+{% if (image_registry.type=='registry') %}
+# registry service
+service=registry:5000
+{% else %}
+# harbor service
+service=harbor:80
+{% endif %}
+
+# nc takes host and port as separate arguments
+nc -zv -w 2 "${service%:*}" "${service#*:}" > /dev/null 2>&1
+
+if [ $? -eq 0 ]; then
+  exit 0
+else
+  exit 1
+fi
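Editor's note: the keepalived health check above is a plain TCP probe, so it can be tried by hand before it is wired into VRRP. A sketch, assuming the compose stack is up and that `nc` is available in the keepalived image (the script itself already requires it); the 443 host port mapping matches the registry compose file below:

```bash
# Same probe the healthcheck script performs, run inside the keepalived
# container where compose service names (registry, harbor) resolve.
docker exec keepalived nc -zv -w 2 registry 5000; echo "exit: $?"

# From the host, probe the published port instead (assumed mapping 443 -> 5000).
nc -zv -w 2 localhost 443; echo "exit: $?"
```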
+#    realm: basic-realm
+#    path: /path/to/htpasswd
+#middleware:
+#  registry:
+#    - name: ARegistryMiddleware
+#      options:
+#        foo: bar
+#  repository:
+#    - name: ARepositoryMiddleware
+#      options:
+#        foo: bar
+#  storage:
+#    - name: cloudfront
+#      options:
+#        baseurl: https://my.cloudfronted.domain.com/
+#        privatekey: /path/to/pem
+#        keypairid: cloudfrontkeypairid
+#        duration: 3000s
+#        ipfilteredby: awsregion
+#        awsregion: us-east-1, use-east-2
+#        updatefrequency: 12h
+#        iprangesurl: https://ip-ranges.amazonaws.com/ip-ranges.json
+#    - name: redirect
+#      options:
+#        baseurl: https://example.com/
+http:
+  addr: 0.0.0.0:5000
+#  prefix: /my/nested/registry/
+#  host: https://myregistryaddress.org:5000
+  secret: asecretforlocaldevelopment
+  relativeurls: false
+  draintimeout: 60s
+  tls:
+    certificate: /etc/registry/ssl/server.crt
+    key: /etc/registry/ssl/server.key
+#    clientcas:
+#      - /path/to/ca.pem
+#      - /path/to/another/ca.pem
+#  letsencrypt:
+#    cachefile: /path/to/cache-file
+#    email: emailused@letsencrypt.com
+#    hosts: [myregistryaddress.org]
+#    directoryurl: https://acme-v02.api.letsencrypt.org/directory
+#  debug:
+#    addr: localhost:5001
+#    prometheus:
+#      enabled: true
+#      path: /metrics
+  headers:
+    X-Content-Type-Options: [nosniff]
+  http2:
+    disabled: false
+  h2c:
+    enabled: false
+#notifications:
+#  events:
+#    includereferences: true
+#  endpoints:
+#    - name: alistener
+#      disabled: false
+#      url: https://my.listener.com/event
+#      headers: <http.Header>
+#      timeout: 1s
+#      threshold: 10
+#      backoff: 1s
+#      ignoredmediatypes:
+#        - application/octet-stream
+#      ignore:
+#        mediatypes:
+#          - application/octet-stream
+#        actions:
+#          - pull
+#redis:
+#  addr: localhost:6379
+#  password: asecret
+#  db: 0
+#  dialtimeout: 10ms
+#  readtimeout: 10ms
+#  writetimeout: 10ms
+#  pool:
+#    maxidle: 16
+#    maxactive: 64
+#    idletimeout: 300s
+#  tls:
+#    enabled: false
+health:
+  storagedriver:
+    enabled: true
+    interval: 10s
+    threshold: 3
+#  file:
+#    - file: /path/to/checked/file
+#      interval: 10s
+#  http:
+#    - uri: http://server.to.check/must/return/200
+#      headers:
+#        Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==]
+#      statuscode: 200
+#      timeout: 3s
+#      interval: 10s
+#      threshold: 3
+#  tcp:
+#    - addr: redis-server.domain.com:6379
+#      timeout: 3s
+#      interval: 10s
+#      threshold: 3
+#proxy:
+#  remoteurl: https://registry-1.docker.io
+#  username: [username]
+#  password: [password]
+#  ttl: 168h
+#validation:
+#  manifests:
+#    urls:
+#      allow:
+#        - ^https?://([^/]+\.)*example\.com/
+#      deny:
+#        - ^https?://www\.example\.com/
diff --git a/builtin/roles/install/image-registry/templates/registry.docker-compose b/builtin/roles/install/image-registry/templates/registry.docker-compose
new file mode 100644
index 00000000..5fa35cdb
--- /dev/null
+++ b/builtin/roles/install/image-registry/templates/registry.docker-compose
@@ -0,0 +1,54 @@
+---
+version: '2.3'
+services:
+  registry:
+    image: registry:{{ registry_version }}
+    container_name: registry
+    restart: always
+    dns_search: .
+    cap_drop:
+      - ALL
+    cap_add:
+      - CHOWN
+      - DAC_OVERRIDE
+      - SETGID
+      - SETUID
+    volumes:
+      - type: bind
+        source: /opt/registry/{{ registry_version }}/ssl/
+        target: /etc/registry/ssl/
+      - type: bind
+        source: /opt/registry/{{ registry_version }}/config.yml
+        target: /etc/docker/registry/config.yml
+    ports:
+      - 443:5000
+    networks:
+      - registry
+{% if (image_registry.ha_vip | defined) %}
+  keepalived:
+    image: osixia/keepalived:{{ keepalived_version }}
+    container_name: keepalived
+    restart: always
+    dns_search: .
+    cap_drop:
+      - ALL
+    cap_add:
+      - CHOWN
+      - DAC_OVERRIDE
+      - SETGID
+      - SETUID
+    depends_on:
+      - registry
+    volumes:
+      - type: bind
+        source: /opt/keeplived/{{ keepalived_version }}/keepalived.conf
+        target: /container/service/keepalived/assets/keepalived.conf
+      - type: bind
+        source: /opt/keeplived/{{ keepalived_version }}/healthcheck.sh
+        target: /etc/keepalived/healthcheck.sh
+    networks:
+      - registry
+{% endif %}
+networks:
+  registry:
+    external: false
diff --git a/builtin/roles/install/image-registry/templates/registry.service b/builtin/roles/install/image-registry/templates/registry.service
new file mode 100644
index 00000000..e8f13ba0
--- /dev/null
+++ b/builtin/roles/install/image-registry/templates/registry.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=registry
+After=docker.service systemd-networkd.service systemd-resolved.service
+Requires=docker.service
+
+[Service]
+Type=simple
+ExecStart=/usr/local/bin/docker-compose -p registry -f /opt/registry/{{ registry_version }}/docker-compose.yml up
+ExecStop=/usr/local/bin/docker-compose -p registry -f /opt/registry/{{ registry_version }}/docker-compose.yml down
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/builtin/roles/install/kubernetes/defaults/main.yaml b/builtin/roles/install/kubernetes/defaults/main.yaml
new file mode 100644
index 00000000..ed6c1ea2
--- /dev/null
+++ b/builtin/roles/install/kubernetes/defaults/main.yaml
@@ -0,0 +1,161 @@
+kubernetes:
+  cluster_name: cluster.local
+  # support: flannel, calico
+  kube_network_plugin: calico
+  # the minimal version of kubernetes to be installed.
+  kube_version_min_required: v1.19.10
+  # the image repository of kubernetes.
+  image_repository: "registry.k8s.io"
+
+  # the minimal memory size required on each kube_worker node (unit: MB).
+  # a node's memory should be greater than or equal to minimal_node_memory_mb.
+  minimal_node_memory_mb: 10
+  # the maximum number of pods that can be run on each node.
+  max_pods: 110
+  audit: false
+  security_enhancement: "{{ security_enhancement|default_if_none:false }}"
+  networking:
+    dns_domain: cluster.local
+    # it supports up to two comma-separated values, e.g. "value1,value2":
+    # the first value is the ipv4 CIDR, the last value is the ipv6 CIDR.
+    pod_cidr: 10.233.64.0/18
+    service_cidr: 10.233.0.0/18
+    dns_image: "registry.k8s.io/coredns/coredns:v1.11.1"
+    dns_cache_image: "kubesphere/k8s-dns-node-cache:1.22.20"
+    dns_service_ip: "{{ kubernetes.networking.service_cidr|ip_range|slice:':3'|last }}"
+    # Specify a stable IP address or DNS name for the control plane.
+#    control_plane_endpoint: lb.kubesphere.local
+  apiserver:
+    port: 6443
+    certSANs: []
+    extra_args:
+      bind-address: 0.0.0.0
+      feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
+  controller_manager:
+    # Set the Pod CIDR size of a node.
+    kube_network_node_prefix: 24
+    extra_args:
+      feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
+  scheduler:
+    extra_args:
+      feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
+  kube_proxy:
+    enabled: true
+    # support ipvs and iptables
+    mode: "ipvs"
+    config:
+      iptables:
+        masqueradeAll: false
+        masqueradeBit: 14
+        minSyncPeriod: 0s
+        syncPeriod: 30s
+  kubelet:
+    max_pod: 110
+    pod_pids_limit: 10000
+    feature_gates: {}
+    container_log_max_size: 5Mi
+    container_log_max_files: 3
+    extra_args: {}
+  coredns:
+    dns_etc_hosts: []
+    # the config for zones
+    zone_configs:
+      # DNS zones to match. port 53 is used by default. the format is as follows:
+      #   .: all dns zones.
+ # example.com: match *.example.com use dns server with port 53 + # example.com:54: match *.example.com use dns server with port 54 + - zones: [".:53"] + additional_configs: + - errors + - ready + - prometheus :9153 + - loop + - reload + - loadbalance + cache: 30 + kubernetes: + zones: + - "{{ kubernetes.networking.dns_domain }}" + # rewrite performs internal message rewriting. +# rewrite: +# # specify multiple rules and an incoming query matches multiple rules. +# # continue: if the rewrite rule is not matched, the next rule will be matched. +# # stop: if the rewrite rule is not matched, the next rule will not be matched. +# - rule: continue +# # support: type, name, class, edns0, ttl, cname +# # type: the type field of the request will be rewritten. FROM/TO must be a DNS record type (A, MX, etc.). +# # name: the query name in the request is rewritten; by default this is a full match of the name +# # class: the class of the message will be rewritten. +# # edns0: an EDNS0 option can be appended to the request as described below in the EDNS0 Options section. +# # ttl: the TTL value in the response is rewritten. +# # cname: the CNAME target if the response has a CNAME record +# field: name +# # this optional element can be specified for a name or ttl field. +# # exact: the name must be exactly the same as the value. +# # prefix: the name must start with the value. +# # suffix: the name must end with the value. +# # substring: the name must contain the value. +# # regex: the name must match the value. +# type: exact +# value: "example.com example2.com" +# # for field name further options are possible controlling the response rewrites. +# # answer auto: the names in the response is rewritten in a best effort manner. +# # answer name FROM TO: the query name in the response is rewritten matching the from regex pattern. +# # answer value FROM TO: the names in the response is rewritten matching the from regex pattern. +# options: "" + forward: + # the base domain to match for the request to be forwarded. + - from: "." + # the destination endpoints to forward to. The TO syntax allows you to specify a protocol + to: ["/etc/resolv.conf"] + # a space-separated list of domains to exclude from forwarding. + except: [] + # use TCP even when the request comes in over UDP. + force_tcp: false + # try first using UDP even when the request comes in over TCP. + # If response is truncated (TC flag set in response) then do another attempt over TCP. + prefer_udp: false + # the number of subsequent failed health checks that are needed before considering an upstream to be down + # If 0, the upstream will never be marked as down (nor health checked). +# max_fails: 2 + # expire (cached) connections after this time, +# expire: 10s + # define the TLS properties for TLS connection. +# tls: +# # the path to the certificate file. +# cert_file: "" +# # the path to the key file. +# key_file: "" +# # the path to the CA certificate file. +# ca_file: "" +# # allows you to set a server name in the TLS configuration +# tls_servername: "" + # specifies the policy to use for selecting upstream servers. The default is random. + # random: a policy that implements random upstream selection. + # round_robin: a policy that selects hosts based on round robin ordering. + # sequential: a policy that selects hosts based on sequential ordering. 
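Editor's note: once CoreDNS is running with the forward configuration described here, the forwarding and policy behaviour can be spot-checked with `dig` against the cluster DNS service. A sketch; the DNS service IP below is a hypothetical placeholder for whatever `dns_service_ip` renders to in your cluster:

```bash
DNS_IP=10.233.0.3   # substitute the rendered dns_service_ip

# In-cluster name, answered by the kubernetes plugin.
dig +short @"$DNS_IP" kubernetes.default.svc.cluster.local

# External name, forwarded to the upstreams configured in the forward block
# (the default zone config forwards "." to /etc/resolv.conf).
dig +short @"$DNS_IP" kubesphere.io

# Compare UDP vs TCP to observe force_tcp / prefer_udp behaviour.
dig +tcp +short @"$DNS_IP" kubesphere.io
```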
+# policy: "random" + # configure the behaviour of health checking of the upstream servers + # format: DURATION [no_rec] [domain FQDN] + # : use a different duration for health checking, the default duration is 0.5s. + # no_rec:optional argument that sets the RecursionDesired-flag of the dns-query used in health checking to false. The flag is default true. + # domain FQDN: set the domain name used for health checks to FQDN. If not configured, the domain name used for health checks is . +# health_check: "" + # limit the number of concurrent queries to MAX. + max_concurrent: 1000 + kube_vip: + enabled: false + # support:BGP, ARP + mode: BGP + image: plndr/kube-vip:v0.7.2 + haproxy: + enabled: false + health_port: 8081 + image: library/haproxy:2.9.6-alpine + etcd: # todo should apply zone variable + # It is possible to deploy etcd with three methods. + # external: Deploy etcd cluster with external etcd cluster. + # internal: Deploy etcd cluster by static pod. + deployment_type: external + image: "k8s.gcr.io/etcd:3.5.0" + custom_label: {} diff --git a/builtin/roles/install/kubernetes/files/audit/audit_policy.yaml b/builtin/roles/install/kubernetes/files/audit/audit_policy.yaml new file mode 100644 index 00000000..1ef9eb67 --- /dev/null +++ b/builtin/roles/install/kubernetes/files/audit/audit_policy.yaml @@ -0,0 +1,123 @@ +apiVersion: audit.k8s.io/v1 +kind: Policy +rules: + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["system:unsecured"] + namespaces: ["kube-system"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["configmaps"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps", "serviceaccounts/token"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + # Get responses can be large; skip them. 
+  - level: Request
+    verbs: ["get", "list", "watch"]
+    resources:
+      - group: "" # core
+      - group: "admissionregistration.k8s.io"
+      - group: "apiextensions.k8s.io"
+      - group: "apiregistration.k8s.io"
+      - group: "apps"
+      - group: "authentication.k8s.io"
+      - group: "authorization.k8s.io"
+      - group: "autoscaling"
+      - group: "batch"
+      - group: "certificates.k8s.io"
+      - group: "extensions"
+      - group: "metrics.k8s.io"
+      - group: "networking.k8s.io"
+      - group: "policy"
+      - group: "rbac.authorization.k8s.io"
+      - group: "settings.k8s.io"
+      - group: "storage.k8s.io"
+    omitStages:
+      - "RequestReceived"
+  # Default level for known APIs
+  - level: RequestResponse
+    resources:
+      - group: "" # core
+      - group: "admissionregistration.k8s.io"
+      - group: "apiextensions.k8s.io"
+      - group: "apiregistration.k8s.io"
+      - group: "apps"
+      - group: "authentication.k8s.io"
+      - group: "authorization.k8s.io"
+      - group: "autoscaling"
+      - group: "batch"
+      - group: "certificates.k8s.io"
+      - group: "extensions"
+      - group: "metrics.k8s.io"
+      - group: "networking.k8s.io"
+      - group: "policy"
+      - group: "rbac.authorization.k8s.io"
+      - group: "settings.k8s.io"
+      - group: "storage.k8s.io"
+    omitStages:
+      - "RequestReceived"
+  # Default level for all other requests.
+  - level: Metadata
+    omitStages:
+      - "RequestReceived"
diff --git a/builtin/roles/install/kubernetes/files/audit/audit_webhook.yaml b/builtin/roles/install/kubernetes/files/audit/audit_webhook.yaml
new file mode 100644
index 00000000..9ea36147
--- /dev/null
+++ b/builtin/roles/install/kubernetes/files/audit/audit_webhook.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Config
+clusters:
+  - name: kube-auditing
+    cluster:
+      server: https://SHOULD_BE_REPLACED:6443/audit/webhook/event
+      insecure-skip-tls-verify: true
+contexts:
+  - context:
+      cluster: kube-auditing
+      user: ""
+    name: default-context
+current-context: default-context
+preferences: {}
+users: []
diff --git a/builtin/roles/install/kubernetes/files/kubelet.service b/builtin/roles/install/kubernetes/files/kubelet.service
new file mode 100644
index 00000000..ca57d677
--- /dev/null
+++ b/builtin/roles/install/kubernetes/files/kubelet.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=kubelet: The Kubernetes Node Agent
+Documentation=http://kubernetes.io/docs/
+
+[Service]
+CPUAccounting=true
+MemoryAccounting=true
+ExecStart=/usr/local/bin/kubelet
+Restart=always
+StartLimitInterval=0
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/builtin/roles/install/kubernetes/tasks/deploy_cluster_dns.yaml b/builtin/roles/install/kubernetes/tasks/deploy_cluster_dns.yaml
new file mode 100644
index 00000000..06b30e8a
--- /dev/null
+++ b/builtin/roles/install/kubernetes/tasks/deploy_cluster_dns.yaml
@@ -0,0 +1,21 @@
+---
+- name: Generate coredns config
+  template:
+    src: dns/coredns.deployment
+    dest: /etc/kubernetes/coredns.yaml
+
+- name: Apply coredns config
+  command: "kubectl apply -f /etc/kubernetes/coredns.yaml"
+
+- name: Get coredns service IP
+  command: |
+    /usr/local/bin/kubectl get svc -n kube-system coredns -o jsonpath='{.spec.clusterIP}'
+  register: core_dns_ip
+
+- name: Generate nodelocaldns daemonset
+  template:
+    src: dns/nodelocaldns.daemonset
+    dest: /etc/kubernetes/nodelocaldns.yaml
+
+- name: Apply nodelocaldns daemonset
+  command: "kubectl apply -f /etc/kubernetes/nodelocaldns.yaml"
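Editor's note: the `core_dns_ip` fact registered above is just the clusterIP of the coredns Service; when debugging the nodelocaldns rendering later in this patch it helps to run the same lookup by hand. A sketch, assuming a working kubeconfig on the node:

```bash
# Same lookup the "Get coredns service IP" task registers into core_dns_ip.
kubectl get svc -n kube-system coredns -o jsonpath='{.spec.clusterIP}'

# The service IP should already answer queries before nodelocaldns forwards to it
# (this assumes the node can route to service IPs via kube-proxy).
dig +short @"$(kubectl get svc -n kube-system coredns -o jsonpath='{.spec.clusterIP}')" \
  kubernetes.default.svc.cluster.local
```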
b/builtin/roles/install/kubernetes/tasks/deploy_haproxy.yaml
@@ -0,0 +1,15 @@
+---
+- name: Generate haproxy config
+  template:
+    src: haproxy/haproxy.cfg
+    dest: /etc/kubekey/haproxy/haproxy.cfg
+
+- name: Get md5 for haproxy config
+  command: |
+    md5sum /etc/kubekey/haproxy/haproxy.cfg | cut -d ' ' -f1
+  register: cfg_md5
+
+- name: Generate haproxy manifest
+  template:
+    src: haproxy/haproxy.yaml
+    dest: /etc/kubernetes/manifests/haproxy.yaml
diff --git a/builtin/roles/install/kubernetes/tasks/deploy_kube_vip.yaml b/builtin/roles/install/kubernetes/tasks/deploy_kube_vip.yaml
new file mode 100644
index 00000000..8db71eb2
--- /dev/null
+++ b/builtin/roles/install/kubernetes/tasks/deploy_kube_vip.yaml
@@ -0,0 +1,31 @@
+---
+# install with static pod: https://kube-vip.io/docs/installation/static/
+- name: Get interface for ipv4
+  command: |
+    ip route | grep 'proto kernel scope link src {{ internal_ipv4 }}' | sed -e 's/^.*dev //' -e 's/ proto.*//' | uniq
+  register: interface
+
+- name: Should ipv4 interface not be empty
+  assert:
+    that: interface.stdout != ""
+    fail_msg: "{{ internal_ipv4 }} cannot be found in network interface."
+
+- name: Generate kubevip manifest
+  template:
+    src: "kubevip/kubevip.{{ kubernetes.kube_vip.mode }}"
+    dest: "/etc/kubernetes/manifests/kubevip.yaml"
+
+- name: Update kubelet config
+  command: |
+    sed -i 's#server:.*#server: https://127.0.0.1:{{ kubernetes.apiserver.port }}#g' /etc/kubernetes/kubelet.conf
+    systemctl restart kubelet
+
+- name: Update kube-proxy config
+  command: |
+    set -o pipefail && /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf get configmap kube-proxy -n kube-system -o yaml \
+      | sed 's#server:.*#server: https://127.0.0.1:{{ kubernetes.apiserver.port }}#g' \
+      | /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf replace -f -
+    /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0
+
+- name: Update hosts file
+  command: |
+    sed -i 's#.* {{ kubernetes.control_plane_endpoint }}#127.0.0.1 {{ kubernetes.control_plane_endpoint }}#g' /etc/hosts
diff --git a/builtin/roles/install/kubernetes/tasks/init_kubernetes.yaml b/builtin/roles/install/kubernetes/tasks/init_kubernetes.yaml
new file mode 100644
index 00000000..294ccda2
--- /dev/null
+++ b/builtin/roles/install/kubernetes/tasks/init_kubernetes.yaml
@@ -0,0 +1,76 @@
+---
+- name: Add kube user
+  command: |
+    useradd -M -c 'Kubernetes user' -s /sbin/nologin -r kube || :
+
+- name: Create kube directories
+  command: |
-d "{{ item.path }}" ]; then + mkdir -p {{ item.path }} && chown kube -R {{ item.chown }} + fi + loop: + - {path: "/usr/local/bin", chown: "/usr/local/bin"} + - {path: "/etc/kubernetes", chown: "/etc/kubernetes"} + - {path: "/etc/kubernetes/pki", chown: "/etc/kubernetes/pki"} + - {path: "/etc/kubernetes/manifests", chown: "/etc/kubernetes/manifests"} + - {path: "/usr/local/bin/kube-scripts", chown: "/usr/local/bin/kube-scripts"} + - {path: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec", chown: "/usr/libexec/kubernetes"} + - {path: "/etc/cni/net.d", chown: "/etc/cni"} + - {path: "/opt/cni/bin", chown: "/opt/cni"} + - {path: "/var/lib/calico", chown: "/var/lib/calico"} + +- name: Sync external etcd config + when: + - kubernetes.etcd.deployment_type == 'external' && groups['etcd']|length > 0 + block: + - name: Sync etcd ca file to remote + copy: + src: "{{ work_dir }}/kubekey/pki/root.crt" + dest: "/etc/kubernetes/pki/etcd/ca.crt" + - name: Sync etcd cert files to remote + copy: + src: "{{ work_dir }}/kubekey/pki/etcd.crt" + dest: "/etc/kubernetes/pki/etcd/client.crt" + - name: Sync etcd key files to remote + copy: + src: "{{ work_dir }}/kubekey/pki/etcd.key" + dest: "/etc/kubernetes/pki/etcd/client.key" + +- name: Sync audit policy file to remote + copy: + src: "audit" + dest: "/etc/kubernetes/audit/" + when: + - kubernetes.audit + +- name: Generate kubeadm init config + template: + src: "kubeadm/{% if (kube_version|version:'>=v1.24.0') %}kubeadm-init.v1beta3{% else %}kubeadm-init.v1beta2{% endif %}" + dest: "/etc/kubernetes/kubeadm-config.yaml" + +- name: Init kubernetes cluster + block: + - name: Init kubernetes by kubeadm + command: | + /usr/local/bin/kubeadm init \ + --config=/etc/kubernetes/kubeadm-config.yaml \ + --ignore-preflight-errors=FileExisting-crictl,ImagePull \ + {% if (not kubernetes.kube_proxy.enabled) %}--skip-phases=addon/kube-proxy{% endif %} + rescue: + - name: Reset kubeadm if init failed + command: kubeadm reset -f {% if (cri.cri_socket !="") %}--cri-socket {{ cri.cri_socket }}{% endif %} + +- name: Remote master taint + ignore_errors: true + command: | + /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/master=:NoSchedule- + /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule- + +- name: Copy kubeconfig to default dir + command: | + if [ ! 
-d /root/.kube ]; then + mkdir -p /root/.kube + fi + cp -f /etc/kubernetes/admin.conf /root/.kube/config + when: kube_node_info_important.stderr != "" + diff --git a/builtin/roles/install/kubernetes/tasks/install_kube_binaries.yaml b/builtin/roles/install/kubernetes/tasks/install_kube_binaries.yaml new file mode 100644 index 00000000..167a53f0 --- /dev/null +++ b/builtin/roles/install/kubernetes/tasks/install_kube_binaries.yaml @@ -0,0 +1,69 @@ +--- +- name: Check if helm is installed + ignore_errors: true + command: helm version + register: helm_install_version + +- name: Sync helm to remote + copy: + src: "{{ work_dir }}/kubekey/helm/{{ helm_version }}/{{ binary_type.stdout }}/helm-{{ helm_version }}-linux-{{ binary_type.stdout }}.tar.gz" + dest: "/tmp/kubekey/helm-{{ helm_version }}-linux-{{ binary_type.stdout }}.tar.gz" + when: helm_install_version.stderr != "" + +- name: Install helm + command: | + tar --strip-components=1 -zxvf /tmp/kubekey/helm-{{ helm_version }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin linux-{{ binary_type.stdout }}/helm + when: helm_install_version.stderr != "" + +- name: Check if kubeadm is installed + ignore_errors: true + command: kubeadm version + register: kubeadm_install_version + +- name: Sync kubeadm to remote + copy: + src: "{{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ binary_type.stdout }}/kubeadm" + dest: "/usr/local/bin/kubeadm" + mode: 0755 + when: kubeadm_install_version.stderr != "" + +- name: Check if kubectl is installed + ignore_errors: true + command: kubectl version + register: kubectl_install_version + +- name: Sync kubectl to remote + copy: + src: "{{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ binary_type.stdout }}/kubectl" + dest: "/usr/local/bin/kubectl" + mode: 0755 + when: kubectl_install_version.stderr != "" + + +- name: Check if kubelet is installed + ignore_errors: true + command: systemctl status kubelet + register: kubelet_install_version + +- name: Sync kubelet to remote + copy: + src: "{{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ binary_type.stdout }}/kubelet" + dest: "/usr/local/bin/kubelet" + mode: 0755 + when: kubelet_install_version.stderr != "" + +- name: Sync kubelet env to remote + template: + src: "kubeadm/kubelet.env" + dest: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" + when: kubelet_install_version.stderr != "" + +- name: Sync kubelet service to remote + copy: + src: "kubelet.service" + dest: "/etc/systemd/system/kubelet.service" + when: kubelet_install_version.stderr != "" + +- name: Register kubelet service + command: systemctl daemon-reload && systemctl enable kubelet.service + when: kubelet_install_version.stderr != "" diff --git a/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml b/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml new file mode 100644 index 00000000..e809f5bd --- /dev/null +++ b/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml @@ -0,0 +1,38 @@ +--- +- name: Generate kubeadm join config + template: + src: kubeadm/{% if (kube_version|version:">=v1.24.0") %}kubeadm-join.v1beta3{% else %}kubeadm-join.v1beta2{% endif %} + dest: /etc/kubernetes/kubeadm-config.yaml + +- name: Sync audit policy file to remote + copy: + src: "audit" + dest: "/etc/kubernetes/audit/" + when: + - kubernetes.audit + +- name: Join kubernetes cluster + block: + - name: Join kubernetes by kubeadm + command: | + /usr/local/bin/kubeadm join --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull + rescue: + - name: 
Reset kubeadm if join failed
+      command: kubeadm reset -f {% if (cri.cri_socket|defined && cri.cri_socket != "") %}--cri-socket {{ cri.cri_socket }}{% endif %}
+
+- name: Sync kubeconfig to remote
+  copy:
+    src: "{{ work_dir }}/kubekey/kubeconfig"
+    dest: /root/.kube/config
+
+- name: Remove master taint
+  ignore_errors: true
+  command: |
+    /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
+    /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
+  when: inventory_name in groups['kube_control_plane']
+
+- name: Add worker label
+  command: |
+    /usr/local/bin/kubectl label --overwrite node {{ inventory_name }} node-role.kubernetes.io/worker=
+  when: inventory_name in groups['kube_worker']
diff --git a/builtin/roles/install/kubernetes/tasks/main.yaml b/builtin/roles/install/kubernetes/tasks/main.yaml
new file mode 100644
index 00000000..9ffe340f
--- /dev/null
+++ b/builtin/roles/install/kubernetes/tasks/main.yaml
@@ -0,0 +1,64 @@
+---
+- name: Check if kubernetes is installed
+  ignore_errors: true
+  command: kubectl get node --field-selector metadata.name={{ inventory_name }}
+  register: kube_node_info_important
+
+- include_tasks: install_kube_binaries.yaml
+
+- include_tasks: deploy_kube_vip.yaml
+  when:
+    - kubernetes.kube_vip.enabled
+    - inventory_name in groups['kube_control_plane']
+
+- name: Select init kubernetes node
+  run_once: true
+  set_fact:
+    init_kubernetes_node: "{{ groups['kube_control_plane']|first }}"
+
+- name: Init kubernetes
+  when: inventory_name == init_kubernetes_node
+  block:
+    - include_tasks: init_kubernetes.yaml
+      when: kube_node_info_important.stderr != ""
+    - include_tasks: deploy_cluster_dns.yaml
+    - name: Fetch kubeconfig to local
+      fetch:
+        src: /etc/kubernetes/admin.conf
+        dest: "{{ work_dir }}/kubekey/kubeconfig"
+    - name: Generate certificate key
+      block:
+        - name: Generate certificate key by kubeadm
+          command: /usr/local/bin/kubeadm certs certificate-key
+          register: kubeadm_cert_result
+        - name: Set_Fact certificate key to all hosts
+          set_fact:
+            kubeadm_cert: "{{ kubeadm_cert_result.stdout }}"
+    - name: Generate kubeadm token
+      block:
+        - name: Generate token by kubeadm
+          command: /usr/local/bin/kubeadm token create
+          register: kubeadm_token_result
+        - name: Set_Fact token to all hosts
+          set_fact:
+            kubeadm_token: "{{ kubeadm_token_result.stdout }}"
+    - name: Set_Fact init endpoint
+      set_fact:
+        init_kubernetes_endpoint: "{{ inventory_name }}"
+
+- include_tasks: join_kubernetes.yaml
+  when:
+    - kube_node_info_important.stderr != ""
+    - inventory_name != init_kubernetes_node
+
+- include_tasks: deploy_haproxy.yaml
+  when:
+    - kubernetes.haproxy.enabled
+    - inventory_name in groups['kube_worker']
+
+- name: Add custom label to cluster
+  command: |
+    {% for k,v in kubernetes.custom_label %}
+    /usr/local/bin/kubectl label --overwrite node {{ inventory_name }} {{ k }}={{ v }}
+    {% endfor %}
+  when: kubernetes.custom_label | length > 0
diff --git a/builtin/roles/install/kubernetes/templates/dns/coredns.deployment b/builtin/roles/install/kubernetes/templates/dns/coredns.deployment
new file mode 100644
index 00000000..7a039515
--- /dev/null
+++ b/builtin/roles/install/kubernetes/templates/dns/coredns.deployment
@@ -0,0 +1,241 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+    addonmanager.kubernetes.io/mode: Reconcile
+  name: system:coredns
+rules:
+- apiGroups:
+  - ""
+  resources:
- endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +--- +apiVersion: v1 +kind: Service +metadata: + name: coredns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + addonmanager.kubernetes.io/mode: Reconcile + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + createdby: 'kubekey' +spec: + selector: + k8s-app: kube-dns + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "coredns" + namespace: kube-system + labels: + k8s-app: "kube-dns" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "coredns" +spec: + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 10% + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + annotations: + createdby: 'kubekey' + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + priorityClassName: system-cluster-critical + serviceAccountName: coredns + nodeSelector: + kubernetes.io/os: linux + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + k8s-app: kube-dns + topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: In + values: + - "" + containers: + - name: coredns + image: "{{ kubernetes.networking.dns_image }}" + imagePullPolicy: IfNotPresent + resources: + # TODO: Set memory limits when we've profiled the container for large + # clusters, then set request = limit to keep this container in + # guaranteed class. Currently, this container falls into the + # "burstable" category so the kubelet doesn't backoff from restarting it. 
+          limits:
+            memory: 300Mi
+          requests:
+            cpu: 100m
+            memory: 70Mi
+        args: [ "-conf", "/etc/coredns/Corefile" ]
+        volumeMounts:
+        - name: config-volume
+          mountPath: /etc/coredns
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        - containerPort: 9153
+          name: metrics
+          protocol: TCP
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - all
+          readOnlyRootFilesystem: true
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 8080
+            scheme: HTTP
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 10
+        readinessProbe:
+          httpGet:
+            path: /ready
+            port: 8181
+            scheme: HTTP
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 10
+      dnsPolicy: Default
+      volumes:
+      - name: config-volume
+        configMap:
+          name: coredns
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+data:
+  Corefile: |
+    {% for ez in kubernetes.coredns.zone_configs %}
+    {{ ez.zones|join:" " }} {
+        cache {{ ez.cache }}
+        {% for c in ez.additional_configs %}
+        {{ c }}
+        {% endfor %}
+
+        {% for r in ez.rewrite %}
+        rewrite {{ r.rule }} {
+            {{ r.field }} {{ r.type }} {{ r.value }}
+            {{ r.options }}
+        }
+        {% endfor %}
+
+        health {
+            lameduck 5s
+        }
+
+        {% if (ez.kubernetes.zones|defined) %}
+        kubernetes {{ ez.kubernetes.zones|join:" " }} in-addr.arpa ip6.arpa {
+            pods insecure
+            fallthrough in-addr.arpa ip6.arpa
+            ttl 30
+        }
+        {% endif %}
+
+        {% for f in ez.forward %}
+        forward {{ f.from }} {{ f.to|join:" " }} {
+            {% if (f.except|length > 0) %}except {{ f.except|join:" " }}{% endif %}
+            {% if (f.force_tcp) %}force_tcp{% endif %}
+            {% if (f.prefer_udp) %}prefer_udp{% endif %}
+            {% if (f.max_fails|defined) %}max_fails {{ f.max_fails }}{% endif %}
+            {% if (f.expire|defined) %}expire {{ f.expire }}{% endif %}
+            {% if (f.tls|defined) %}tls {{ f.tls.cert_file }} {{ f.tls.key_file }} {{ f.tls.ca_file }}{% endif %}
+            {% if (f.tls_servername|defined) %}tls_servername {{ f.tls_servername }}{% endif %}
+            {% if (f.policy|defined) %}policy {{ f.policy }}{% endif %}
+            {% if (f.health_check|defined) %}health_check {{ f.health_check }}{% endif %}
+            {% if (f.max_concurrent|defined) %}max_concurrent {{ f.max_concurrent }}{% endif %}
+        }
+        {% endfor %}
+
+        {% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
+        hosts /etc/coredns/hosts {
+            fallthrough
+        }
+        {% endif %}
+    }
+    {% endfor %}
+
+  {% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
+  hosts: |
+    {% for h in kubernetes.coredns.dns_etc_hosts %}
+    {{ h }}
+    {% endfor %}
+  {% endif %}
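Editor's note: a quick way to verify the Corefile rendered from the ConfigMap template above is to pull it back out of the live cluster; CoreDNS logs any Corefile parse error at startup, so a bad render surfaces in the pod logs rather than in `kubectl apply`. A sketch, assuming a working kubeconfig:

```bash
# Inspect the rendered Corefile exactly as CoreDNS sees it.
kubectl -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}'

# Confirm the rollout and check the startup logs for Corefile errors.
kubectl -n kube-system rollout status deployment/coredns
kubectl -n kube-system logs deploy/coredns | head -n 20
```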
diff --git a/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset b/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset
new file mode 100644
index 00000000..ddfa774d
--- /dev/null
+++ b/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset
@@ -0,0 +1,237 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: nodelocaldns
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: nodelocaldns
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  selector:
+    matchLabels:
+      k8s-app: nodelocaldns
+  template:
+    metadata:
+      labels:
+        k8s-app: nodelocaldns
+      annotations:
+        prometheus.io/scrape: 'true'
+        prometheus.io/port: '9253'
+    spec:
+      nodeSelector:
+        kubernetes.io/os: linux
+      priorityClassName: system-cluster-critical
+      serviceAccountName: nodelocaldns
+      hostNetwork: true
+      dnsPolicy: Default # Don't use cluster DNS.
+      tolerations:
+      - effect: NoSchedule
+        operator: "Exists"
+      - effect: NoExecute
+        operator: "Exists"
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
+      containers:
+      - name: node-cache
+        image: {{ kubernetes.networking.dns_cache_image }}
+        resources:
+          limits:
+            memory: 200Mi
+          requests:
+            cpu: 100m
+            memory: 70Mi
+        args: [ "-localip", "169.254.25.10", "-conf", "/etc/coredns/Corefile", "-upstreamsvc", "coredns" ]
+        securityContext:
+          privileged: true
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        - containerPort: 9253
+          name: metrics
+          protocol: TCP
+        livenessProbe:
+          httpGet:
+            host: 169.254.25.10
+            path: /health
+            port: 9254
+            scheme: HTTP
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 10
+        readinessProbe:
+          httpGet:
+            host: 169.254.25.10
+            path: /health
+            port: 9254
+            scheme: HTTP
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 10
+        volumeMounts:
+        - name: config-volume
+          mountPath: /etc/coredns
+        - name: xtables-lock
+          mountPath: /run/xtables.lock
+      volumes:
+      - name: config-volume
+        configMap:
+          name: nodelocaldns
+          items:
+          - key: Corefile
+            path: Corefile
+{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
+          - key: hosts
+            path: hosts
+{% endif %}
+      - name: xtables-lock
+        hostPath:
+          path: /run/xtables.lock
+          type: FileOrCreate
+      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+      terminationGracePeriodSeconds: 0
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 20%
+    type: RollingUpdate
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nodelocaldns
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+
+data:
+  Corefile: |
+{% for ez in kubernetes.coredns.zone_configs %}
+{{ ez.zones|join:" " }} {
+    log
+    errors
+    loadbalance
+    cache {{ ez.cache }}
+    reload
+    loop
+    bind 169.254.25.10
+    prometheus :9253
+
+{% for r in ez.rewrite %}
+    rewrite {{ r.rule }} {
+        {{ r.field }} {{ r.type }} {{ r.value }}
+        {{ r.options }}
+    }
+{% endfor %}
+
+{% for f in ez.forward %}
+    forward {{ f.from }} {{ f.to|join:" " }} {
+{% if (f.except|length > 0) %}
+        except {{ f.except|join:" " }}
+{% endif %}
+{% if (f.force_tcp) %}
+        force_tcp
+{% endif %}
+{% if (f.prefer_udp) %}
+        prefer_udp
+{% endif %}
+        max_fails {{ f.max_fails|default_if_none:2 }}
+        expire {{ f.expire|default_if_none:"10s" }}
+{% if (f.tls|defined) %}
+        tls {{ f.tls.cert_file|default_if_none:'""' }} {{ f.tls.key_file|default_if_none:'""' }} {{ f.tls.ca_file|default_if_none:'""' }}
+{% endif %}
+{% if (f.tls_servername|defined) %}
+        tls_servername {{ f.tls_servername }}
+{% endif %}
+{% if (f.policy|defined) %}
+        policy {{ f.policy }}
+{% endif %}
+{% if (f.health_check|defined) %}
+        health_check {{ f.health_check }}
+{% endif %}
+{% if (f.max_concurrent|defined) %}
+        max_concurrent {{ f.max_concurrent }}
+{% endif %}
+    }
+{% endfor %}
+
+{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
+    hosts /etc/coredns/hosts {
+        fallthrough
+    }
+{% endif %}
+}
+{% endfor %}
+
+{{ kubernetes.networking.dns_domain }}:53 {
+    errors
+    cache {
+        success 9984 30
+        denial 9984 5
+    }
+    reload
+    loop
+    bind 169.254.25.10
+    forward . {% if (core_dns_ip.stdout != '') %}{{ core_dns_ip.stdout }}{% else %}{{ kubernetes.networking.service_cidr|ip_range|slice:":3"|last }}{% endif %} {
+        force_tcp
+    }
+    prometheus :9253
+    health 169.254.25.10:9254
+}
+in-addr.arpa:53 {
+    errors
+    cache 30
+    reload
+    loop
+    bind 169.254.25.10
+    forward . {% if (core_dns_ip.stdout != '') %}{{ core_dns_ip.stdout }}{% else %}{{ kubernetes.networking.service_cidr|ip_range|slice:":3"|last }}{% endif %} {
+        force_tcp
+    }
+    prometheus :9253
+}
+ip6.arpa:53 {
+    errors
+    cache 30
+    reload
+    loop
+    bind 169.254.25.10
+    forward . {% if (core_dns_ip.stdout != '') %}{{ core_dns_ip.stdout }}{% else %}{{ kubernetes.networking.service_cidr|ip_range|slice:":3"|last }}{% endif %} {
+        force_tcp
+    }
+    prometheus :9253
+}
+.:53 {
+    errors
+    cache 30
+    reload
+    loop
+    bind 169.254.25.10
+    forward . /etc/resolv.conf
+    prometheus :9253
+{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
+    hosts /etc/coredns/hosts {
+        fallthrough
+    }
+{% endif %}
+}
+{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
+  hosts: |
+{% for h in kubernetes.coredns.dns_etc_hosts %}
+    {{ h }}
+{% endfor %}
+{% endif %}
diff --git a/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg b/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg
new file mode 100644
index 00000000..b2a00155
--- /dev/null
+++ b/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg
@@ -0,0 +1,41 @@
+global
+    maxconn 4000
+    log 127.0.0.1 local0
+
+defaults
+    mode http
+    log global
+    option httplog
+    option dontlognull
+    option http-server-close
+    option redispatch
+    retries 5
+    timeout http-request 5m
+    timeout queue 5m
+    timeout connect 30s
+    timeout client 30s
+    timeout server 15m
+    timeout http-keep-alive 30s
+    timeout check 30s
+    maxconn 4000
+
+frontend healthz
+    bind *:{{ kubernetes.haproxy.health_port }}
+    mode http
+    monitor-uri /healthz
+
+frontend kube_api_frontend
+    bind 127.0.0.1:{{ kubernetes.apiserver.port }}
+    mode tcp
+    option tcplog
+    default_backend kube_api_backend
+
+backend kube_api_backend
+    mode tcp
+    balance leastconn
+    default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100
+    option httpchk GET /healthz
+    http-check expect status 200
+{% for h in groups['kube_control_plane'] %}{% set hv=inventory_hosts[h] %}
+    server {{ h }} {{ hv.internal_ipv4 }}:{{ kubernetes.apiserver.port }} check check-ssl verify none
+{% endfor %}
diff --git a/builtin/roles/install/kubernetes/templates/haproxy/haproxy.yaml b/builtin/roles/install/kubernetes/templates/haproxy/haproxy.yaml
new file mode 100644
index 00000000..b50e3eed
--- /dev/null
+++ b/builtin/roles/install/kubernetes/templates/haproxy/haproxy.yaml
@@ -0,0 +1,41 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: haproxy
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+    k8s-app: kube-haproxy
+  annotations:
+    cfg-checksum: "{{ cfg_md5.stdout }}"
+spec:
+  hostNetwork: true
+  dnsPolicy: ClusterFirstWithHostNet
+  nodeSelector:
+    kubernetes.io/os: linux
+  priorityClassName: system-node-critical
+  containers:
+  - name: haproxy
+    image: {{ kubernetes.haproxy.image }}
+    imagePullPolicy: IfNotPresent
+    resources:
+      requests:
+        cpu: 25m
+        memory: 32M
+    livenessProbe:
+      httpGet:
+        path: /healthz
+        port: {{ kubernetes.haproxy.health_port }}
+    readinessProbe:
+      httpGet:
+        path: /healthz
+        port: {{ kubernetes.haproxy.health_port }}
+    volumeMounts:
+    - mountPath: /usr/local/etc/haproxy/
+      name: etc-haproxy
+      readOnly: true
+  volumes:
+  - name: etc-haproxy
+    hostPath:
+      path:
/etc/kubekey/haproxy diff --git a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta2 b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta2 new file mode 100644 index 00000000..f239c3e4 --- /dev/null +++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta2 @@ -0,0 +1,199 @@ +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +etcd: +{% if (kubernetes.etcd.deployment_type=='internal') %} + local: + {% set etcd_image_info=kubernetes.etcd.image|split:":" %} + imageRepository: {{ etcd_image_info[0]|split:"/"|slice:":-1"|join:"/"|safe }} + imageTag: {{ etcd_image_info[1] }} + serverCertSANs: + {% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %} + - {{ hv.internal_ipv4|stringformat:"https://%s:2379" }} + {% endfor %} +{% else %} + external: + endpoints: + {% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %} + - {{ hv.internal_ipv4|stringformat:"https://%s:2379" }} + {% endfor %} + caFile: /etc/kubernetes/pki/etcd/ca.crt + certFile: /etc/kubernetes/pki/etcd/client.crt + keyFile: /etc/kubernetes/pki/etcd/client.key +{% endif %} +dns: + type: CoreDNS + {% set core_image_info=kubernetes.networking.dns_image|split:":" %} + imageRepository: {{ core_image_info[0]|split:"/"|slice:":-1"|join:"/"|safe }} + imageTag: {{ core_image_info[1] }} +imageRepository: {{ kubernetes.image_repository }} +kubernetesVersion: {{ kube_version }} +certificatesDir: /etc/kubernetes/pki +clusterName: {{ kubernetes.cluster_name }} +controlPlaneEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %} +networking: + dnsDomain: {{ kubernetes.networking.dns_domain }} + podSubnet: {{ kubernetes.networking.pod_cidr }} + serviceSubnet: {{ kubernetes.networking.service_cidr }} +apiServer: + extraArgs: +{% if (kubernetes.security_enhancement == "true") %} + authorization-mode: Node,RBAC + enable-admission-plugins: AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity + profiling: false + request-timeout: 120s + service-account-lookup: true + tls-min-version: VersionTLS12 + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +{% endif %} +{% if (kubernetes.audit) %} + audit-log-format: json + audit-log-maxbackup: 2 + audit-log-maxsize: 200 + audit-policy-file: /etc/kubernetes/audit/policy.yaml + audit-webhook-config-file: /etc/kubernetes/audit/webhook.yaml +{% endif %} +{{ kubernetes.apiserver.extra_args|to_yaml:4|safe }} + certSANs: + - kubernetes + - kubernetes.default + - kubernetes.default.svc + - localhost + - 127.0.0.1 + - {{ kubernetes.networking.service_cidr|ip_range|slice:":1"|last }} + - {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %} + - {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint|stringformat:"kubernetes.default.svc.%s" }}{% else %}{{ init_kubernetes_node|stringformat:"kubernetes.default.svc.%s" }}{% endif %} + - {{ kubernetes.networking.dns_domain|stringformat:"kubernetes.default.svc.%s" }} + {% for h in groups['k8s_cluster'] %}{% set hv=inventory_hosts[h] %} + - {{ h }}.{{ kubernetes.networking.dns_domain }} + - {{ hv.internal_ipv4 }} + {% if (hv.internal_ipv6|defined) %}- {{ hv.internal_ipv6 }}{% endif %} + {% endfor %} + {% for h in 
kubernetes.apiserver.certSANs %} + - {{ h }} + {% endfor %} +{% if (kubernetes.audit) %} + extraVolumes: + - name: k8s-audit + hostPath: /etc/kubernetes/audit + mountPath: /etc/kubernetes/audit + pathType: DirectoryOrCreate +{% endif %} +controllerManager: + extraArgs: +{% if (internal_ipv6|defined) %} + node-cidr-mask-size-ipv4: "{{ kubernetes.controller_manager.kube_network_node_prefix }}" + node-cidr-mask-size-ipv6: "64" +{% else %} + node-cidr-mask-size: "{{ kubernetes.controller_manager.kube_network_node_prefix }}" +{% endif %} +{% if (kube_version|version:'>=v1.19.0') %} + cluster-signing-duration: 87600h +{% else %} + experimental-cluster-signing-duration: 87600h +{% endif %} +{% if (kubernetes.security_enhancement == "true") %} + bind-address: 127.0.0.1 + profiling: false + terminated-pod-gc-threshold: 50 + use-service-account-credentials: true +{% else %} + bind-address: 0.0.0.0 +{% endif %} +{{ kubernetes.controller_manager.extra_args|to_yaml:4|safe }} + extraVolumes: + - name: host-time + hostPath: /etc/localtime + mountPath: /etc/localtime + readOnly: true +scheduler: + extraArgs: +{% if (kubernetes.security_enhancement == "true") %} + bind-address: 127.0.0.1 + profiling: false +{% else %} + bind-address: 0.0.0.0 +{% endif %} +{{ kubernetes.scheduler.extra_args|to_yaml:4|safe }} + +--- + +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: {{ internal_ipv4 }} + bindPort: {{ kubernetes.apiserver.port }} +nodeRegistration: + criSocket: {{ cri.cri_socket }} + kubeletExtraArgs: + cgroup-driver: {{ cri.cgroup_driver }} + +--- + +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: {{ kubernetes.networking.pod_cidr }} +mode: {{ kubernetes.kube_proxy.mode }} +{{ kubernetes.kube_proxy.config|to_yaml|safe }} + +--- + +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +clusterDomain: {{ kubernetes.networking.dns_domain }} +clusterDNS: + - {{ kubernetes.networking.dns_service_ip }} +maxPods: {{ kubernetes.max_pods }} +podPidsLimit: {{ kubernetes.kubelet.pod_pids_limit }} +rotateCertificates: true +kubeReserved: + cpu: 200m + memory: 250Mi +systemReserved: + cpu: 200m + memory: 250Mi +evictionHard: + memory.available: 5% + pid.available: 10% +evictionSoft: + memory.available: 10% +evictionSoftGracePeriod: + memory.available: 2m +evictionMaxPodGracePeriod: 120 +evictionPressureTransitionPeriod: 30s + +{% if (kubernetes.security_enhancement == "true") %} +readOnlyPort: 0 +protectKernelDefaults: true +eventRecordQPS: 1 +streamingConnectionIdleTimeout: 5m +makeIPTablesUtilChains: true +tlsCipherSuites: + - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +featureGates: + RotateKubeletServerCertificate: true + SeccompDefault: true +{% if (kube_version|version:">=v1.24.0") %} + TTLAfterFinished: true +{% endif %} +{% if (kube_version|version:">=v1.21.0") %} + CSIStorageCapacity: true +{% endif %} +{{ features|to_yaml:2|safe }} +{% else %} +featureGates: + RotateKubeletServerCertificate: true +{% if (kube_version|version:">=v1.24.0") %} + TTLAfterFinished: true +{% endif %} +{% if (kube_version|version:">=v1.21.0") %} + CSIStorageCapacity: true + ExpandCSIVolumes: true +{% endif %} +{{ features|to_yaml:2|safe }} +{% endif %} +cgroupDriver: {{ cri.cgroup_driver }} +containerLogMaxSize: {{ kubernetes.kubelet.container_log_max_size }} +containerLogMaxFiles: {{ kubernetes.kubelet.container_log_max_files }} diff --git 
a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3 b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3 new file mode 100644 index 00000000..3e11bf87 --- /dev/null +++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3 @@ -0,0 +1,198 @@ +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: ClusterConfiguration +etcd: +{% if (kubernetes.etcd.deployment_type=='internal') %} + local: + {% set etcd_image_info=kubernetes.etcd.image|split:":" %} + imageRepository: {{ etcd_image_info[0]|split:"/"|slice:":-1"|join:"/"|safe }} + imageTag: {{ etcd_image_info[1] }} + serverCertSANs: + {% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %} + - {{ hv.internal_ipv4|stringformat:"https://%s:2379" }} + {% endfor %} +{% else %} + external: + endpoints: + {% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %} + - {{ hv.internal_ipv4|stringformat:"https://%s:2379" }} + {% endfor %} + caFile: /etc/kubernetes/pki/etcd/ca.crt + certFile: /etc/kubernetes/pki/etcd/client.crt + keyFile: /etc/kubernetes/pki/etcd/client.key +{% endif %} +dns: + {% set core_image_info=kubernetes.networking.dns_image|split:":" %} + imageRepository: {{ core_image_info[0]|split:"/"|slice:":-1"|join:"/"|safe }} + imageTag: {{ core_image_info[1] }} +imageRepository: {{ kubernetes.image_repository }} +kubernetesVersion: {{ kube_version }} +certificatesDir: /etc/kubernetes/pki +clusterName: {{ kubernetes.cluster_name }} +controlPlaneEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %} +networking: + dnsDomain: {{ kubernetes.networking.dns_domain }} + podSubnet: {{ kubernetes.networking.pod_cidr }} + serviceSubnet: {{ kubernetes.networking.service_cidr }} +apiServer: + extraArgs: +{% if (kubernetes.security_enhancement == "true") %} + authorization-mode: Node,RBAC + enable-admission-plugins: AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity + profiling: false + request-timeout: 120s + service-account-lookup: true + tls-min-version: VersionTLS12 + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +{% endif %} +{% if (kubernetes.audit) %} + audit-log-format: json + audit-log-maxbackup: 2 + audit-log-maxsize: 200 + audit-policy-file: /etc/kubernetes/audit/policy.yaml + audit-webhook-config-file: /etc/kubernetes/audit/webhook.yaml +{% endif %} +{{ kubernetes.apiserver.extra_args|to_yaml:4|safe }} + certSANs: + - kubernetes + - kubernetes.default + - kubernetes.default.svc + - localhost + - 127.0.0.1 + - {{ kubernetes.networking.service_cidr|ip_range|slice:":1"|last }} + - {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %} + - {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint|stringformat:"kubernetes.default.svc.%s" }}{% else %}{{ init_kubernetes_node|stringformat:"kubernetes.default.svc.%s" }}{% endif %} + - {{ kubernetes.networking.dns_domain|stringformat:"kubernetes.default.svc.%s" }} + {% for h in groups['k8s_cluster'] %}{% set hv=inventory_hosts[h] %} + - {{ h }}.{{ kubernetes.networking.dns_domain }} + - {{ hv.internal_ipv4 }} + {% if (hv.internal_ipv6|defined) %}- {{ hv.internal_ipv6 }}{% endif %} + {% endfor %} + {% for h in kubernetes.apiserver.certSANs %} + - {{ h }} + {% 
endfor %} +{% if (kubernetes.audit) %} + extraVolumes: + - name: k8s-audit + hostPath: /etc/kubernetes/audit + mountPath: /etc/kubernetes/audit + pathType: DirectoryOrCreate +{% endif %} +controllerManager: + extraArgs: +{% if (internal_ipv6|defined) %} + node-cidr-mask-size-ipv4: "{{ kubernetes.controller_manager.kube_network_node_prefix }}" + node-cidr-mask-size-ipv6: "64" +{% else %} + node-cidr-mask-size: "{{ kubernetes.controller_manager.kube_network_node_prefix }}" +{% endif %} +{% if (kube_version|version:'>=v1.19.0') %} + cluster-signing-duration: 87600h +{% else %} + experimental-cluster-signing-duration: 87600h +{% endif %} +{% if (kubernetes.security_enhancement == "true") %} + bind-address: 127.0.0.1 + profiling: false + terminated-pod-gc-threshold: 50 + use-service-account-credentials: true +{% else %} + bind-address: 0.0.0.0 +{% endif %} +{{ kubernetes.controller_manager.extra_args|to_yaml:4|safe }} + extraVolumes: + - name: host-time + hostPath: /etc/localtime + mountPath: /etc/localtime + readOnly: true +scheduler: + extraArgs: +{% if (kubernetes.security_enhancement == "true") %} + bind-address: 127.0.0.1 + profiling: false +{% else %} + bind-address: 0.0.0.0 +{% endif %} +{{ kubernetes.scheduler.extra_args|to_yaml:4|safe }} + +--- + +apiVersion: kubeadm.k8s.io/v1beta3 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: {{ internal_ipv4 }} + bindPort: {{ kubernetes.apiserver.port }} +nodeRegistration: + criSocket: {{ cri.cri_socket }} + kubeletExtraArgs: + cgroup-driver: {{ cri.cgroup_driver }} + +--- + +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: {{ kubernetes.networking.pod_cidr }} +mode: {{ kubernetes.kube_proxy.mode }} +{{ kubernetes.kube_proxy.config|to_yaml|safe }} + +--- + +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +clusterDomain: {{ kubernetes.networking.dns_domain }} +clusterDNS: + - {{ kubernetes.networking.dns_service_ip }} +maxPods: {{ kubernetes.max_pods }} +podPidsLimit: {{ kubernetes.kubelet.pod_pids_limit }} +rotateCertificates: true +kubeReserved: + cpu: 200m + memory: 250Mi +systemReserved: + cpu: 200m + memory: 250Mi +evictionHard: + memory.available: 5% + pid.available: 10% +evictionSoft: + memory.available: 10% +evictionSoftGracePeriod: + memory.available: 2m +evictionMaxPodGracePeriod: 120 +evictionPressureTransitionPeriod: 30s + +{% if (kubernetes.security_enhancement == "true") %} +readOnlyPort: 0 +protectKernelDefaults: true +eventRecordQPS: 1 +streamingConnectionIdleTimeout: 5m +makeIPTablesUtilChains: true +tlsCipherSuites: + - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +featureGates: + RotateKubeletServerCertificate: true + SeccompDefault: true +{% if (kube_version|version:">=v1.24.0") %} + TTLAfterFinished: true +{% endif %} +{% if (kube_version|version:">=v1.21.0") %} + CSIStorageCapacity: true +{% endif %} +{{ features|to_yaml:2|safe }} +{% else %} +featureGates: + RotateKubeletServerCertificate: true +{% if (kube_version|version:">=v1.24.0") %} + TTLAfterFinished: true +{% endif %} +{% if (kube_version|version:">=v1.21.0") %} + CSIStorageCapacity: true + ExpandCSIVolumes: true +{% endif %} +{{ features|to_yaml:2|safe }} +{% endif %} +cgroupDriver: {{ cri.cgroup_driver }} +containerLogMaxSize: {{ kubernetes.kubelet.container_log_max_size }} +containerLogMaxFiles: {{ kubernetes.kubelet.container_log_max_files }} diff --git 
a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta2 b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta2 new file mode 100644 index 00000000..cf213f34 --- /dev/null +++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta2 @@ -0,0 +1,20 @@ +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: JoinConfiguration +discovery: + bootstrapToken: + apiServerEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}:{{ kubernetes.apiserver.port }} + token: "{{ kubeadm_token }}" + unsafeSkipCAVerification: true + tlsBootstrapToken: "{{ kubeadm_token }}" +{% if (inventory_name in groups['kube_control_plane']) %} +controlPlane: + localAPIEndpoint: + advertiseAddress: {{ internal_ipv4 }} + bindPort: {{ kubernetes.apiserver.port }} + certificateKey: {{ kubeadm_cert }} +{% endif %} +nodeRegistration: + criSocket: {{ cri.cri_socket }} + kubeletExtraArgs: + cgroup-driver: {{ cri.cgroup_driver }} diff --git a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta3 b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta3 new file mode 100644 index 00000000..4ac96083 --- /dev/null +++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta3 @@ -0,0 +1,20 @@ +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: JoinConfiguration +discovery: + bootstrapToken: + apiServerEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}:{{ kubernetes.apiserver.port }} + token: "{{ kubeadm_token }}" + unsafeSkipCAVerification: true + tlsBootstrapToken: "{{ kubeadm_token }}" +{% if (inventory_name in groups['kube_control_plane']) %} +controlPlane: + localAPIEndpoint: + advertiseAddress: {{ internal_ipv4 }} + bindPort: {{ kubernetes.apiserver.port }} + certificateKey: {{ kubeadm_cert }} +{% endif %} +nodeRegistration: + criSocket: {{ cri.cri_socket }} + kubeletExtraArgs: + cgroup-driver: {{ cri.cgroup_driver }} diff --git a/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env b/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env new file mode 100644 index 00000000..8d451f19 --- /dev/null +++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env @@ -0,0 +1,13 @@ +# Note: This dropin only works with kubeadm and kubelet v1.11+ +[Service] +Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" +Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" +# This is a file that "kubeadm init" and "kubeadm join" generate at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically +EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env +# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use +# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. 
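+# Example (illustrative flag only, not part of this template): /etc/default/kubelet could contain a line such as KUBELET_EXTRA_ARGS="--max-pods=200".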
+EnvironmentFile=-/etc/default/kubelet +Environment="KUBELET_EXTRA_ARGS=--node-ip={{ internal_ipv4 }} --hostname-override={{ inventory_name }} {%for k,v in kubernetes.kubelet.extra_args %}--{{k}} {{v}} {% endfor %}" +ExecStart= +ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS + diff --git a/builtin/roles/install/kubernetes/templates/kubevip/kubevip.ARP b/builtin/roles/install/kubernetes/templates/kubevip/kubevip.ARP new file mode 100644 index 00000000..a9e8a4ca --- /dev/null +++ b/builtin/roles/install/kubernetes/templates/kubevip/kubevip.ARP @@ -0,0 +1,65 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system +spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_interface + value: {{ interface.stdout }} + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: lb_enable + value: "true" + - name: lb_port + value: "6443" + - name: address + value: {{ kubernetes.control_plane_endpoint }} + image: {{ kubernetes.kubevip.image }} + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + - SYS_TIME + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig +status: {} diff --git a/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP b/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP new file mode 100644 index 00000000..31e1bc31 --- /dev/null +++ b/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP @@ -0,0 +1,72 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system +spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "false" + - name: port + value: "6443" + - name: vip_interface + value: {{ interface.stdout }} + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: bgp_enable + value: "true" + - name: bgp_routerid + value: {% for h in groups['kube_control_plane'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %} + - name: bgp_as + value: "65000" + - name: bgp_peeraddress + - name: bgp_peerpass + - name: bgp_peeras + value: "65000" + - name: bgp_peers + value: {{ .BGPPeers }} + - name: lb_enable + value: "true" + - name: lb_port + value: "6443" + - name: lb_fwdmethod + value: local + - name: address + value: {{ kubernetes.control_plane_endpoint }} + - name: prometheus_server + value: :2112 + image: {{ kubernetes.kubevip.image }} + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - 
hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig +status: {} diff --git a/builtin/roles/install/nfs/defaults/main.yaml b/builtin/roles/install/nfs/defaults/main.yaml new file mode 100644 index 00000000..c02abb0c --- /dev/null +++ b/builtin/roles/install/nfs/defaults/main.yaml @@ -0,0 +1,3 @@ +nfs: + share_dir: + - /share/ diff --git a/builtin/roles/install/nfs/tasks/debian.yaml b/builtin/roles/install/nfs/tasks/debian.yaml new file mode 100644 index 00000000..bbeba5d8 --- /dev/null +++ b/builtin/roles/install/nfs/tasks/debian.yaml @@ -0,0 +1,28 @@ +--- +- name: Check if nfs is installed + ignore_errors: true + command: systemctl status nfs-kernel-server + register: nfs_server_install + +- name: Install nfs + command: apt update && apt install -y nfs-kernel-server + when: nfs_server_install.stderr != "" + +- name: Create nfs share directory + command: | + if [ ! -d {{ item }} ]; then + mkdir -p {{ item }} + chmod -R 0755 {{ item }} + chown nobody:nogroup {{ item }} + fi + loop: "{{ nfs.share_dir }}" + +- name: Generate nfs config + template: + src: exports + dest: /etc/exports + +- name: Export share directory and start nfs server + command: | + exportfs -a + systemctl enable nfs-kernel-server && systemctl restart nfs-kernel-server diff --git a/builtin/roles/install/nfs/tasks/main.yaml b/builtin/roles/install/nfs/tasks/main.yaml new file mode 100644 index 00000000..bec3f2ae --- /dev/null +++ b/builtin/roles/install/nfs/tasks/main.yaml @@ -0,0 +1,6 @@ +--- +- include_tasks: debian.yaml + when: os.release.ID_LIKE == 'debian' + +- include_tasks: rhel.yaml + when: os.release.ID_LIKE == 'rhel fedora' diff --git a/builtin/roles/install/nfs/tasks/rhel.yaml b/builtin/roles/install/nfs/tasks/rhel.yaml new file mode 100644 index 00000000..d770c566 --- /dev/null +++ b/builtin/roles/install/nfs/tasks/rhel.yaml @@ -0,0 +1,28 @@ +--- +- name: Check if nfs is installed + ignore_errors: true + command: systemctl status nfs-server + register: nfs_server_install + +- name: Install nfs + command: yum update && yum install -y nfs-utils + when: nfs_server_install.stderr != "" + +- name: Create nfs share directory + command: | + if [ ! 
-d {{ item }} ]; then + mkdir -p {{ item }} + chmod -R 0755 {{ item }} + chown nobody:nobody {{ item }} + fi + loop: "{{ nfs.share_dir }}" + +- name: Generate nfs config + template: + src: exports + dest: /etc/exports + +- name: Export share directory and start nfs server + command: | + exportfs -a + systemctl enable nfs-server.service && systemctl restart nfs-server.service diff --git a/builtin/roles/install/nfs/templates/exports b/builtin/roles/install/nfs/templates/exports new file mode 100644 index 00000000..ef2ecc04 --- /dev/null +++ b/builtin/roles/install/nfs/templates/exports @@ -0,0 +1,3 @@ +{% for p in nfs.share_dir %} +{{ p }} *(rw,sync,no_subtree_check) +{% endfor %} diff --git a/builtin/roles/install/security/defaults/main.yaml b/builtin/roles/install/security/defaults/main.yaml new file mode 100644 index 00000000..7376ffeb --- /dev/null +++ b/builtin/roles/install/security/defaults/main.yaml @@ -0,0 +1 @@ +security_enhancement: false diff --git a/builtin/roles/install/security/tasks/main.yaml b/builtin/roles/install/security/tasks/main.yaml new file mode 100644 index 00000000..f4ffba87 --- /dev/null +++ b/builtin/roles/install/security/tasks/main.yaml @@ -0,0 +1,39 @@ +--- +- name: security enhancement for etcd + command: | + chmod 700 /etc/ssl/etcd/ssl && chown root:root /etc/ssl/etcd/ssl + chmod 600 /etc/ssl/etcd/ssl/* && chown root:root /etc/ssl/etcd/ssl/* + chmod 700 /var/lib/etcd && chown etcd:etcd /var/lib/etcd + chmod 550 /usr/local/bin/etcd* && chown root:root /usr/local/bin/etcd* + when: inventory_name in groups['etcd'] + +- name: security enhancement for control plane + command: | + chmod 644 /etc/kubernetes && chown root:root /etc/kubernetes + chmod 600 -R /etc/kubernetes && chown root:root -R /etc/kubernetes/* + chmod 644 /etc/kubernetes/manifests && chown root:root /etc/kubernetes/manifests + chmod 644 /etc/kubernetes/pki && chown root:root /etc/kubernetes/pki + chmod 600 -R /etc/cni/net.d && chown root:root -R /etc/cni/net.d + chmod 550 /usr/local/bin/ && chown root:root /usr/local/bin/ + chmod 550 -R /usr/local/bin/kube* && chown root:root -R /usr/local/bin/kube* + chmod 550 /usr/local/bin/helm && chown root:root /usr/local/bin/helm + chmod 550 -R /opt/cni/bin && chown root:root -R /opt/cni/bin + chmod 640 /var/lib/kubelet/config.yaml && chown root:root /var/lib/kubelet/config.yaml + chmod 640 -R /etc/systemd/system/kubelet.service* && chown root:root -R /etc/systemd/system/kubelet.service* + chmod 640 /etc/systemd/system/k8s-certs-renew* && chown root:root /etc/systemd/system/k8s-certs-renew* + when: inventory_name in groups['kube_control_plane'] + +- name: security enhancement for worker + command: | + chmod 644 /etc/kubernetes && chown root:root /etc/kubernetes + chmod 600 -R /etc/kubernetes && chown root:root -R /etc/kubernetes/* + chmod 644 /etc/kubernetes/manifests && chown root:root /etc/kubernetes/manifests + chmod 644 /etc/kubernetes/pki && chown root:root /etc/kubernetes/pki + chmod 600 -R /etc/cni/net.d && chown root:root -R /etc/cni/net.d + chmod 550 /usr/local/bin/ && chown root:root /usr/local/bin/ + chmod 550 -R /usr/local/bin/kube* && chown root:root -R /usr/local/bin/kube* + chmod 550 /usr/local/bin/helm && chown root:root /usr/local/bin/helm + chmod 550 -R /opt/cni/bin && chown root:root -R /opt/cni/bin + chmod 640 /var/lib/kubelet/config.yaml && chown root:root /var/lib/kubelet/config.yaml + chmod 640 -R /etc/systemd/system/kubelet.service* && chown root:root -R /etc/systemd/system/kubelet.service* + when: inventory_name in 
groups['kube_worker'] diff --git a/builtin/roles/precheck/artifact_check/tasks/main.yaml b/builtin/roles/precheck/artifact_check/tasks/main.yaml new file mode 100644 index 00000000..37f3ae39 --- /dev/null +++ b/builtin/roles/precheck/artifact_check/tasks/main.yaml @@ -0,0 +1,20 @@ +--- +- name: Check if artifact file exists + command: + if [ ! -f "{{ artifact.artifact_file }}" ]; then + exit 1 + fi + +- name: Check artifact file type + command: + if [[ "{{ artifact.artifact_file }}" != *.tgz && "{{ artifact.artifact_file }}" != *.tar.gz ]]; then + exit 1 + fi + +- name: Check md5 of artifact + command: + if [[ $(md5sum {{ artifact.artifact_file }} | awk '{print $1}') != {{ artifact.artifact_md5 }} ]]; then + exit 1 + fi + when: + - artifact.artifact_md5 | defined diff --git a/builtin/roles/precheck/env_check/defaults/main.yaml b/builtin/roles/precheck/env_check/defaults/main.yaml new file mode 100644 index 00000000..39e46e9c --- /dev/null +++ b/builtin/roles/precheck/env_check/defaults/main.yaml @@ -0,0 +1,25 @@ +cluster_require: + allow_unsupported_distribution_setup: false + # support ubuntu, centos. + supported_os_distributions: ['ubuntu', 'centos'] + require_network_plugin: ['calico', 'flannel', 'cilium', 'hybridnet', 'kube-ovn'] + # the minimal version of kubernetes to be installed. + kube_version_min_required: v1.19.10 + # minimal memory size for each kube_control_plane node (unit: kB). + # node memory should be greater than or equal to minimal_master_memory_mb. + minimal_master_memory_mb: 10 + # minimal memory size for each kube_worker node (unit: kB). + # node memory should be greater than or equal to minimal_node_memory_mb. + minimal_node_memory_mb: 10 + require_etcd_deployment_type: ['internal','external'] + require_container_manager: ['docker', 'containerd'] + require_containerd_version: ['latest', 'edge', 'stable'] + # the minimal required version of containerd to be installed. 
+ containerd_min_version_required: v1.6.0 + supported_architectures: + amd64: + - amd64 + - x86_64 + arm64: + - arm64 + - aarch64 diff --git a/builtin/roles/precheck/env_check/tasks/main.yaml b/builtin/roles/precheck/env_check/tasks/main.yaml new file mode 100644 index 00000000..3784388d --- /dev/null +++ b/builtin/roles/precheck/env_check/tasks/main.yaml @@ -0,0 +1,115 @@ +--- +- name: Stop if etcd deployment type is not internal or external + assert: + that: kubernetes.etcd.deployment_type in cluster_require.require_etcd_deployment_type + fail_msg: "The etcd deployment type, 'kubernetes.etcd.deployment_type', must be internal or external" + run_once: true + when: kubernetes.etcd.deployment_type | defined + +- name: Stop if etcd group is empty in external etcd mode + assert: + that: "'etcd' in groups" + fail_msg: "Group 'etcd' cannot be empty in external etcd mode" + run_once: true + when: + - kubernetes.etcd.deployment_type == "external" + +- name: Stop if the OS is not supported + assert: + that: (cluster_require.allow_unsupported_distribution_setup) or (os.release.ID in cluster_require.supported_os_distributions) + fail_msg: "{{ os.release.ID }} is not a known OS" + +- name: Stop if unknown network plugin + assert: + that: kubernetes.kube_network_plugin in cluster_require.require_network_plugin + fail_msg: "{{ kubernetes.kube_network_plugin }} is not supported" + when: + - kubernetes.kube_network_plugin | defined + +- name: Stop if unsupported version of Kubernetes + assert: + that: kube_version | version:'>={{ cluster_require.kube_version_min_required }}' + fail_msg: "The current release of KubeKey only supports Kubernetes versions newer than {{ cluster_require.kube_version_min_required }} - You are trying to apply {{ kube_version }}" + when: + - kube_version | defined + +- name: Stop if even number of etcd hosts + assert: + that: not groups.etcd | length | divisibleby:2 + when: + - inventory_name in groups['etcd'] + +- name: Stop if memory is too small for masters + assert: + that: process.memInfo.MemTotal | cut:' kB' >= cluster_require.minimal_master_memory_mb + when: + - inventory_name in groups['kube_control_plane'] + +- name: Stop if memory is too small for nodes + assert: + that: process.memInfo.MemTotal | cut:' kB' >= cluster_require.minimal_node_memory_mb + when: + - inventory_name in groups['kube_worker'] + +# This assertion will fail on the safe side: One can indeed schedule more pods +# on a node than the CIDR-range has space for when additional pods use the host +# network namespace. It is impossible to ascertain the number of such pods at +# provisioning time, so to establish a guarantee, we factor these out. +# NOTICE: the check blatantly ignores the inet6-case +- name: Guarantee that enough network address space is available for all pods + assert: + that: "(kubernetes.kubelet.max_pods | integer) <= (2 | pow: {{ 32 - kubernetes.controller_manager.kube_network_node_prefix | integer }} - 2)" + fail_msg: "Do not schedule more pods on a node than inet addresses are available." 
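+# Worked example (illustrative numbers): with kube_network_node_prefix=24, each node's CIDR provides 2^(32-24)-2 = 254 usable addresses, so max_pods must not exceed 254.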
+ when: + - inventory_name in groups['k8s_cluster'] + - kubernetes.controller_manager.kube_network_node_prefix | defined + - kubernetes.kube_network_plugin != 'calico' + +#- name: Stop if access_ip is not pingable +# command: ping -c1 {{ access_ip }} +# when: +# - access_ip | defined +# - ping_access_ip +# changed_when: false + +- name: Stop if kernel version is too low + assert: + that: os.kernel_version | split:'-' | first | version:'>=4.9.17' + when: + - kubernetes.kube_network_plugin == 'cilium' +# - kubernetes.kube_network_plugin == 'cilium' or (cilium_deploy_additionally | default:false) + +- name: Stop if bad hostname + vars: + regex: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$' + assert: + that: inventory_name | match:regex + fail_msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character" + +- name: Stop if container manager is not docker or containerd + assert: + that: cri.container_manager in cluster_require.require_container_manager + fail_msg: "The container manager, {{ cri.container_manager }}, must be docker or containerd" + run_once: true + when: cri.container_manager | defined + +- name: Ensure minimum containerd version + assert: + that: containerd_version | version:'>={{cluster_require.containerd_min_version_required}}' + fail_msg: "containerd_version is too low. Minimum version {{ cluster_require.containerd_min_version_required }}" + run_once: true + when: + - not containerd_version in cluster_require.require_containerd_version + - cri.container_manager == 'containerd' + +- name: Check if the CPU architecture is supported + assert: + that: os.architecture in cluster_require.supported_architectures.amd64 or os.architecture in cluster_require.supported_architectures.arm64 + success_msg: "{% if (os.architecture in cluster_require.supported_architectures.amd64) %}amd64{% else %}arm64{% endif %}" + register: binary_type + +- name: Stop if there is more than one nfs server + assert: + that: groups['nfs'] | length == 1 + fail_msg: "Only one nfs server is supported" + when: groups['nfs'] | length > 0 diff --git a/cmd/controller-manager/app/options/options.go b/cmd/controller-manager/app/options/options.go index 75bcfe08..39b937ac 100644 --- a/cmd/controller-manager/app/options/options.go +++ b/cmd/controller-manager/app/options/options.go @@ -79,4 +79,7 @@ func (o *ControllerManagerServerOptions) Flags() cliflag.NamedFlagSets { func (o *ControllerManagerServerOptions) Complete(cmd *cobra.Command, args []string) { // do nothing + if o.MaxConcurrentReconciles == 0 { + o.MaxConcurrentReconciles = 1 + } } diff --git a/cmd/kk/app/create.go b/cmd/kk/app/create.go new file mode 100644 index 00000000..6eb375b4 --- /dev/null +++ b/cmd/kk/app/create.go @@ -0,0 +1,74 @@ +//go:build builtin +// +build builtin + +/* +Copyright 2024 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package app + +import ( + "io/fs" + "os" + + "github.com/spf13/cobra" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + + "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" + _const "github.com/kubesphere/kubekey/v4/pkg/const" +) + +func newCreateCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "create", + Short: "Create a cluster or a cluster configuration file", + } + + cmd.AddCommand(newCreateClusterCommand()) + return cmd +} + +func newCreateClusterCommand() *cobra.Command { + o := options.NewCreateClusterOptions() + cmd := &cobra.Command{ + Use: "cluster", + Short: "Create a Kubernetes or KubeSphere cluster", + RunE: func(cmd *cobra.Command, args []string) error { + pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/create_cluster.yaml"}) + if err != nil { + return err + } + // set workdir + _const.SetWorkDir(o.WorkDir) + // create the workdir if it does not exist + if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) { + if err := os.MkdirAll(o.WorkDir, fs.ModePerm); err != nil { + return err + } + } + return run(signals.SetupSignalHandler(), pipeline, config, inventory, o.CommonOptions) + }, + } + + flags := cmd.Flags() + for _, f := range o.Flags().FlagSets { + flags.AddFlagSet(f) + } + return cmd +} + +func init() { + registerInternalCommand(newCreateCommand()) +} diff --git a/cmd/kk/app/options/common.go b/cmd/kk/app/options/common.go index 3312eab8..bfa988b0 100644 --- a/cmd/kk/app/options/common.go +++ b/cmd/kk/app/options/common.go @@ -17,16 +17,19 @@ limitations under the License. package options import ( + "encoding/json" + "fmt" "os" + "path/filepath" + "strings" corev1 "k8s.io/api/core/v1" cliflag "k8s.io/component-base/cli/flag" "k8s.io/klog/v2" "sigs.k8s.io/yaml" + "github.com/kubesphere/kubekey/v4/builtin" kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" - _const "github.com/kubesphere/kubekey/v4/pkg/const" - "github.com/kubesphere/kubekey/v4/project" ) type CommonOptions struct { @@ -36,28 +39,66 @@ type CommonOptions struct { InventoryFile string // ConfigFile is the path of config file ConfigFile string + // Set value in config + Set []string // WorkDir is the baseDir which command find any resource (project etc.) WorkDir string // Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters. Debug bool } +func newCommonOptions() CommonOptions { + o := CommonOptions{} + wd, err := os.Getwd() + if err != nil { + klog.ErrorS(err, "get current dir error") + o.WorkDir = "/tmp/kk" + } else { + o.WorkDir = wd + } + return o +} + func (o *CommonOptions) Flags() cliflag.NamedFlagSets { fss := cliflag.NamedFlagSets{} gfs := fss.FlagSet("generic") gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey. Default current dir. ") - gfs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "the config file path. support *.yaml ") - gfs.StringVar(&o.InventoryFile, "inventory", o.InventoryFile, "the host list file path. support *.ini") - gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.") + gfs.StringVarP(&o.ConfigFile, "config", "c", o.ConfigFile, "the config file path. supports *.yaml") + gfs.StringSliceVar(&o.Set, "set", o.Set, "set value in config. format --set key=val") + gfs.StringVarP(&o.InventoryFile, "inventory", "i", o.InventoryFile, "the host list file path. 
support *.ini") + gfs.BoolVarP(&o.Debug, "debug", "d", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.") return fss } -func completeRef(pipeline *kubekeyv1.Pipeline, configFile string, inventoryFile string) (*kubekeyv1.Config, *kubekeyv1.Inventory, error) { - config, err := genConfig(configFile) - if err != nil { - klog.V(4).ErrorS(err, "generate config error", "file", configFile) - return nil, nil, err +func (o *CommonOptions) completeRef(pipeline *kubekeyv1.Pipeline) (*kubekeyv1.Config, *kubekeyv1.Inventory, error) { + if !filepath.IsAbs(o.WorkDir) { + wd, err := os.Getwd() + if err != nil { + return nil, nil, fmt.Errorf("get current dir error: %v", err) + } + o.WorkDir = filepath.Join(wd, o.WorkDir) } + + config, err := genConfig(o.ConfigFile) + if err != nil { + return nil, nil, fmt.Errorf("generate config error: %v", err) + } + if wd, err := config.GetValue("work_dir"); err == nil && wd != nil { + // if work_dir is defined in config, use it. otherwise use current dir. + o.WorkDir = wd.(string) + } else if err := config.SetValue("work_dir", o.WorkDir); err != nil { + return nil, nil, fmt.Errorf("work_dir to config error: %v", err) + } + for _, s := range o.Set { + ss := strings.Split(s, "=") + if len(ss) != 2 { + return nil, nil, fmt.Errorf("--set value should be k=v") + } + if err := setValue(config, ss[0], ss[1]); err != nil { + return nil, nil, fmt.Errorf("--set value to config error: %v", err) + } + } + pipeline.Spec.ConfigRef = &corev1.ObjectReference{ Kind: config.Kind, Namespace: config.Namespace, @@ -67,10 +108,9 @@ func completeRef(pipeline *kubekeyv1.Pipeline, configFile string, inventoryFile ResourceVersion: config.ResourceVersion, } - inventory, err := genInventory(inventoryFile) + inventory, err := genInventory(o.InventoryFile) if err != nil { - klog.V(4).ErrorS(err, "generate inventory error", "file", inventoryFile) - return nil, nil, err + return nil, nil, fmt.Errorf("generate inventory error: %v", err) } pipeline.Spec.InventoryRef = &corev1.ObjectReference{ Kind: inventory.Kind, @@ -93,15 +133,13 @@ func genConfig(configFile string) (*kubekeyv1.Config, error) { if configFile != "" { cdata, err = os.ReadFile(configFile) } else { - cdata, err = project.InternalPipeline.ReadFile(_const.BuiltinConfigFile) + cdata = builtin.DefaultConfig } if err != nil { - klog.V(4).ErrorS(err, "read config file error") - return nil, err + return nil, fmt.Errorf("read config file error: %v", err) } if err := yaml.Unmarshal(cdata, config); err != nil { - klog.V(4).ErrorS(err, "unmarshal config file error") - return nil, err + return nil, fmt.Errorf("unmarshal config file error: %v", err) } if config.Namespace == "" { config.Namespace = corev1.NamespaceDefault @@ -118,7 +156,7 @@ func genInventory(inventoryFile string) (*kubekeyv1.Inventory, error) { if inventoryFile != "" { cdata, err = os.ReadFile(inventoryFile) } else { - cdata, err = project.InternalPipeline.ReadFile(_const.BuiltinInventoryFile) + cdata = builtin.DefaultInventory } if err != nil { klog.V(4).ErrorS(err, "read config file error") @@ -133,3 +171,24 @@ func genInventory(inventoryFile string) (*kubekeyv1.Inventory, error) { } return inventory, nil } + +func setValue(config *kubekeyv1.Config, key, val string) error { + switch { + case strings.HasPrefix(val, "{") && strings.HasSuffix(val, "{"): + var value map[string]any + err := json.Unmarshal([]byte(val), &value) + if err != nil { + return err + } + return config.SetValue(key, value) 
+ case strings.HasPrefix(val, "[") && strings.HasSuffix(val, "]"): + var value []any + err := json.Unmarshal([]byte(val), &value) + if err != nil { + return err + } + return config.SetValue(key, value) + default: + return config.SetValue(key, val) + } +} diff --git a/cmd/kk/app/options/create.go b/cmd/kk/app/options/create.go new file mode 100644 index 00000000..30e060c1 --- /dev/null +++ b/cmd/kk/app/options/create.go @@ -0,0 +1,98 @@ +/* +Copyright 2024 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + + kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + cliflag "k8s.io/component-base/cli/flag" +) + +func NewCreateClusterOptions() *CreateClusterOptions { + // set default value + return &CreateClusterOptions{CommonOptions: newCommonOptions()} +} + +type CreateClusterOptions struct { + CommonOptions + // kubernetes version which the cluster will install. + Kubernetes string + // ContainerRuntime for kubernetes. Such as docker, containerd etc. + ContainerManager string + // Artifact container all binaries which used to install kubernetes. + Artifact string +} + +func (o *CreateClusterOptions) Flags() cliflag.NamedFlagSets { + fss := o.CommonOptions.Flags() + kfs := fss.FlagSet("kubernetes") + kfs.StringVar(&o.Kubernetes, "with-kubernetes", "", "Specify a supported version of kubernetes") + kfs.StringVar(&o.ContainerManager, "container-manager", "", "Container runtime: docker, crio, containerd and isula.") + kfs.StringVarP(&o.Artifact, "artifact", "a", "", "Path to a KubeKey artifact") + return fss +} + +func (o *CreateClusterOptions) Complete(cmd *cobra.Command, args []string) (*kubekeyv1.Pipeline, *kubekeyv1.Config, *kubekeyv1.Inventory, error) { + pipeline := &kubekeyv1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "create-cluster-", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + kubekeyv1.BuiltinsProjectAnnotation: "", + }, + }, + } + + // complete playbook. 
now only support one playbook + if len(args) == 1 { + o.Playbook = args[0] + } else { + return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath()) + } + + pipeline.Spec = kubekeyv1.PipelineSpec{ + Playbook: o.Playbook, + Debug: o.Debug, + } + config, inventory, err := o.completeRef(pipeline) + if err != nil { + return nil, nil, nil, err + } + if o.Kubernetes != "" { + // override kube_version in config + if err := config.SetValue("kube_version", o.Kubernetes); err != nil { + return nil, nil, nil, err + } + } + if o.ContainerManager != "" { + // override container_manager in config + if err := config.SetValue("cri.container_manager", o.ContainerManager); err != nil { + return nil, nil, nil, err + } + } + if o.Artifact != "" { + // override artifact_file in config + if err := config.SetValue("artifact_file", o.Artifact); err != nil { + return nil, nil, nil, err + } + } + + return pipeline, config, inventory, nil +} diff --git a/cmd/kk/app/options/precheck.go b/cmd/kk/app/options/precheck.go index 0e6341ee..58094566 100644 --- a/cmd/kk/app/options/precheck.go +++ b/cmd/kk/app/options/precheck.go @@ -18,27 +18,17 @@ package options import ( "fmt" - "os" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" cliflag "k8s.io/component-base/cli/flag" - "k8s.io/klog/v2" kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" ) func NewPreCheckOptions() *PreCheckOptions { // set default value - o := &PreCheckOptions{} - wd, err := os.Getwd() - if err != nil { - klog.ErrorS(err, "get current dir error") - o.WorkDir = "/tmp/kk" - } else { - o.WorkDir = wd - } - return o + return &PreCheckOptions{CommonOptions: newCommonOptions()} } type PreCheckOptions struct { @@ -71,12 +61,10 @@ func (o *PreCheckOptions) Complete(cmd *cobra.Command, args []string) (*kubekeyv Playbook: o.Playbook, Debug: o.Debug, } - config, inventory, err := completeRef(pipeline, o.ConfigFile, o.InventoryFile) + config, inventory, err := o.completeRef(pipeline) if err != nil { return nil, nil, nil, err } - if err := config.SetValue("work_dir", o.WorkDir); err != nil { - return nil, nil, nil, err - } + return pipeline, config, inventory, nil } diff --git a/cmd/kk/app/options/run.go b/cmd/kk/app/options/run.go index ca6b5325..af9e7f32 100644 --- a/cmd/kk/app/options/run.go +++ b/cmd/kk/app/options/run.go @@ -27,10 +27,7 @@ import ( ) type KubekeyRunOptions struct { - // WorkDir is the baseDir which command find any resource (project etc.) - WorkDir string - // Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters. - Debug bool + CommonOptions // ProjectAddr is the storage for executable packages (in Ansible format). // When starting with http or https, it will be obtained from a Git repository. // When starting with file path, it will be obtained from the local path. @@ -44,14 +41,8 @@ type KubekeyRunOptions struct { ProjectTag string // ProjectInsecureSkipTLS skip tls or not when git addr is https. ProjectInsecureSkipTLS bool - // ProjectToken auther + // ProjectToken to clone and pull git project ProjectToken string - // Playbook which to execute. 
- Playbook string - // HostFile is the path of host file - InventoryFile string - // ConfigFile is the path of config file - ConfigFile string // Tags is the tags of playbook which to execute Tags []string // SkipTags is the tags of playbook which skip execute @@ -60,18 +51,14 @@ type KubekeyRunOptions struct { func NewKubeKeyRunOptions() *KubekeyRunOptions { // add default values - o := &KubekeyRunOptions{} + o := &KubekeyRunOptions{ + CommonOptions: newCommonOptions(), + } return o } func (o *KubekeyRunOptions) Flags() cliflag.NamedFlagSets { - fss := cliflag.NamedFlagSets{} - gfs := fss.FlagSet("generic") - gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey. Default current dir. ") - gfs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "the config file path. support *.yaml ") - gfs.StringVar(&o.InventoryFile, "inventory", o.InventoryFile, "the host list file path. support *.ini") - gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.") - + fss := o.CommonOptions.Flags() gitfs := fss.FlagSet("project") gitfs.StringVar(&o.ProjectAddr, "project-addr", o.ProjectAddr, "the storage for executable packages (in Ansible format)."+ " When starting with http or https, it will be obtained from a Git repository."+ @@ -117,12 +104,10 @@ func (o *KubekeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kubeke SkipTags: o.SkipTags, Debug: o.Debug, } - config, inventory, err := completeRef(pipeline, o.ConfigFile, o.InventoryFile) + config, inventory, err := o.completeRef(pipeline) if err != nil { return nil, nil, nil, err } - if err := config.SetValue("work_dir", o.WorkDir); err != nil { - return nil, nil, nil, err - } + return pipeline, config, inventory, nil } diff --git a/cmd/kk/app/precheck.go b/cmd/kk/app/precheck.go index 25f153d9..7c9e8c87 100644 --- a/cmd/kk/app/precheck.go +++ b/cmd/kk/app/precheck.go @@ -1,3 +1,6 @@ +//go:build builtin +// +build builtin + /* Copyright 2023 The KubeSphere Authors. 
@@ -47,7 +50,7 @@ func newPreCheckCommand() *cobra.Command { return err } } - return run(signals.SetupSignalHandler(), pipeline, config, inventory) + return run(signals.SetupSignalHandler(), pipeline, config, inventory, o.CommonOptions) }, } @@ -57,3 +60,7 @@ func newPreCheckCommand() *cobra.Command { } return cmd } + +func init() { + registerInternalCommand(newPreCheckCommand()) +} diff --git a/cmd/kk/app/run.go b/cmd/kk/app/run.go index 1e186716..4bb4ef28 100644 --- a/cmd/kk/app/run.go +++ b/cmd/kk/app/run.go @@ -18,7 +18,6 @@ package app import ( "context" - "github.com/kubesphere/kubekey/v4/pkg/proxy" "io/fs" "os" @@ -30,6 +29,7 @@ import ( kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" _const "github.com/kubesphere/kubekey/v4/pkg/const" "github.com/kubesphere/kubekey/v4/pkg/manager" + "github.com/kubesphere/kubekey/v4/pkg/proxy" ) func newRunCommand() *cobra.Command { @@ -51,18 +51,17 @@ func newRunCommand() *cobra.Command { return err } } - return run(signals.SetupSignalHandler(), kk, config, inventory) + return run(signals.SetupSignalHandler(), kk, config, inventory, o.CommonOptions) }, } - fs := cmd.Flags() for _, f := range o.Flags().FlagSets { - fs.AddFlagSet(f) + cmd.Flags().AddFlagSet(f) } return cmd } -func run(ctx context.Context, kk *kubekeyv1.Pipeline, config *kubekeyv1.Config, inventory *kubekeyv1.Inventory) error { +func run(ctx context.Context, kk *kubekeyv1.Pipeline, config *kubekeyv1.Config, inventory *kubekeyv1.Inventory, o options.CommonOptions) error { if err := proxy.Init(); err != nil { return err } diff --git a/cmd/kk/app/server.go b/cmd/kk/app/server.go index def67876..9276f17c 100644 --- a/cmd/kk/app/server.go +++ b/cmd/kk/app/server.go @@ -25,6 +25,18 @@ import ( "k8s.io/klog/v2" ) +var internalCommand = []*cobra.Command{} + +func registerInternalCommand(command *cobra.Command) { + for _, c := range internalCommand { + if c.Name() == command.Name() { + // command has been registered. skip + return + } + } + internalCommand = append(internalCommand, command) +} + func NewKubeKeyCommand() *cobra.Command { cmd := &cobra.Command{ Use: "kk", @@ -40,6 +52,8 @@ func NewKubeKeyCommand() *cobra.Command { }, } + // todo: add --set to override the config.yaml data. + flags := cmd.PersistentFlags() addProfilingFlags(flags) addKlogFlags(flags) @@ -49,8 +63,7 @@ cmd.AddCommand(newVersionCommand()) // internal command - cmd.AddCommand(newPreCheckCommand()) - cmd.AddCommand(newCreateCommand()) + cmd.AddCommand(internalCommand...) return cmd } diff --git a/config/helm/crds/kubekey.kubesphere.io_pipelines.yaml b/config/helm/crds/kubekey.kubesphere.io_pipelines.yaml index 0500d2e7..511180ba 100644 --- a/config/helm/crds/kubekey.kubesphere.io_pipelines.yaml +++ b/config/helm/crds/kubekey.kubesphere.io_pipelines.yaml @@ -207,9 +207,6 @@ spec: ignored: description: Ignored number of tasks. type: integer - skipped: - description: Skipped number of tasks. - type: integer success: description: Success number of tasks. type: integer diff --git a/example/Makefile b/example/Makefile index a24082b0..f2d0b76d 100644 --- a/example/Makefile +++ b/example/Makefile @@ -1,23 +1,27 @@ -BaseDir := $(CURDIR)/.. 
-playbooks := bootstrap-os.yaml +BaseDir?=$(CURDIR) +GOOS?=$(shell go env GOOS) +GOARCH?=$(shell go env GOARCH) .PHONY: build build: - go build -o $(BaseDir)/example -gcflags all=-N github.com/kubesphere/kubekey/v4/cmd/kk + GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $(BaseDir)/kk-alpha -tags builtin -gcflags all=-N github.com/kubesphere/kubekey/v4/cmd/kk .PHONY: run-playbook run-playbook: build @for pb in $(playbooks); do \ - $(BaseDir)/example/kk run --work-dir=$(BaseDir)/example/test \ + $(BaseDir)/kk-alpha run --work-dir=$(BaseDir)/test \ --project-addr=git@github.com:littleBlackHouse/kse-installer.git \ - --project-branch=demo --inventory=$(BaseDir)/example/inventory.yaml \ - --config=$(BaseDir)/example/config.yaml \ + --project-branch=demo \ --debug playbooks/$$pb;\ done .PHONY: precheck precheck: build - $(BaseDir)/example/kk precheck --work-dir=$(BaseDir)/example/test \ - --inventory=$(BaseDir)/example/inventory.yaml \ - --config=$(BaseDir)/example/config.yaml + $(BaseDir)/kk-alpha precheck --work-dir=$(BaseDir)/test \ + --inventory=$(BaseDir)/inventory.yaml --debug +.PHONY: create-cluster +create-cluster: build + $(BaseDir)/kk-alpha create cluster --work-dir=$(BaseDir)/test \ + --inventory=$(BaseDir)/inventory.yaml \ + --debug diff --git a/example/config.yaml b/example/config.yaml deleted file mode 100644 index 19482c05..00000000 --- a/example/config.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: kubekey.kubesphere.io/v1 -kind: Config -metadata: - name: example -spec: - etcd_deployment_type: external - supported_os_distributions: [ ubuntu ] - kube_network_plugin: flannel - kube_version: 1.23.15 - kube_version_min_required: 1.19.10 - download_run_once: true - minimal_master_memory_mb: 10 #KB - minimal_node_memory_mb: 10 #KB - kube_network_node_prefix: 24 - container_manager: containerd - containerd_version: v1.7.0 - containerd_min_version_required: v1.6.0 - kube_external_ca_mode: true - cilium_deploy_additionally: true diff --git a/example/inventory.yaml b/example/inventory.yaml deleted file mode 100644 index d78c7967..00000000 --- a/example/inventory.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: kubekey.kubesphere.io/v1 -kind: Inventory -metadata: - name: example -spec: - hosts: - kk: - ssh_host: xxx - groups: - k8s_cluster: - groups: - - kube_control_plane - - kube_node - kube_control_plane: - hosts: - - kk - kube_node: - hosts: - - kk - etcd: - hosts: - - kk - vars: - ssh_port: xxx - ssh_user: xxx - ssh_password: xxx diff --git a/example/pipeline.yaml b/example/pipeline.yaml deleted file mode 100644 index c5f7da5f..00000000 --- a/example/pipeline.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: kubekey.kubesphere.io/v1 -kind: Pipeline -metadata: - name: precheck-example - annotations: - "kubekey.kubesphere.io/uiltins-project": "" -spec: - playbook: playbooks/precheck.yaml - inventoryRef: - apiVersion: kubekey.kubesphere.io/v1 - kind: Inventory - name: example - namespace: default - configRef: - apiVersion: kubekey.kubesphere.io/v1 - kind: Config - name: example - namespace: default diff --git a/go.mod b/go.mod index 73c211a0..f9dc0414 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/go-git/go-git/v5 v5.11.0 github.com/google/gops v0.3.28 github.com/google/uuid v1.5.0 + github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 @@ -55,7 +56,7 @@ require ( github.com/go-openapi/swag v0.22.7 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/cel-go v0.17.7 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect @@ -74,7 +75,6 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.18.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect @@ -113,7 +113,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect google.golang.org/grpc v1.60.1 // indirect - google.golang.org/protobuf v1.32.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index b74730be..b4cb8199 100644 --- a/go.sum +++ b/go.sum @@ -94,8 +94,8 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= @@ -360,8 +360,8 @@ google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/pkg/apis/kubekey/v1/config_types.go b/pkg/apis/kubekey/v1/config_types.go index 999c82b8..286dac90 100644 --- a/pkg/apis/kubekey/v1/config_types.go +++ b/pkg/apis/kubekey/v1/config_types.go @@ -20,6 +20,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/json" + "reflect" + "strings" ) // +genclient @@ -51,11 +53,32 @@ func (c *Config) SetValue(key string, value any) error { return err } // set value - configMap[key] = value - data, err := json.Marshal(configMap) + var f func(input map[string]any, key []string, value any) any + f = func(input map[string]any, key []string, value any) any { + if len(key) == 1 { + input[key[0]] = value + } else if len(key) > 1 { + if v, ok := input[key[0]]; ok && reflect.TypeOf(v).Kind() == reflect.Map { + input[key[0]] = f(v.(map[string]any), key[1:], value) + } else { + input[key[0]] = f(make(map[string]any), key[1:], value) + } + } + return input + } + data, err := json.Marshal(f(configMap, strings.Split(key, "."), value)) if err != nil { return err } c.Spec.Raw = data return nil } + +func (c *Config) GetValue(key string) (any, error) { + configMap := make(map[string]any) + if err := json.Unmarshal(c.Spec.Raw, &configMap); err != nil { + return nil, err + } + // get value + return configMap[key], nil +} diff --git a/pkg/apis/kubekey/v1/config_types_test.go b/pkg/apis/kubekey/v1/config_types_test.go new file mode 100644 index 00000000..18ebee13 --- /dev/null +++ b/pkg/apis/kubekey/v1/config_types_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2024 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/runtime" + "testing" +) + +func TestSetValue(t *testing.T) { + testcases := []struct { + name string + key string + val any + except Config + }{ + { + name: "one level", + key: "a", + val: 2, + except: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":2}`)}}, + }, + { + name: "two level repeat key", + key: "a.b", + val: 2, + except: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":{"b":2}}`)}}, + }, + { + name: "two level no-repeat key", + key: "b.c", + val: 2, + except: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1,"b":{"c":2}}`)}}, + }, + } + + for _, tc := range testcases { + in := Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1}`)}} + t.Run(tc.name, func(t *testing.T) { + err := in.SetValue(tc.key, tc.val) + assert.NoError(t, err) + t.Logf("%s", in.Spec.Raw) + t.Logf("%s", tc.except.Spec.Raw) + assert.Equal(t, tc.except, in) + }) + } +} diff --git a/pkg/apis/kubekey/v1/inventory_types.go b/pkg/apis/kubekey/v1/inventory_types.go index bc23bca0..27365889 100644 --- a/pkg/apis/kubekey/v1/inventory_types.go +++ b/pkg/apis/kubekey/v1/inventory_types.go @@ -51,7 +51,6 @@ type Inventory struct { metav1.ObjectMeta `json:"metadata,omitempty"` Spec InventorySpec `json:"spec,omitempty"` - //Status InventoryStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/kubekey/v1/pipeline_types.go b/pkg/apis/kubekey/v1/pipeline_types.go index 6d2a04f6..d960d7fe 100644 --- a/pkg/apis/kubekey/v1/pipeline_types.go +++ b/pkg/apis/kubekey/v1/pipeline_types.go @@ -33,8 +33,8 @@ const ( const ( // BuiltinsProjectAnnotation use builtins project of KubeKey BuiltinsProjectAnnotation = "kubekey.kubesphere.io/builtins-project" - // PauseAnnotation pause the pipeline - PauseAnnotation = "kubekey.kubesphere.io/pause" + //// PauseAnnotation pause the pipeline + //PauseAnnotation = "kubekey.kubesphere.io/pause" ) type PipelineSpec struct { @@ -101,8 +101,6 @@ type PipelineTaskResult struct { Success int `json:"success,omitempty"` // Failed number of tasks. Failed int `json:"failed,omitempty"` - // Skipped number of tasks. - Skipped int `json:"skipped,omitempty"` // Ignored number of tasks. 
Ignored int `json:"ignored,omitempty"` } diff --git a/pkg/apis/kubekey/v1alpha1/task_types.go b/pkg/apis/kubekey/v1alpha1/task_types.go index 6ee42eb5..23399702 100644 --- a/pkg/apis/kubekey/v1alpha1/task_types.go +++ b/pkg/apis/kubekey/v1alpha1/task_types.go @@ -28,7 +28,6 @@ const ( TaskPhaseRunning TaskPhase = "Running" TaskPhaseSuccess TaskPhase = "Success" TaskPhaseFailed TaskPhase = "Failed" - TaskPhaseSkipped TaskPhase = "Skipped" TaskPhaseIgnored TaskPhase = "Ignored" ) @@ -103,11 +102,7 @@ type TaskList struct { } func (t Task) IsComplete() bool { - return t.IsSucceed() || t.IsFailed() || t.IsSkipped() -} - -func (t Task) IsSkipped() bool { - return t.Status.Phase == TaskPhaseSkipped + return t.IsSucceed() || t.IsFailed() } func (t Task) IsSucceed() bool { diff --git a/pkg/connector/connector.go b/pkg/connector/connector.go index 6729f699..d98f3c2c 100644 --- a/pkg/connector/connector.go +++ b/pkg/connector/connector.go @@ -22,6 +22,7 @@ import ( "io/fs" "os" + "k8s.io/klog/v2" "k8s.io/utils/exec" _const "github.com/kubesphere/kubekey/v4/pkg/const" @@ -43,34 +44,71 @@ type Connector interface { } // NewConnector creates a new connector -func NewConnector(host string, vars variable.VariableData) Connector { +// if "connector" is set to local, use the local connector +// if "connector" is set to ssh, use the ssh connector +// if "connector" is not set: when the host is localhost, use the local connector, otherwise use the ssh connector +// vars contains all inventory variables for the host. It's best to define the connector info in the inventory file. +func NewConnector(host string, vars map[string]any) (Connector, error) { switch vars["connector"] { case "local": - return &localConnector{Cmd: exec.New()} + return &localConnector{Cmd: exec.New()}, nil case "ssh": - if variable.StringVar(vars, "ssh_host") != nil { - host = *variable.StringVar(vars, "ssh_host") + hostParam, err := variable.StringVar(nil, vars, "ssh_host") + if err != nil { + return nil, err + } + + portParam, err := variable.IntVar(nil, vars, "ssh_port") + if err != nil { // default port 22 + klog.InfoS("get ssh port failed, using default port 22", "error", err) + portParam = 22 + } + + userParam, err := variable.StringVar(nil, vars, "ssh_user") + if err != nil { + return nil, err + } + + passParam, err := variable.StringVar(nil, vars, "ssh_password") + if err != nil { + return nil, err } return &sshConnector{ - Host: host, - Port: variable.IntVar(vars, "ssh_port"), - User: variable.StringVar(vars, "ssh_user"), - Password: variable.StringVar(vars, "ssh_password"), - } + Host: hostParam, + Port: portParam, + User: userParam, + Password: passParam, + }, nil default: localHost, _ := os.Hostname() if host == _const.LocalHostName || localHost == host { - return &localConnector{Cmd: exec.New()} + return &localConnector{Cmd: exec.New()}, nil } - if variable.StringVar(vars, "ssh_host") != nil { - host = *variable.StringVar(vars, "ssh_host") + hostParam, err := variable.StringVar(nil, vars, "ssh_host") + if err != nil { + return nil, err + } + + portParam, err := variable.IntVar(nil, vars, "ssh_port") + if err != nil { + return nil, err + } + + userParam, err := variable.StringVar(nil, vars, "ssh_user") + if err != nil { + return nil, err + } + + passParam, err := variable.StringVar(nil, vars, "ssh_password") + if err != nil { + return nil, err } return &sshConnector{ - Host: host, - Port: variable.IntVar(vars, "ssh_port"), - User: variable.StringVar(vars, "ssh_user"), - Password: variable.StringVar(vars, "ssh_password"), - } + Host: hostParam, + Port: portParam, + User: userParam,
Password: passParam, + }, nil } } diff --git a/pkg/connector/local_connector.go b/pkg/connector/local_connector.go index d952a3c7..6253b073 100644 --- a/pkg/connector/local_connector.go +++ b/pkg/connector/local_connector.go @@ -43,17 +43,17 @@ func (c *localConnector) CopyFile(ctx context.Context, local []byte, remoteFile // create remote file if _, err := os.Stat(filepath.Dir(remoteFile)); err != nil && os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(remoteFile), mode); err != nil { - klog.V(4).ErrorS(err, "Failed to create remote dir", "remote_file", remoteFile) + klog.V(4).ErrorS(err, "Failed to create local dir", "remote_file", remoteFile) return err } } rf, err := os.Create(remoteFile) if err != nil { - klog.V(4).ErrorS(err, "Failed to create remote file", "remote_file", remoteFile) + klog.V(4).ErrorS(err, "Failed to create local file", "remote_file", remoteFile) return err } if _, err := rf.Write(local); err != nil { - klog.V(4).ErrorS(err, "Failed to write content to remote file", "remote_file", remoteFile) + klog.V(4).ErrorS(err, "Failed to write content to local file", "remote_file", remoteFile) return err } return rf.Chmod(mode) diff --git a/pkg/connector/local_connector_test.go b/pkg/connector/local_connector_test.go index da0b3fc3..b0d3e5ed 100644 --- a/pkg/connector/local_connector_test.go +++ b/pkg/connector/local_connector_test.go @@ -32,7 +32,7 @@ func newFakeLocalConnector(runCmd string, output string) *localConnector { return &localConnector{ Cmd: &testingexec.FakeExec{CommandScript: []testingexec.FakeCommandAction{ func(cmd string, args ...string) exec.Cmd { - if strings.TrimSpace(fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))) == runCmd { + if strings.TrimSpace(fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))) == fmt.Sprintf("/bin/sh -c %s", runCmd) { return &testingexec.FakeCmd{ CombinedOutputScript: []testingexec.FakeAction{func() ([]byte, []byte, error) { return []byte(output), nil, nil diff --git a/pkg/connector/ssh_connector.go b/pkg/connector/ssh_connector.go index 91eb7bbd..73eacc21 100644 --- a/pkg/connector/ssh_connector.go +++ b/pkg/connector/ssh_connector.go @@ -23,20 +23,18 @@ import ( "io/fs" "os" "path/filepath" - "strconv" "time" "github.com/pkg/sftp" "golang.org/x/crypto/ssh" "k8s.io/klog/v2" - "k8s.io/utils/pointer" ) type sshConnector struct { Host string - Port *int - User *string - Password *string + Port int + User string + Password string client *ssh.Client } @@ -44,23 +42,20 @@ func (c *sshConnector) Init(ctx context.Context) error { if c.Host == "" { return fmt.Errorf("host is not set") } - if c.Port == nil { - c.Port = pointer.Int(22) - } var auth []ssh.AuthMethod - if c.Password != nil { + if c.Password != "" { auth = []ssh.AuthMethod{ - ssh.Password(*c.Password), + ssh.Password(c.Password), } } - sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", c.Host, strconv.Itoa(*c.Port)), &ssh.ClientConfig{ - User: pointer.StringDeref(c.User, ""), + sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%v", c.Host, c.Port), &ssh.ClientConfig{ + User: c.User, Auth: auth, HostKeyCallback: ssh.InsecureIgnoreHostKey(), Timeout: 30 * time.Second, }) if err != nil { - klog.V(4).ErrorS(err, "Dial ssh server failed", "host", c.Host, "port", *c.Port) + klog.V(4).ErrorS(err, "Dial ssh server failed", "host", c.Host, "port", c.Port) return err } c.client = sshClient @@ -109,12 +104,14 @@ func (c *sshConnector) FetchFile(ctx context.Context, remoteFile string, local i return err } defer sftpClient.Close() + rf, err := sftpClient.Open(remoteFile) if 
err != nil { klog.V(4).ErrorS(err, "Failed to open file", "remote_file", remoteFile) return err } defer rf.Close() + if _, err := io.Copy(local, rf); err != nil { klog.V(4).ErrorS(err, "Failed to copy file", "remote_file", remoteFile) return err @@ -123,7 +120,7 @@ } func (c *sshConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) { - klog.V(4).InfoS("exec ssh command", "cmd", cmd) + klog.V(4).InfoS("exec ssh command", "cmd", cmd, "host", c.Host) // create ssh session session, err := c.client.NewSession() if err != nil { diff --git a/pkg/const/common.go b/pkg/const/common.go index 18849e7a..bf88b078 100644 --- a/pkg/const/common.go +++ b/pkg/const/common.go @@ -19,10 +19,11 @@ package _const // LocalHostName is the default local host name in inventory. const LocalHostName = "localhost" -// the file in builtinProject +// top-level variable keys const ( - // default config file - BuiltinConfigFile = "inventory/config.yaml" - // default inventory file - BuiltinInventoryFile = "inventory/inventory.yaml" + VariableHostName = "inventory_name" // the host's own name in the inventory + // VariableGlobalHosts the key is a host_name, the value is the host_var defined in the inventory. + VariableGlobalHosts = "inventory_hosts" + // VariableGroups the key is a group's name, the value is a slice of host_names. + VariableGroups = "groups" ) diff --git a/pkg/const/workdir.go b/pkg/const/workdir.go index 94bbfbe5..54b323d8 100644 --- a/pkg/const/workdir.go +++ b/pkg/const/workdir.go @@ -102,9 +102,6 @@ const RuntimePipelineDir = "pipelines" // RuntimePipelineVariableDir is a fixed directory name under runtime, used to store the task execution parameters. const RuntimePipelineVariableDir = "variable" -// RuntimePipelineVariableLocationFile is a location variable file under RuntimePipelineVariableDir -const RuntimePipelineVariableLocationFile = "location.json" - // RuntimePipelineTaskDir is a fixed directory name under runtime, used to store the task execution status.
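To make the reworked connector contract concrete, here is a minimal caller sketch, assuming the ssh_* inventory keys read by NewConnector above; the host name and credentials are hypothetical and error handling is abbreviated:

package main

import (
	"context"
	"fmt"

	"github.com/kubesphere/kubekey/v4/pkg/connector"
)

func main() {
	// Inventory vars for one host, following the convention NewConnector expects.
	vars := map[string]any{
		"connector":    "ssh",
		"ssh_host":     "192.168.0.2",
		"ssh_port":     22,
		"ssh_user":     "root",
		"ssh_password": "secret",
	}
	// NewConnector now fails fast when ssh_host/ssh_user/ssh_password are
	// missing from the inventory; a missing ssh_port falls back to 22.
	conn, err := connector.NewConnector("node1", vars)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	if err := conn.Init(ctx); err != nil {
		panic(err)
	}
	out, err := conn.ExecuteCommand(ctx, "uname -r")
	fmt.Println(string(out), err)
}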
// task.yaml is the data of Task resource diff --git a/pkg/controllers/pipeline_controller.go b/pkg/controllers/pipeline_controller.go index 3a16e688..da7e57ae 100644 --- a/pkg/controllers/pipeline_controller.go +++ b/pkg/controllers/pipeline_controller.go @@ -18,7 +18,8 @@ package controllers import ( "context" - "fmt" + "github.com/kubesphere/kubekey/v4/pkg/executor" + "k8s.io/apimachinery/pkg/runtime" "os" "k8s.io/apimachinery/pkg/api/errors" @@ -32,7 +33,6 @@ import ( kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1" _const "github.com/kubesphere/kubekey/v4/pkg/const" - "github.com/kubesphere/kubekey/v4/pkg/task" "github.com/kubesphere/kubekey/v4/pkg/variable" ) @@ -41,11 +41,10 @@ const ( ) type PipelineReconciler struct { + *runtime.Scheme ctrlclient.Client record.EventRecorder - TaskController task.Controller - ctrlfinalizer.Finalizers } @@ -111,11 +110,11 @@ func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct } func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *kubekeyv1.Pipeline) (ctrl.Result, error) { - if _, ok := pipeline.Annotations[kubekeyv1.PauseAnnotation]; ok { - // if pipeline is paused, do nothing - klog.V(5).InfoS("pipeline is paused", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) - return ctrl.Result{}, nil - } + //if _, ok := pipeline.Annotations[kubekeyv1.PauseAnnotation]; ok { + // // if pipeline is paused, do nothing + // klog.V(5).InfoS("pipeline is paused", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) + // return ctrl.Result{}, nil + //} cp := pipeline.DeepCopy() defer func() { @@ -125,11 +124,9 @@ func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline * } }() - if err := r.TaskController.AddTasks(ctx, pipeline); err != nil { - klog.V(5).ErrorS(err, "add task error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) - pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed - pipeline.Status.Reason = fmt.Sprintf("add task to controller failed: %v", err) - return ctrl.Result{}, err + if err := executor.NewTaskExecutor(r.Scheme, r.Client, pipeline).Exec(ctx); err != nil { + klog.ErrorS(err, "Create task controller error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) + return ctrl.Result{}, nil } return ctrl.Result{}, nil diff --git a/pkg/controllers/task_controller.go b/pkg/controllers/task_controller.go deleted file mode 100644 index 64aa45cc..00000000 --- a/pkg/controllers/task_controller.go +++ /dev/null @@ -1,540 +0,0 @@ -/* -Copyright 2023 The KubeSphere Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "context" - "fmt" - "reflect" - "regexp" - "strings" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" - ctrl "sigs.k8s.io/controller-runtime" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - - kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" - kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1" - "github.com/kubesphere/kubekey/v4/pkg/converter" - "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" - "github.com/kubesphere/kubekey/v4/pkg/modules" - "github.com/kubesphere/kubekey/v4/pkg/variable" -) - -type TaskReconciler struct { - // Client to resources - ctrlclient.Client -} - -type taskReconcileOptions struct { - *kubekeyv1.Pipeline - *kubekeyv1alpha1.Task - variable.Variable -} - -func (r *TaskReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { - klog.V(5).InfoS("start task reconcile", "task", request.String()) - defer klog.V(5).InfoS("finish task reconcile", "task", request.String()) - // get task - var task = &kubekeyv1alpha1.Task{} - if err := r.Client.Get(ctx, request.NamespacedName, task); err != nil { - klog.V(5).ErrorS(err, "get task error", "task", request.String()) - return ctrl.Result{}, nil - } - - // if task is deleted, skip - if task.DeletionTimestamp != nil { - klog.V(5).InfoS("task is deleted, skip", "task", request.String()) - return ctrl.Result{}, nil - } - - // get pipeline - var pipeline = &kubekeyv1.Pipeline{} - for _, ref := range task.OwnerReferences { - if ref.Kind == "Pipeline" { - if err := r.Client.Get(ctx, types.NamespacedName{Namespace: task.Namespace, Name: ref.Name}, pipeline); err != nil { - if errors.IsNotFound(err) { - klog.V(5).InfoS("pipeline is deleted, skip", "task", request.String()) - return ctrl.Result{}, nil - } - klog.V(5).ErrorS(err, "get pipeline error", "task", request.String(), "pipeline", types.NamespacedName{Namespace: task.Namespace, Name: ref.Name}.String()) - return ctrl.Result{}, err - } - break - } - } - - if _, ok := pipeline.Annotations[kubekeyv1.PauseAnnotation]; ok { - klog.V(5).InfoS("pipeline is paused, skip", "task", request.String()) - return ctrl.Result{}, nil - } - - // get variable - v, err := variable.GetVariable(variable.Options{ - Ctx: ctx, - Client: r.Client, - Pipeline: *pipeline, - }) - if err != nil { - return ctrl.Result{}, err - } - - defer func() { - if task.IsComplete() { - klog.Infof("[Task %s] \"%s\" is complete.Result is: %s", request.String(), task.Spec.Name, task.Status.Phase) - } - var nsTasks = &kubekeyv1alpha1.TaskList{} - klog.V(5).InfoS("update pipeline status", "task", request.String(), "pipeline", ctrlclient.ObjectKeyFromObject(pipeline).String()) - if err := r.Client.List(ctx, nsTasks, ctrlclient.InNamespace(pipeline.Namespace), ctrlclient.MatchingFields{ - kubekeyv1alpha1.TaskOwnerField: ctrlclient.ObjectKeyFromObject(pipeline).String(), - }); err != nil { - klog.V(5).ErrorS(err, "list task error", "task", request.String()) - return - } - cp := pipeline.DeepCopy() - converter.CalculatePipelineStatus(nsTasks, pipeline) - if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(cp)); err != nil { - klog.V(5).ErrorS(err, "update pipeline status error", "task", request.String(), "pipeline", ctrlclient.ObjectKeyFromObject(pipeline).String()) - } - }() - - switch task.Status.Phase { - case kubekeyv1alpha1.TaskPhaseFailed: - if 
task.Spec.Retries > task.Status.RestartCount { - task.Status.Phase = kubekeyv1alpha1.TaskPhasePending - task.Status.RestartCount++ - if err := r.Client.Status().Update(ctx, task); err != nil { - klog.V(5).ErrorS(err, "update task error", "task", request.String()) - return ctrl.Result{}, err - } - } - return ctrl.Result{}, nil - case kubekeyv1alpha1.TaskPhasePending: - // deal pending task - return r.dealPendingTask(ctx, taskReconcileOptions{ - Pipeline: pipeline, - Task: task, - Variable: v, - }) - case kubekeyv1alpha1.TaskPhaseRunning: - // deal running task - return r.dealRunningTask(ctx, taskReconcileOptions{ - Pipeline: pipeline, - Task: task, - Variable: v, - }) - default: - return ctrl.Result{}, nil - } -} - -func (r *TaskReconciler) dealPendingTask(ctx context.Context, options taskReconcileOptions) (ctrl.Result, error) { - var nsTasks = &kubekeyv1alpha1.TaskList{} - if err := r.Client.List(ctx, nsTasks, ctrlclient.InNamespace(options.Pipeline.Namespace), ctrlclient.MatchingFields{ - kubekeyv1alpha1.TaskOwnerField: ctrlclient.ObjectKeyFromObject(options.Pipeline).String(), - }); err != nil { - klog.V(5).ErrorS(err, "list task error", "task", ctrlclient.ObjectKeyFromObject(options.Task).String(), err) - return ctrl.Result{}, err - } - - // Infer the current task's phase from its dependent tasks. - dl, err := options.Variable.Get(variable.InferPhase{ - LocationUID: string(options.Task.UID), - Tasks: nsTasks.Items, - }) - klog.InfoS("infer phase", "phase", dl, "task-name", options.Task.Spec.Name) - if err != nil { - klog.V(5).ErrorS(err, "find dependency error", "task", ctrlclient.ObjectKeyFromObject(options.Task).String()) - return ctrl.Result{}, err - } - - // Based on the results of the executed tasks dependent on, infer the next phase of the current task. - switch dl.(kubekeyv1alpha1.TaskPhase) { - case kubekeyv1alpha1.TaskPhasePending: - return ctrl.Result{Requeue: true}, nil - case kubekeyv1alpha1.TaskPhaseRunning: - // update task phase to running - options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseRunning - if err := r.Client.Status().Update(ctx, options.Task); err != nil { - klog.V(5).ErrorS(err, "update task to Running error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - } - return ctrl.Result{Requeue: true}, nil - case kubekeyv1alpha1.TaskPhaseSkipped: - options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseSkipped - if err := r.Client.Status().Update(ctx, options.Task); err != nil { - klog.V(5).ErrorS(err, "update task to Skipped error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - } - return ctrl.Result{}, nil - default: - return ctrl.Result{}, fmt.Errorf("unknown TependencyTask.Strategy result. only support: Pending, Running, Skipped") - } -} - -func (r *TaskReconciler) dealRunningTask(ctx context.Context, options taskReconcileOptions) (ctrl.Result, error) { - if err := r.prepareTask(ctx, options); err != nil { - klog.V(5).ErrorS(err, "prepare task error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - return ctrl.Result{}, nil - } - // find task in location - if err := r.executeTask(ctx, options); err != nil { - klog.V(5).ErrorS(err, "execute task error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - return ctrl.Result{}, nil - } - return ctrl.Result{}, nil -} - -func (r *TaskReconciler) prepareTask(ctx context.Context, options taskReconcileOptions) error { - // trans variable to location - // if variable contains template syntax. parse it and store in host. 
- for _, h := range options.Task.Spec.Hosts { - host := h - lg, err := options.Variable.Get(variable.LocationVars{ - HostName: host, - LocationUID: string(options.Task.UID), - }) - if err != nil { - klog.V(5).ErrorS(err, "get location variable error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseFailed - options.Task.Status.FailedDetail = append(options.Task.Status.FailedDetail, kubekeyv1alpha1.TaskFailedDetail{ - Host: host, - StdErr: "parse variable error", - }) - return err - } - - var curVariable = lg.(variable.VariableData).DeepCopy() - if pt := variable.BoolVar(curVariable, "prepareTask"); pt != nil && *pt { - klog.InfoS("prepareTask is true, skip", "task", ctrlclient.ObjectKeyFromObject(options.Task), "host", host) - continue - } - - var parseTmpl = func(tmplStr string) (string, error) { - return tmpl.ParseString(curVariable, tmplStr) - } - // parse variable with three time. ( support 3 level reference.) - for i := 0; i < 3; i++ { - if err := r.parseVariable(ctx, curVariable, parseTmpl); err != nil { - klog.V(5).ErrorS(err, "parse variable error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseFailed - options.Task.Status.FailedDetail = append(options.Task.Status.FailedDetail, kubekeyv1alpha1.TaskFailedDetail{ - Host: host, - StdErr: fmt.Sprintf("parse variable error: %s", err.Error()), - }) - return err - } - } - - // set prepareTask to true - curVariable["prepareTask"] = true - if err := options.Variable.Merge(variable.HostMerge{ - HostNames: []string{h}, - LocationUID: string(options.Task.UID), - Data: curVariable, - }); err != nil { - return err - } - } - return nil -} - -func (r *TaskReconciler) parseVariable(ctx context.Context, in variable.VariableData, parseTmplFunc func(string) (string, error)) error { - for k, v := range in { - switch reflect.TypeOf(v).Kind() { - case reflect.String: - if r.isTmplSyntax(v.(string)) { - newValue, err := parseTmplFunc(v.(string)) - if err != nil { - return err - } - in[k] = newValue - } - case reflect.Map: - // variable.VariableData has one more String() method than map[string]any, - // so variable.VariableData and map[string]any cannot be converted to each other. - if vv, ok := v.(map[string]interface{}); ok { - if err := r.parseVariable(ctx, vv, parseTmplFunc); err != nil { - return err - } - } - if vv, ok := v.(variable.VariableData); ok { - if err := r.parseVariable(ctx, vv, parseTmplFunc); err != nil { - return err - } - } - case reflect.Slice: - for i := 0; i < reflect.ValueOf(v).Len(); i++ { - elem := reflect.ValueOf(v).Index(i) - switch elem.Kind() { - case reflect.String: - if r.isTmplSyntax(elem.Interface().(string)) { - newValue, err := parseTmplFunc(elem.Interface().(string)) - if err != nil { - return err - } - reflect.ValueOf(v).Index(i).SetString(newValue) - } - case reflect.Map: - // variable.VariableData has one more String() method than map[string]any, - // so variable.VariableData and map[string]any cannot be converted to each other. 
- if vv, ok := v.(map[string]interface{}); ok { - if err := r.parseVariable(ctx, vv, parseTmplFunc); err != nil { - return err - } - } - if vv, ok := v.(variable.VariableData); ok { - if err := r.parseVariable(ctx, vv, parseTmplFunc); err != nil { - return err - } - } - } - } - } - } - return nil -} - -func (r *TaskReconciler) isTmplSyntax(s string) bool { - return (strings.Contains(s, "{{") && strings.Contains(s, "}}")) || - (strings.Contains(s, "{%") && strings.Contains(s, "%}")) -} - -func (r *TaskReconciler) executeTask(ctx context.Context, options taskReconcileOptions) error { - cd := kubekeyv1alpha1.TaskCondition{ - StartTimestamp: metav1.Now(), - } - defer func() { - cd.EndTimestamp = metav1.Now() - options.Task.Status.Conditions = append(options.Task.Status.Conditions, cd) - if err := r.Client.Status().Update(ctx, options.Task); err != nil { - klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - } - }() - - // check task host results - wg := &wait.Group{} - dataChan := make(chan kubekeyv1alpha1.TaskHostResult, len(options.Task.Spec.Hosts)) - for _, h := range options.Task.Spec.Hosts { - host := h - wg.StartWithContext(ctx, func(ctx context.Context) { - var stdout, stderr string - defer func() { - if stderr != "" { - klog.Errorf("[Task %s] run failed: %s", ctrlclient.ObjectKeyFromObject(options.Task), stderr) - } - - dataChan <- kubekeyv1alpha1.TaskHostResult{ - Host: host, - Stdout: stdout, - StdErr: stderr, - } - if options.Task.Spec.Register != "" { - puid, err := options.Variable.Get(variable.ParentLocation{LocationUID: string(options.Task.UID)}) - if err != nil { - klog.V(5).ErrorS(err, "get location error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - return - } - // set variable to parent location - if err := options.Variable.Merge(variable.HostMerge{ - HostNames: []string{h}, - LocationUID: puid.(string), - Data: variable.VariableData{ - options.Task.Spec.Register: map[string]string{ - "stdout": stdout, - "stderr": stderr, - }, - }, - }); err != nil { - klog.V(5).ErrorS(err, "register task result to variable error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - return - } - } - }() - - lg, err := options.Variable.Get(variable.LocationVars{ - HostName: host, - LocationUID: string(options.Task.UID), - }) - if err != nil { - klog.V(5).ErrorS(err, "get location variable error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - stderr = err.Error() - return - } - // check when condition - if len(options.Task.Spec.When) > 0 { - ok, err := tmpl.ParseBool(lg.(variable.VariableData), options.Task.Spec.When) - if err != nil { - klog.V(5).ErrorS(err, "parse when condition error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - stderr = err.Error() - return - } - if !ok { - stdout = "skip by when" - return - } - } - - // execute module with loop - loop, err := r.execLoop(ctx, host, options) - if err != nil { - klog.V(5).ErrorS(err, "parse loop vars error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - stderr = err.Error() - return - } - - for _, item := range loop { - switch item.(type) { - case nil: - // do nothing - case string: - item, err = tmpl.ParseString(lg.(variable.VariableData), item.(string)) - if err != nil { - klog.V(5).ErrorS(err, "parse loop vars error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - stderr = err.Error() - return - } - case variable.VariableData: - for k, v := range item.(variable.VariableData) { - sv, err := 
tmpl.ParseString(lg.(variable.VariableData), v.(string)) - if err != nil { - klog.V(5).ErrorS(err, "parse loop vars error", "task", ctrlclient.ObjectKeyFromObject(options.Task)) - stderr = err.Error() - return - } - item.(map[string]any)[k] = sv - } - default: - stderr = "unknown loop vars, only support string or map[string]string" - return - } - // set item to runtime variable - if err := options.Variable.Merge(variable.HostMerge{ - HostNames: []string{h}, - LocationUID: string(options.Task.UID), - Data: variable.VariableData{ - "item": item, - }, - }); err != nil { - stderr = "set loop item to variable error" - return - } - stdout, stderr = r.executeModule(ctx, options.Task, modules.ExecOptions{ - Args: options.Task.Spec.Module.Args, - Host: host, - Variable: options.Variable, - Task: *options.Task, - Pipeline: *options.Pipeline, - }) - } - }) - } - go func() { - wg.Wait() - close(dataChan) - }() - - options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseSuccess - for data := range dataChan { - if data.StdErr != "" { - if options.Task.Spec.IgnoreError { - options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseIgnored - } else { - options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseFailed - options.Task.Status.FailedDetail = append(options.Task.Status.FailedDetail, kubekeyv1alpha1.TaskFailedDetail{ - Host: data.Host, - Stdout: data.Stdout, - StdErr: data.StdErr, - }) - } - } - cd.HostResults = append(cd.HostResults, data) - } - - return nil -} - -func (r *TaskReconciler) execLoop(ctx context.Context, host string, options taskReconcileOptions) ([]any, error) { - switch { - case options.Task.Spec.Loop.Raw == nil: - // loop is not set. add one element to execute once module. - return []any{nil}, nil - case variable.Extension2Slice(options.Task.Spec.Loop) != nil: - return variable.Extension2Slice(options.Task.Spec.Loop), nil - case variable.Extension2String(options.Task.Spec.Loop) != "": - value := variable.Extension2String(options.Task.Spec.Loop) - // parse value by pongo2. if - data, err := options.Variable.Get(variable.LocationVars{ - HostName: host, - LocationUID: string(options.Task.UID), - }) - if err != nil { - return nil, err - } - sv, err := tmpl.ParseString(data.(variable.VariableData), value) - if err != nil { - return nil, err - } - switch { - case regexp.MustCompile(`^<\[\](.*?) Value>$`).MatchString(sv): - // in pongo2 we cannot get slice value. add extension filter value. 
- vdata, err := options.Variable.Get(variable.KeyPath{ - HostName: host, - LocationUID: string(options.Task.UID), - Path: strings.Split(strings.TrimSpace(strings.TrimSuffix(strings.TrimPrefix(value, "{{"), "}}")), - "."), - }) - if err != nil { - return nil, err - } - if _, ok := vdata.([]any); ok { - return vdata.([]any), nil - } - default: - // value is simple string - return []any{sv}, nil - } - } - return nil, fmt.Errorf("unsupport loop value") -} - -func (r *TaskReconciler) executeModule(ctx context.Context, task *kubekeyv1alpha1.Task, opts modules.ExecOptions) (string, string) { - lg, err := opts.Variable.Get(variable.LocationVars{ - HostName: opts.Host, - LocationUID: string(task.UID), - }) - if err != nil { - klog.V(5).ErrorS(err, "get location variable error", "task", ctrlclient.ObjectKeyFromObject(task)) - return "", err.Error() - } - - // check failed when condition - if len(task.Spec.FailedWhen) > 0 { - ok, err := tmpl.ParseBool(lg.(variable.VariableData), task.Spec.FailedWhen) - if err != nil { - klog.V(5).ErrorS(err, "validate FailedWhen condition error", "task", ctrlclient.ObjectKeyFromObject(task)) - return "", err.Error() - } - if ok { - return "", "failed by failedWhen" - } - } - - return modules.FindModule(task.Spec.Module.Name)(ctx, opts) -} diff --git a/pkg/converter/converter.go b/pkg/converter/converter.go index ebe173d9..32dfb070 100644 --- a/pkg/converter/converter.go +++ b/pkg/converter/converter.go @@ -19,215 +19,19 @@ package converter import ( "context" "fmt" - "io/fs" "math" - "path/filepath" "strconv" "strings" - "gopkg.in/yaml.v3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/json" "k8s.io/klog/v2" kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" - kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1" - _const "github.com/kubesphere/kubekey/v4/pkg/const" - "github.com/kubesphere/kubekey/v4/pkg/project" - "github.com/kubesphere/kubekey/v4/pkg/variable" ) -// MarshalPlaybook kkcorev1.Playbook from a playbook file -func MarshalPlaybook(baseFS fs.FS, pbPath string) (*kkcorev1.Playbook, error) { - // convert playbook to kkcorev1.Playbook - pb := &kkcorev1.Playbook{} - if err := loadPlaybook(baseFS, pbPath, pb); err != nil { - klog.V(4).ErrorS(err, "Load playbook failed", "playbook", pbPath) - return nil, err - } - - // convertRoles - if err := convertRoles(baseFS, pbPath, pb); err != nil { - klog.V(4).ErrorS(err, "ConvertRoles error", "playbook", pbPath) - return nil, err - } - - if err := convertIncludeTasks(baseFS, pbPath, pb); err != nil { - klog.V(4).ErrorS(err, "ConvertIncludeTasks error", "playbook", pbPath) - return nil, err - } - - if err := pb.Validate(); err != nil { - klog.V(4).ErrorS(err, "Validate playbook failed", "playbook", pbPath) - return nil, err - } - return pb, nil -} - -// loadPlaybook with include_playbook. 
Join all playbooks into one playbook -func loadPlaybook(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error { - // baseDir is the local ansible project dir which playbook belong to - pbData, err := fs.ReadFile(baseFS, pbPath) - if err != nil { - klog.V(4).ErrorS(err, "Read playbook failed", "playbook", pbPath) - return err - } - var plays []kkcorev1.Play - if err := yaml.Unmarshal(pbData, &plays); err != nil { - klog.V(4).ErrorS(err, "Unmarshal playbook failed", "playbook", pbPath) - return err - } - - for _, p := range plays { - if p.ImportPlaybook != "" { - importPlaybook := project.GetPlaybookBaseFromPlaybook(baseFS, pbPath, p.ImportPlaybook) - if importPlaybook == "" { - return fmt.Errorf("cannot found import playbook %s", importPlaybook) - } - if err := loadPlaybook(baseFS, importPlaybook, pb); err != nil { - return err - } - } - - // fill block in roles - for i, r := range p.Roles { - roleBase := project.GetRoleBaseFromPlaybook(baseFS, pbPath, r.Role) - if roleBase == "" { - return fmt.Errorf("cannot found role %s", r.Role) - } - mainTask := project.GetYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile)) - if mainTask == "" { - return fmt.Errorf("cannot found main task for role %s", r.Role) - } - - rdata, err := fs.ReadFile(baseFS, mainTask) - if err != nil { - klog.V(4).ErrorS(err, "Read role failed", "playbook", pbPath, "role", r.Role) - return err - } - var blocks []kkcorev1.Block - if err := yaml.Unmarshal(rdata, &blocks); err != nil { - klog.V(4).ErrorS(err, "Unmarshal role failed", "playbook", pbPath, "role", r.Role) - return err - } - p.Roles[i].Block = blocks - } - pb.Play = append(pb.Play, p) - } - - return nil -} - -// convertRoles convert roleName to block -func convertRoles(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error { - for i, p := range pb.Play { - for i, r := range p.Roles { - roleBase := project.GetRoleBaseFromPlaybook(baseFS, pbPath, r.Role) - if roleBase == "" { - return fmt.Errorf("cannot found role %s", r.Role) - } - - // load block - mainTask := project.GetYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile)) - if mainTask == "" { - return fmt.Errorf("cannot found main task for role %s", r.Role) - } - - rdata, err := fs.ReadFile(baseFS, mainTask) - if err != nil { - klog.V(4).ErrorS(err, "Read role failed", "playbook", pbPath, "role", r.Role) - return err - } - var blocks []kkcorev1.Block - if err := yaml.Unmarshal(rdata, &blocks); err != nil { - klog.V(4).ErrorS(err, "Unmarshal role failed", "playbook", pbPath, "role", r.Role) - return err - } - p.Roles[i].Block = blocks - - // load defaults (optional) - mainDefault := project.GetYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesDefaultsDir, _const.ProjectRolesDefaultsMainFile)) - if mainDefault != "" { - mainData, err := fs.ReadFile(baseFS, mainDefault) - if err != nil { - klog.V(4).ErrorS(err, "Read defaults variable for role error", "playbook", pbPath, "role", r.Role) - return err - } - var vars variable.VariableData - if err := yaml.Unmarshal(mainData, &vars); err != nil { - klog.V(4).ErrorS(err, "Unmarshal defaults variable for role error", "playbook", pbPath, "role", r.Role) - return err - } - p.Roles[i].Vars = vars - } - } - pb.Play[i] = p - } - return nil -} - -// convertIncludeTasks from file to blocks -func convertIncludeTasks(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error { - var pbBase = filepath.Dir(filepath.Dir(pbPath)) - for _, play := range pb.Play { - if 
err := fileToBlock(baseFS, pbBase, play.PreTasks); err != nil { - klog.V(4).ErrorS(err, "Convert pre_tasks error", "playbook", pbPath) - return err - } - if err := fileToBlock(baseFS, pbBase, play.Tasks); err != nil { - klog.V(4).ErrorS(err, "Convert tasks error", "playbook", pbPath) - return err - } - if err := fileToBlock(baseFS, pbBase, play.PostTasks); err != nil { - klog.V(4).ErrorS(err, "Convert post_tasks error", "playbook", pbPath) - return err - } - - for _, r := range play.Roles { - roleBase := project.GetRoleBaseFromPlaybook(baseFS, pbPath, r.Role) - if err := fileToBlock(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir), r.Block); err != nil { - klog.V(4).ErrorS(err, "Convert role error", "playbook", pbPath, "role", r.Role) - return err - } - } - } - return nil -} - -func fileToBlock(baseFS fs.FS, baseDir string, blocks []kkcorev1.Block) error { - for i, b := range blocks { - if b.IncludeTasks != "" { - data, err := fs.ReadFile(baseFS, filepath.Join(baseDir, b.IncludeTasks)) - if err != nil { - klog.V(4).ErrorS(err, "Read includeTask file error", "name", b.Name, "file_path", filepath.Join(baseDir, b.IncludeTasks)) - return err - } - var bs []kkcorev1.Block - if err := yaml.Unmarshal(data, &bs); err != nil { - klog.V(4).ErrorS(err, "Unmarshal includeTask data error", "name", b.Name, "file_path", filepath.Join(baseDir, b.IncludeTasks)) - return err - } - b.Block = bs - blocks[i] = b - } - if err := fileToBlock(baseFS, baseDir, b.Block); err != nil { - klog.V(4).ErrorS(err, "Convert block error", "name", b.Name) - return err - } - if err := fileToBlock(baseFS, baseDir, b.Rescue); err != nil { - klog.V(4).ErrorS(err, "Convert rescue error", "name", b.Name) - return err - } - if err := fileToBlock(baseFS, baseDir, b.Always); err != nil { - klog.V(4).ErrorS(err, "Convert always error", "name", b.Name) - return err - } - } - return nil -} - // MarshalBlock marshal block to task func MarshalBlock(ctx context.Context, role string, hosts []string, when []string, block kkcorev1.Block) *kubekeyv1alpha1.Task { task := &kubekeyv1alpha1.Task{ @@ -267,135 +71,53 @@ func GroupHostBySerial(hosts []string, serial []any) ([][]string, error) { if len(serial) == 0 { return [][]string{hosts}, nil } - result := make([][]string, 0) - sp := 0 - for _, a := range serial { + + // convertSerial to []int + var sis = make([]int, len(serial)) + // the count for sis + var count int + for i, a := range serial { switch a.(type) { case int: - if sp+a.(int) > len(hosts)-1 { - result = append(result, hosts[sp:]) - return result, nil - } - result = append(result, hosts[sp:sp+a.(int)]) - sp += a.(int) + sis[i] = a.(int) case string: if strings.HasSuffix(a.(string), "%") { - b, err := strconv.Atoi(strings.TrimSuffix(a.(string), "%")) + b, err := strconv.ParseFloat(a.(string)[:len(a.(string))-1], 64) if err != nil { - klog.V(4).ErrorS(err, "Convert serial to int failed", "serial", a.(string)) - return nil, err + return nil, fmt.Errorf("convert serial %v to float error", a) } - if sp+int(math.Ceil(float64(len(hosts)*b)/100.0)) > len(hosts)-1 { - result = append(result, hosts[sp:]) - return result, nil - } - result = append(result, hosts[sp:sp+int(math.Ceil(float64(len(hosts)*b)/100.0))]) - sp += int(math.Ceil(float64(len(hosts)*b) / 100.0)) + sis[i] = int(math.Ceil(float64(len(hosts)) * b / 100.0)) } else { b, err := strconv.Atoi(a.(string)) if err != nil { - klog.V(4).ErrorS(err, "Convert serial to int failed", "serial", a.(string)) - return nil, err + return nil, fmt.Errorf("convert serial %v to int 
faiiled", a) } - if sp+b > len(hosts)-1 { - result = append(result, hosts[sp:]) - return result, nil - } - result = append(result, hosts[sp:sp+b]) - sp += b + sis[i] = b } default: return nil, fmt.Errorf("unknown serial type. only support int or percent") } - } - // if serial is not match all hosts. use last serial - if sp < len(hosts) { - a := serial[len(serial)-1] - for { - switch a.(type) { - case int: - if sp+a.(int) > len(hosts)-1 { - result = append(result, hosts[sp:]) - return result, nil - } - result = append(result, hosts[sp:sp+a.(int)]) - sp += a.(int) - case string: - if strings.HasSuffix(a.(string), "%") { - b, err := strconv.Atoi(strings.TrimSuffix(a.(string), "%")) - if err != nil { - klog.V(4).ErrorS(err, "Convert serial to int failed", "serial", a.(string)) - return nil, err - } - if sp+int(math.Ceil(float64(len(hosts)*b)/100.0)) > len(hosts)-1 { - result = append(result, hosts[sp:]) - return result, nil - } - result = append(result, hosts[sp:sp+int(math.Ceil(float64(len(hosts)*b)/100.0))]) - sp += int(math.Ceil(float64(len(hosts)*b) / 100.0)) - } else { - b, err := strconv.Atoi(a.(string)) - if err != nil { - klog.V(4).ErrorS(err, "Convert serial to int failed", "serial", a.(string)) - return nil, err - } - if sp+b > len(hosts)-1 { - result = append(result, hosts[sp:]) - return result, nil - } - result = append(result, hosts[sp:sp+b]) - sp += b - } - default: - return nil, fmt.Errorf("unknown serial type. only support int or percent") - } + if sis[i] == 0 { + return nil, fmt.Errorf("serial %v should not be zero", a) } + count += sis[i] + } + if len(hosts) > count { + for i := 0.0; i < float64(len(hosts)-count)/float64(sis[len(sis)-1]); i++ { + sis = append(sis, sis[len(sis)-1]) + } + } + + // total result + result := make([][]string, len(sis)) + var begin, end int + for i, si := range sis { + end += si + if end > len(hosts) { + end = len(hosts) + } + result[i] = hosts[begin:end] + begin += si } return result, nil } - -// CalculatePipelineStatus calculate pipeline status from tasks -func CalculatePipelineStatus(nsTasks *kubekeyv1alpha1.TaskList, pipeline *kubekeyv1.Pipeline) { - if pipeline.Status.Phase != kubekeyv1.PipelinePhaseRunning { - // only deal running pipeline - return - } - pipeline.Status.TaskResult = kubekeyv1.PipelineTaskResult{ - Total: len(nsTasks.Items), - } - var failedDetail []kubekeyv1.PipelineFailedDetail - for _, t := range nsTasks.Items { - switch t.Status.Phase { - case kubekeyv1alpha1.TaskPhaseSuccess: - pipeline.Status.TaskResult.Success++ - case kubekeyv1alpha1.TaskPhaseIgnored: - pipeline.Status.TaskResult.Ignored++ - case kubekeyv1alpha1.TaskPhaseSkipped: - pipeline.Status.TaskResult.Skipped++ - } - if t.Status.Phase == kubekeyv1alpha1.TaskPhaseFailed && t.Spec.Retries <= t.Status.RestartCount { - var hostReason []kubekeyv1.PipelineFailedDetailHost - for _, tr := range t.Status.FailedDetail { - hostReason = append(hostReason, kubekeyv1.PipelineFailedDetailHost{ - Host: tr.Host, - Stdout: tr.Stdout, - StdErr: tr.StdErr, - }) - } - failedDetail = append(failedDetail, kubekeyv1.PipelineFailedDetail{ - Task: t.Name, - Hosts: hostReason, - }) - pipeline.Status.TaskResult.Failed++ - } - } - - if pipeline.Status.TaskResult.Failed != 0 { - pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed - pipeline.Status.Reason = "task failed" - pipeline.Status.FailedDetail = failedDetail - } else if pipeline.Status.TaskResult.Total == pipeline.Status.TaskResult.Success+pipeline.Status.TaskResult.Ignored+pipeline.Status.TaskResult.Skipped { - pipeline.Status.Phase 
= kubekeyv1.PipelinePhaseSucceed - } - -} diff --git a/pkg/converter/converter_test.go b/pkg/converter/converter_test.go index b6ccfcda..f4cebb20 100644 --- a/pkg/converter/converter_test.go +++ b/pkg/converter/converter_test.go @@ -17,121 +17,11 @@ limitations under the License. package converter import ( - "os" "testing" "github.com/stretchr/testify/assert" - - kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" ) -func TestMarshalPlaybook(t *testing.T) { - testcases := []struct { - name string - file string - except *kkcorev1.Playbook - }{ - { - name: "marshal playbook", - file: "playbooks/playbook1.yaml", - except: &kkcorev1.Playbook{[]kkcorev1.Play{ - { - Base: kkcorev1.Base{Name: "play1"}, - PlayHost: kkcorev1.PlayHost{Hosts: []string{"localhost"}}, - Roles: []kkcorev1.Role{ - {kkcorev1.RoleInfo{ - Role: "role1", - Block: []kkcorev1.Block{ - { - BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "role1 | block1"}}, - Task: kkcorev1.Task{UnknownFiled: map[string]any{ - "debug": map[string]any{ - "msg": "echo \"hello world\"", - }, - }}, - }, - }, - }}, - }, - Handlers: nil, - PreTasks: []kkcorev1.Block{ - { - BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | pre_block1"}}, - Task: kkcorev1.Task{UnknownFiled: map[string]any{ - "debug": map[string]any{ - "msg": "echo \"hello world\"", - }, - }}, - }, - }, - PostTasks: []kkcorev1.Block{ - { - BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | post_block1"}}, - Task: kkcorev1.Task{UnknownFiled: map[string]any{ - "debug": map[string]any{ - "msg": "echo \"hello world\"", - }, - }}, - }, - }, - Tasks: []kkcorev1.Block{ - { - BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block1"}}, - BlockInfo: kkcorev1.BlockInfo{Block: []kkcorev1.Block{ - { - BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block1 | block1"}}, - Task: kkcorev1.Task{UnknownFiled: map[string]any{ - "debug": map[string]any{ - "msg": "echo \"hello world\"", - }, - }}, - }, - { - BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block1 | block2"}}, - Task: kkcorev1.Task{UnknownFiled: map[string]any{ - "debug": map[string]any{ - "msg": "echo \"hello world\"", - }, - }}, - }, - }}, - }, - { - BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block2"}}, - Task: kkcorev1.Task{UnknownFiled: map[string]any{ - "debug": map[string]any{ - "msg": "echo \"hello world\"", - }, - }}, - }, - }, - }, - { - Base: kkcorev1.Base{Name: "play2"}, - PlayHost: kkcorev1.PlayHost{Hosts: []string{"localhost"}}, - Tasks: []kkcorev1.Block{ - { - BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play2 | block1"}}, - Task: kkcorev1.Task{UnknownFiled: map[string]any{ - "debug": map[string]any{ - "msg": "echo \"hello world\"", - }, - }}, - }, - }, - }, - }}, - }, - } - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - pb, err := MarshalPlaybook(os.DirFS("testdata"), tc.file) - assert.NoError(t, err) - assert.Equal(t, tc.except, pb) - }) - } -} - func TestGroupHostBySerial(t *testing.T) { hosts := []string{"h1", "h2", "h3", "h4", "h5", "h6", "h7"} testcases := []struct { diff --git a/pkg/converter/testdata/playbooks/playbook1.yaml b/pkg/converter/testdata/playbooks/playbook1.yaml deleted file mode 100644 index 08e2e5d0..00000000 --- a/pkg/converter/testdata/playbooks/playbook1.yaml +++ /dev/null @@ -1,30 +0,0 @@ -- name: play1 - hosts: localhost - pre_tasks: - - name: play1 | pre_block1 - debug: - msg: echo "hello world" - tasks: - - name: play1 | block1 - block: - - name: 
play1 | block1 | block1 - debug: - msg: echo "hello world" - - name: play1 | block1 | block2 - debug: - msg: echo "hello world" - - name: play1 | block2 - debug: - msg: echo "hello world" - post_tasks: - - name: play1 | post_block1 - debug: - msg: echo "hello world" - roles: - - role1 -- name: play2 - hosts: localhost - tasks: - - name: play2 | block1 - debug: - msg: echo "hello world" diff --git a/pkg/converter/testdata/roles/role1/tasks/main.yaml b/pkg/converter/testdata/roles/role1/tasks/main.yaml deleted file mode 100644 index 0a50611a..00000000 --- a/pkg/converter/testdata/roles/role1/tasks/main.yaml +++ /dev/null @@ -1,3 +0,0 @@ -- name: role1 | block1 - debug: - msg: echo "hello world" diff --git a/pkg/converter/tmpl/filter_extension.go b/pkg/converter/tmpl/filter_extension.go index 3817776e..9cc9ebcf 100644 --- a/pkg/converter/tmpl/filter_extension.go +++ b/pkg/converter/tmpl/filter_extension.go @@ -17,13 +17,14 @@ limitations under the License. package tmpl import ( + "encoding/json" "fmt" "math" - "path/filepath" "regexp" "strings" "github.com/flosch/pongo2/v6" + "gopkg.in/yaml.v3" "k8s.io/apimachinery/pkg/util/version" ) @@ -32,8 +33,9 @@ func init() { pongo2.RegisterFilter("version", filterVersion) pongo2.RegisterFilter("pow", filterPow) pongo2.RegisterFilter("match", filterMatch) - pongo2.RegisterFilter("base", filterBasename) - pongo2.RegisterFilter("trim", filterTrim) + pongo2.RegisterFilter("to_json", filterToJson) + pongo2.RegisterFilter("to_yaml", filterToYaml) + pongo2.RegisterFilter("ip_range", filterIpRange) } func filterDefined(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) { @@ -51,32 +53,58 @@ func filterVersion(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo OrigError: err, } } - paramString := param.String() - customChoices := strings.Split(paramString, ",") - if len(customChoices) != 2 { - return nil, &pongo2.Error{ - Sender: "filter:version", - OrigError: fmt.Errorf("'version'-filter need 2 arguments(as: verion:'xxx,xxx') but got'%s'", paramString), + paramString := strings.TrimSpace(param.String()) + switch { + case strings.HasPrefix(paramString, ">="): + compareVersion := strings.TrimSpace(paramString[2:]) + ci, err := inVersion.Compare(compareVersion) + if err != nil { + return pongo2.AsValue(nil), &pongo2.Error{ + Sender: "filter:version", + OrigError: fmt.Errorf("converter second param error: %v", err), + } } - } - ci, err := inVersion.Compare(customChoices[1]) - if err != nil { - return pongo2.AsValue(nil), &pongo2.Error{ - Sender: "filter:version", - OrigError: fmt.Errorf("converter second param error: %v", err), - } - } - switch customChoices[0] { - case ">": - return pongo2.AsValue(ci == 1), nil - case "=": - return pongo2.AsValue(ci == 0), nil - case "<": - return pongo2.AsValue(ci == -1), nil - case ">=": return pongo2.AsValue(ci >= 0), nil - case "<=": + case strings.HasPrefix(paramString, "<="): + compareVersion := strings.TrimSpace(paramString[2:]) + ci, err := inVersion.Compare(compareVersion) + if err != nil { + return pongo2.AsValue(nil), &pongo2.Error{ + Sender: "filter:version", + OrigError: fmt.Errorf("converter second param error: %v", err), + } + } return pongo2.AsValue(ci <= 0), nil + case strings.HasPrefix(paramString, "=="): + compareVersion := strings.TrimSpace(paramString[2:]) + ci, err := inVersion.Compare(compareVersion) + if err != nil { + return pongo2.AsValue(nil), &pongo2.Error{ + Sender: "filter:version", + OrigError: fmt.Errorf("converter second param error: %v", err), + } + } + return 
pongo2.AsValue(ci == 0), nil + case strings.HasPrefix(paramString, ">"): + compareVersion := strings.TrimSpace(paramString[1:]) + ci, err := inVersion.Compare(compareVersion) + if err != nil { + return pongo2.AsValue(nil), &pongo2.Error{ + Sender: "filter:version", + OrigError: fmt.Errorf("converter second param error: %v", err), + } + } + return pongo2.AsValue(ci == 1), nil + case strings.HasPrefix(paramString, "<"): + compareVersion := strings.TrimSpace(paramString[1:]) + ci, err := inVersion.Compare(compareVersion) + if err != nil { + return pongo2.AsValue(nil), &pongo2.Error{ + Sender: "filter:version", + OrigError: fmt.Errorf("converter second param error: %v", err), + } + } + return pongo2.AsValue(ci == -1), nil default: return pongo2.AsValue(nil), &pongo2.Error{ Sender: "filter:version", @@ -97,10 +125,53 @@ func filterMatch(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2. return pongo2.AsValue(match), nil } -func filterBasename(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) { - return pongo2.AsValue(filepath.Base(in.String())), nil +func filterToJson(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) { + data, err := json.Marshal(in.Interface()) + if err != nil { + return pongo2.AsValue(nil), &pongo2.Error{ + Sender: "to_json", + OrigError: fmt.Errorf("parse in to json: %v", err), + } + } + result := string(data) + if param.IsInteger() { + result = Indent(param.Integer(), result) + } + return pongo2.AsValue(result), nil } -func filterTrim(in *pongo2.Value, param *pongo2.Value) (out *pongo2.Value, err *pongo2.Error) { - return pongo2.AsValue(strings.TrimPrefix(in.String(), param.String())), nil +func filterToYaml(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) { + if in.IsNil() { + return pongo2.AsValue(nil), nil + } + data, err := yaml.Marshal(in.Interface()) + if err != nil { + return pongo2.AsValue(nil), &pongo2.Error{ + Sender: "to_yaml", + OrigError: fmt.Errorf("parse in to yaml: %v", err), + } + } + result := string(data) + if result == "{}\n" || result == "{}" { + return pongo2.AsValue(nil), nil + } + if !param.IsNil() && param.IsInteger() { + result = Indent(param.Integer(), result) + } + return pongo2.AsValue(result), nil +} + +func filterIpRange(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) { + if in.IsNil() || !in.IsString() { + return pongo2.AsValue(nil), &pongo2.Error{ + Sender: "ip_range", + OrigError: fmt.Errorf("input is not a string"), + } + } + var ipRange = make([]string, 0) + for _, s := range strings.Split(in.String(), ",") { + ipRange = append(ipRange, ParseIp(s)...) + } + + return pongo2.AsValue(ipRange), nil } diff --git a/pkg/converter/tmpl/filter_extension_test.go b/pkg/converter/tmpl/filter_extension_test.go index 25c1305e..e5195563 100644 --- a/pkg/converter/tmpl/filter_extension_test.go +++ b/pkg/converter/tmpl/filter_extension_test.go @@ -17,6 +17,7 @@ limitations under the License.
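The rewritten version filter takes the operator and the target version as a single argument (for example '>=v1.19.0') instead of the old comma-separated '>=,v1.19.0' form. A minimal sketch of exercising it through ParseString; the variable name and values are hypothetical:

package main

import (
	"fmt"

	"github.com/flosch/pongo2/v6"

	"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
)

func main() {
	// The filters are registered in tmpl's init(), so importing the package
	// is enough to make "version" available inside templates.
	out, err := tmpl.ParseString(
		pongo2.Context{"kube_version": "v1.28.2"},
		"{{ kube_version | version:'>=v1.24.0' }}",
	)
	fmt.Println(out, err) // expected: True <nil>
}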
package tmpl import ( + "encoding/json" "testing" "github.com/flosch/pongo2/v6" @@ -64,7 +65,7 @@ func TestFilter(t *testing.T) { }, { name: "version_greater", - input: "{{ test | version:'>=,v1.19.0' }}", + input: "{{ test | version:'>=v1.19.0' }}", ctx: map[string]any{ "test": "v1.23.10", }, @@ -91,9 +92,9 @@ func TestFilter(t *testing.T) { }, { name: "split", - input: "{{ kernelVersion | split:'-' | first }}", + input: "{{ kernel_version | split:'-' | first }}", ctx: map[string]any{ - "kernelVersion": "5.15.0-89-generic", + "kernel_version": "5.15.0-89-generic", }, except: "5.15.0", }, @@ -107,12 +108,38 @@ func TestFilter(t *testing.T) { except: "True", }, { - name: "trim", - input: "{{ test | trim:'v' }}", + name: "to_json", + input: "{{ test|to_json|safe }}", ctx: map[string]any{ - "test": "v1.7.0", + "test": []string{"a", "b"}, }, - except: "1.7.0", + except: "[\"a\",\"b\"]", + }, + { + name: "to_yaml", + input: "{{ test | to_yaml:4 }}", + ctx: map[string]any{ + "test": map[string]string{ + "a": "b/c/d:123", + }, + }, + except: " a: b/c/d:123\n ", + }, + { + name: "bool", + input: "{% if test %}a{% else %}b{% endif %}", + ctx: map[string]any{ + "test": true, + }, + except: "a", + }, + { + name: "number", + input: "a = {{ test }}", + ctx: map[string]any{ + "test": "23", + }, + except: "a = 23", }, } @@ -126,6 +153,12 @@ func TestFilter(t *testing.T) { if err != nil { t.Fatal(err) } + var v []string + if err := json.Unmarshal([]byte("[\""+result+"\"]"), &v); err != nil { + assert.Equal(t, tc.except, result) + } else { + assert.Equal(t, tc.except, v[0]) + } assert.Equal(t, tc.except, result) }) } diff --git a/pkg/converter/tmpl/helper.go b/pkg/converter/tmpl/helper.go new file mode 100644 index 00000000..a1e9f43c --- /dev/null +++ b/pkg/converter/tmpl/helper.go @@ -0,0 +1,139 @@ +package tmpl + +import ( + "encoding/binary" + "net" + "regexp" + "strconv" + "strings" +) + +// IsTmplSyntax Check if the string conforms to the template syntax. +func IsTmplSyntax(s string) bool { + return (strings.Contains(s, "{{") && strings.Contains(s, "}}")) || + (strings.Contains(s, "{%") && strings.Contains(s, "%}")) +} + +// Indent indents the given text by n spaces. 
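The ParseIp helper defined below accepts a single address, a dash range, or a CIDR block, and the ip_range filter flattens a comma-separated mix of these forms. A small sketch of the expected expansion; the addresses are illustrative:

package main

import (
	"fmt"

	"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
)

func main() {
	// A bare address passes through unchanged.
	fmt.Println(tmpl.ParseIp("10.0.0.1")) // [10.0.0.1]
	// A dash range expands inclusively.
	fmt.Println(tmpl.ParseIp("10.0.0.1-10.0.0.3")) // [10.0.0.1 10.0.0.2 10.0.0.3]
	// A /32 is treated as that single host address.
	fmt.Println(tmpl.ParseIp("10.0.0.9/32")) // [10.0.0.9]
}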
+func Indent(n int, text string) string {
+	startOfLine := regexp.MustCompile(`(?m)^`)
+	indentation := strings.Repeat(" ", n)
+	return startOfLine.ReplaceAllLiteralString(text, indentation)
+}
+
+// ParseIp expands an IP expression: a plain address, a CIDR such as
+// "10.0.0.0/24", or a range such as "10.0.0.1-10.0.0.3".
+func ParseIp(ip string) []string {
+	var availableIPs []string
+	// if ip ends with "/" (e.g. "1.1.1.1/"), trim it
+	ip = strings.TrimRight(ip, "/")
+	if strings.Contains(ip, "/") {
+		if strings.Contains(ip, "/32") {
+			aip := strings.Replace(ip, "/32", "", -1)
+			availableIPs = append(availableIPs, aip)
+		} else {
+			availableIPs = GetAvailableIP(ip)
+		}
+	} else if strings.Contains(ip, "-") {
+		ipRange := strings.SplitN(ip, "-", 2)
+		availableIPs = GetAvailableIPRange(ipRange[0], ipRange[1])
+	} else {
+		availableIPs = append(availableIPs, ip)
+	}
+	return availableIPs
+}
+
+// GetAvailableIPRange returns every IPv4 address from ipStart to ipEnd inclusive.
+func GetAvailableIPRange(ipStart, ipEnd string) []string {
+	var availableIPs []string
+
+	firstIP := net.ParseIP(ipStart)
+	endIP := net.ParseIP(ipEnd)
+	if firstIP.To4() == nil || endIP.To4() == nil {
+		return availableIPs
+	}
+	firstIPNum := ipToInt(firstIP.To4())
+	endIPNum := ipToInt(endIP.To4())
+
+	for newNum := firstIPNum; newNum <= endIPNum; newNum++ {
+		availableIPs = append(availableIPs, intToIP(newNum).String())
+	}
+	return availableIPs
+}
+
+// GetAvailableIP returns the usable host addresses of a CIDR, starting after
+// the network address.
+func GetAvailableIP(ipAndMask string) []string {
+	var availableIPs []string
+
+	ipAndMask = strings.TrimSpace(ipAndMask)
+	ipAndMask = IPAddressToCIDR(ipAndMask)
+	_, ipnet, _ := net.ParseCIDR(ipAndMask)
+
+	firstIP, _ := networkRange(ipnet)
+	ipNum := ipToInt(firstIP)
+	size := networkSize(ipnet.Mask)
+	max := size - 2 // -1 for the broadcast address, -1 for the gateway address
+
+	for pos := int32(1); pos <= max; pos++ {
+		availableIPs = append(availableIPs, intToIP(ipNum+pos).String())
+	}
+	return availableIPs
+}
+
+func ipToInt(ip net.IP) int32 {
+	return int32(binary.BigEndian.Uint32(ip.To4()))
+}
+
+func intToIP(n int32) net.IP {
+	b := make([]byte, 4)
+	binary.BigEndian.PutUint32(b, uint32(n))
+	return net.IP(b)
+}
+
+// IPAddressToCIDR normalizes "ip/netmask" (e.g. "10.0.0.0/255.255.255.0") to
+// CIDR notation; input already in CIDR or plain-IP form is returned unchanged.
+func IPAddressToCIDR(ipAddress string) string {
+	if strings.Contains(ipAddress, "/") {
+		ipAndMask := strings.Split(ipAddress, "/")
+		ip := ipAndMask[0]
+		mask := ipAndMask[1]
+		if strings.Contains(mask, ".") {
+			mask = IPMaskStringToCIDR(mask)
+		}
+		return ip + "/" + mask
+	}
+	return ipAddress
+}
+
+// IPMaskStringToCIDR converts a dotted netmask such as "255.255.255.0" to its
+// CIDR prefix length ("24").
+func IPMaskStringToCIDR(netmask string) string {
+	netmaskList := strings.Split(netmask, ".")
+	var mint []int
+	for _, v := range netmaskList {
+		strv, _ := strconv.Atoi(v)
+		mint = append(mint, strv)
+	}
+	myIPMask := net.IPv4Mask(byte(mint[0]), byte(mint[1]), byte(mint[2]), byte(mint[3]))
+	ones, _ := myIPMask.Size()
+	return strconv.Itoa(ones)
+}
+
+func networkRange(network *net.IPNet) (net.IP, net.IP) {
+	netIP := network.IP.To4()
+	firstIP := netIP.Mask(network.Mask)
+	lastIP := net.IPv4(0, 0, 0, 0).To4()
+	for i := 0; i < len(lastIP); i++ {
+		lastIP[i] = netIP[i] | ^network.Mask[i]
+	}
+	return firstIP, lastIP
+}
+
+func networkSize(mask net.IPMask) int32 {
+	m := net.IPv4Mask(0, 0, 0, 0)
+	for i := 0; i < net.IPv4len; i++ {
+		m[i] = ^mask[i]
+	}
+	return int32(binary.BigEndian.Uint32(m)) + 1
+}
diff --git a/pkg/converter/tmpl/template.go b/pkg/converter/tmpl/template.go
index 6b3925e2..8da5fb40 100644
--- a/pkg/converter/tmpl/template.go
+++ b/pkg/converter/tmpl/template.go
@@ -22,36 +22,33 @@
 import (
 	"github.com/flosch/pongo2/v6"
 	"k8s.io/klog/v2"
-
"github.com/kubesphere/kubekey/v4/pkg/variable" ) // ParseBool by pongo2 with not contain "{{ }}". It will add "{{ }}" to input string. -func ParseBool(v variable.VariableData, inputs []string) (bool, error) { +func ParseBool(ctx pongo2.Context, inputs []string) (bool, error) { for _, input := range inputs { - // first convert. + // first convert: parse variable like "{{ }}" in input intql, err := pongo2.FromString(input) if err != nil { klog.V(4).ErrorS(err, "Failed to get string") return false, err } - inres, err := intql.Execute(pongo2.Context(v)) + inres, err := intql.Execute(ctx) if err != nil { klog.V(4).ErrorS(err, "Failed to execute string") return false, err } + // second convert: add {{ }} to input. // trim line break. inres = strings.TrimSuffix(inres, "\n") inres = fmt.Sprintf("{{ %s }}", inres) - - // second convert. tql, err := pongo2.FromString(inres) if err != nil { klog.V(4).ErrorS(err, "failed to get string") return false, err } - result, err := tql.Execute(pongo2.Context(v)) + result, err := tql.Execute(ctx) if err != nil { klog.V(4).ErrorS(err, "failed to execute string") return false, err @@ -65,13 +62,16 @@ func ParseBool(v variable.VariableData, inputs []string) (bool, error) { } // ParseString with contain "{{ }}" -func ParseString(v variable.VariableData, input string) (string, error) { +func ParseString(ctx pongo2.Context, input string) (string, error) { + if len(ctx) == 0 || !IsTmplSyntax(input) { + return input, nil + } tql, err := pongo2.FromString(input) if err != nil { klog.V(4).ErrorS(err, "Failed to get string") return input, err } - result, err := tql.Execute(pongo2.Context(v)) + result, err := tql.Execute(ctx) if err != nil { klog.V(4).ErrorS(err, "Failed to execute string") return input, err @@ -80,13 +80,13 @@ func ParseString(v variable.VariableData, input string) (string, error) { return result, nil } -func ParseFile(v variable.VariableData, file []byte) (string, error) { +func ParseFile(ctx pongo2.Context, file []byte) (string, error) { tql, err := pongo2.FromBytes(file) if err != nil { klog.V(4).ErrorS(err, "Transfer file to template error") return "", err } - result, err := tql.Execute(pongo2.Context(v)) + result, err := tql.Execute(ctx) if err != nil { klog.V(4).ErrorS(err, "exec template error") return "", err diff --git a/pkg/converter/tmpl/template_test.go b/pkg/converter/tmpl/template_test.go index 1f8411a5..3af89c59 100644 --- a/pkg/converter/tmpl/template_test.go +++ b/pkg/converter/tmpl/template_test.go @@ -19,35 +19,43 @@ package tmpl import ( "testing" + "github.com/flosch/pongo2/v6" "github.com/stretchr/testify/assert" - - "github.com/kubesphere/kubekey/v4/pkg/variable" ) func TestParseBool(t *testing.T) { testcases := []struct { name string condition []string - variable variable.VariableData + variable pongo2.Context excepted bool }{ { name: "parse success", condition: []string{"foo == \"bar\""}, - variable: variable.VariableData{ + variable: pongo2.Context{ "foo": "bar", }, excepted: true, }, { - name: "in", + name: "in array", condition: []string{"test in inArr"}, - variable: variable.VariableData{ + variable: pongo2.Context{ "test": "a", "inArr": []string{"a", "b"}, }, excepted: true, }, + { + name: "container string", + condition: []string{"test in instr"}, + variable: pongo2.Context{ + "test": "a1", + "instr": "vda hjilsa1 sdte", + }, + excepted: true, + }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { @@ -61,17 +69,41 @@ func TestParseString(t *testing.T) { testcases := []struct { name string input string - 
variable variable.VariableData + variable pongo2.Context excepted string }{ { name: "parse success", input: "{{foo}}", - variable: map[string]any{ + variable: pongo2.Context{ "foo": "bar", }, excepted: "bar", }, + { + name: "parse in map", + input: "{% for _,v in value %}{{v.a}}{% endfor %}", + variable: pongo2.Context{ + "value": pongo2.Context{ + "foo": pongo2.Context{ + "a": "b", + }, + }, + }, + excepted: "b", + }, + { + name: "parse in", + input: "{% set k=value['foo'] %}{{ k.a }}", + variable: pongo2.Context{ + "value": pongo2.Context{ + "foo": pongo2.Context{ + "a": "b", + }, + }, + }, + excepted: "b", + }, } for _, tc := range testcases { @@ -85,12 +117,12 @@ func TestParseString(t *testing.T) { func TestParseFile(t *testing.T) { testcases := []struct { name string - variable variable.VariableData + variable pongo2.Context excepted string }{ { name: "parse success", - variable: map[string]any{ + variable: pongo2.Context{ "foo": "bar", }, excepted: "foo: bar", diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go new file mode 100644 index 00000000..4b6f5ba0 --- /dev/null +++ b/pkg/executor/executor.go @@ -0,0 +1,533 @@ +/* +Copyright 2024 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package executor + +import ( + "context" + "fmt" + "path" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" + kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" + kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1" + _const "github.com/kubesphere/kubekey/v4/pkg/const" + "github.com/kubesphere/kubekey/v4/pkg/converter" + "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" + "github.com/kubesphere/kubekey/v4/pkg/modules" + "github.com/kubesphere/kubekey/v4/pkg/project" + "github.com/kubesphere/kubekey/v4/pkg/proxy" + "github.com/kubesphere/kubekey/v4/pkg/variable" +) + +// TaskExecutor all task in pipeline +type TaskExecutor interface { + Exec(ctx context.Context) error +} + +func NewTaskExecutor(schema *runtime.Scheme, client ctrlclient.Client, pipeline *kubekeyv1.Pipeline) TaskExecutor { + if schema == nil { + schema = _const.Scheme + } + + if client == nil { + cli, err := proxy.NewLocalClient() + if err != nil { + return nil + } + client = cli + } + + // get variable + v, err := variable.GetVariable(client, *pipeline) + if err != nil { + klog.V(4).ErrorS(nil, "convert playbook error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) + return nil + } + + return &executor{ + schema: schema, + client: client, + pipeline: pipeline, + variable: v, + } +} + +type executor struct { + schema *runtime.Scheme + client ctrlclient.Client + + pipeline *kubekeyv1.Pipeline + variable variable.Variable +} + +type execBlockOptions struct { + 
// playbook level config + hosts []string // which hosts will run playbook + // blocks level config + blocks []kkcorev1.Block + role string // role name of blocks + when []string // when condition for blocks +} + +func (e executor) Exec(ctx context.Context) error { + e.pipeline.Status.Phase = kubekeyv1.PipelinePhaseRunning + defer func() { + // update pipeline phase + e.pipeline.Status.Phase = kubekeyv1.PipelinePhaseSucceed + if len(e.pipeline.Status.FailedDetail) != 0 { + e.pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed + } + }() + + klog.V(6).InfoS("deal project", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + pj, err := project.New(*e.pipeline, true) + if err != nil { + klog.V(4).ErrorS(err, "Deal project error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + return err + } + + // convert to transfer.Playbook struct + playbookPath := e.pipeline.Spec.Playbook + if path.IsAbs(playbookPath) { + playbookPath = playbookPath[1:] + } + pb, err := pj.MarshalPlaybook() + if err != nil { + klog.V(4).ErrorS(nil, "convert playbook error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + return err + } + + for _, play := range pb.Play { + if !play.Taggable.IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) { + // if not match the tags. skip + continue + } + // hosts should contain all host's name. hosts should not be empty. + var hosts []string + if ahn, err := e.variable.Get(variable.GetHostnames(play.PlayHost.Hosts)); err == nil { + hosts = ahn.([]string) + } + if len(hosts) == 0 { // if hosts is empty skip this playbook + klog.V(4).Info("Hosts is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + continue + } + + // when gather_fact is set. get host's information from remote. + if play.GatherFacts { + for _, h := range hosts { + gfv, err := getGatherFact(ctx, h, e.variable) + if err != nil { + klog.V(4).ErrorS(err, "Get gather fact error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "host", h) + return err + } + // merge host information to runtime variable + if err := e.variable.Merge(variable.MergeRemoteVariable(h, gfv)); err != nil { + klog.V(4).ErrorS(err, "Merge gather fact error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "host", h) + return err + } + } + } + + // Batch execution, with each batch being a group of hosts run in serial. + var batchHosts [][]string + if play.RunOnce { + // runOnce only run in first node + batchHosts = [][]string{{hosts[0]}} + } else { + // group hosts by serial. run the playbook by serial + batchHosts, err = converter.GroupHostBySerial(hosts, play.Serial.Data) + if err != nil { + klog.V(4).ErrorS(err, "Group host by serial error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + return err + } + } + + // generate task by each batch. + for _, serials := range batchHosts { + // each batch hosts should not be empty. 
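+			// Illustrative example (assuming Ansible-style serial semantics
+			// in converter.GroupHostBySerial): hosts [n1 n2 n3 n4] with
+			// serial: [1, 2] run as batches [[n1] [n2 n3] [n4]]; each batch
+			// completes every block below before the next batch starts.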
+			if len(serials) == 0 {
+				klog.V(4).ErrorS(nil, "Host is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
+				return fmt.Errorf("host is empty")
+			}
+
+			if err := e.mergeVariable(ctx, e.variable, play.Vars, serials...); err != nil {
+				klog.V(4).ErrorS(err, "merge variable error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "play", play.Name)
+				return err
+			}
+
+			// generate task from pre tasks
+			if err := e.execBlock(ctx, execBlockOptions{
+				hosts:  serials,
+				blocks: play.PreTasks,
+			}); err != nil {
+				klog.V(4).ErrorS(err, "Get pre task from play error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "play", play.Name)
+				return err
+			}
+
+			// generate task from role
+			for _, role := range play.Roles {
+				if err := e.mergeVariable(ctx, e.variable, role.Vars, serials...); err != nil {
+					klog.V(4).ErrorS(err, "merge variable error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "role", role.Name)
+					return err
+				}
+
+				if err := e.execBlock(ctx, execBlockOptions{
+					hosts:  serials,
+					blocks: role.Block,
+					role:   role.Role,
+					when:   role.When.Data,
+				}); err != nil {
+					klog.V(4).ErrorS(err, "Get role task from play error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "play", play.Name, "role", role.Role)
+					return err
+				}
+			}
+			// generate task from tasks
+			if err := e.execBlock(ctx, execBlockOptions{
+				hosts:  serials,
+				blocks: play.Tasks,
+			}); err != nil {
+				klog.V(4).ErrorS(err, "Get task from play error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "play", play.Name)
+				return err
+			}
+			// generate task from post tasks
+			if err := e.execBlock(ctx, execBlockOptions{
+				hosts:  serials,
+				blocks: play.PostTasks,
+			}); err != nil {
+				klog.V(4).ErrorS(err, "Get post task from play error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "play", play.Name)
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (e executor) execBlock(ctx context.Context, options execBlockOptions) error {
+	for _, at := range options.blocks {
+		if !at.Taggable.IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) {
+			continue
+		}
+		hosts := options.hosts
+		if at.RunOnce { // only run in first host
+			hosts = []string{options.hosts[0]}
+		}
+
+		// merge variable which defined in block
+		if err := e.mergeVariable(ctx, e.variable, at.Vars, hosts...); err != nil {
+			klog.V(5).ErrorS(err, "merge variable error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
+			return err
+		}
+
+		switch {
+		case len(at.Block) != 0:
+			// exec block
+			if err := e.execBlock(ctx, execBlockOptions{
+				hosts:  hosts,
+				role:   options.role,
+				blocks: at.Block,
+				when:   append(options.when, at.When.Data...),
+			}); err != nil {
+				klog.V(4).ErrorS(err, "Get block task from block error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
+				return err
+			}
+
+			// if the block failed, execute the rescue tasks
+			if e.pipeline.Status.Phase == kubekeyv1.PipelinePhaseFailed && len(at.Rescue) != 0 {
+				if err := e.execBlock(ctx, execBlockOptions{
+					hosts:  hosts,
+					blocks: at.Rescue,
+					role:   options.role,
+					when:   append(options.when, at.When.Data...),
+				}); err != nil {
+					klog.V(4).ErrorS(err, "Get rescue task from block error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
+					return err
+				}
+			}
+
+			// the always tasks run after the block regardless of its outcome
+			if len(at.Always) != 0 {
+				if err := e.execBlock(ctx, execBlockOptions{
+					hosts:  hosts,
+					blocks: at.Always,
+					role:   options.role,
+					when:   append(options.when, at.When.Data...),
+				}); err != nil {
+					klog.V(4).ErrorS(err, "Get always task from block error", "pipeline",
ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) + return err + } + } + + case at.IncludeTasks != "": + // include tasks has converted to blocks. + // do nothing + default: + task := converter.MarshalBlock(ctx, options.role, hosts, append(options.when, at.When.Data...), at) + // complete by pipeline + task.GenerateName = e.pipeline.Name + "-" + task.Namespace = e.pipeline.Namespace + if err := controllerutil.SetControllerReference(e.pipeline, task, e.schema); err != nil { + klog.V(4).ErrorS(err, "Set controller reference error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) + return err + } + // complete module by unknown field + for n, a := range at.UnknownFiled { + data, err := json.Marshal(a) + if err != nil { + klog.V(4).ErrorS(err, "Marshal unknown field error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name, "field", n) + return err + } + if m := modules.FindModule(n); m != nil { + task.Spec.Module.Name = n + task.Spec.Module.Args = runtime.RawExtension{Raw: data} + break + } + } + if task.Spec.Module.Name == "" { // action is necessary for a task + klog.V(4).ErrorS(nil, "No module/action detected in task", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) + return fmt.Errorf("no module/action detected in task: %s", task.Name) + } + // create task + if err := e.client.Create(ctx, task); err != nil { + klog.V(4).ErrorS(err, "create task error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) + return err + } + + for { + klog.Infof("[Task %s] task exec \"%s\" begin for %v times", ctrlclient.ObjectKeyFromObject(task), task.Spec.Name, task.Status.RestartCount+1) + // exec task + task.Status.Phase = kubekeyv1alpha1.TaskPhaseRunning + if err := e.client.Status().Update(ctx, task); err != nil { + klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(task)) + } + if err := e.executeTask(ctx, task, options); err != nil { + klog.V(4).ErrorS(err, "exec task error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) + return err + } + if err := e.client.Status().Update(ctx, task); err != nil { + klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(task)) + return err + } + + if task.IsComplete() { + break + } + } + klog.Infof("[Task %s] task exec \"%s\" end status is %s", ctrlclient.ObjectKeyFromObject(task), task.Spec.Name, task.Status.Phase) + e.pipeline.Status.TaskResult.Total++ + switch task.Status.Phase { + case kubekeyv1alpha1.TaskPhaseSuccess: + e.pipeline.Status.TaskResult.Success++ + case kubekeyv1alpha1.TaskPhaseIgnored: + e.pipeline.Status.TaskResult.Ignored++ + case kubekeyv1alpha1.TaskPhaseFailed: + e.pipeline.Status.TaskResult.Failed++ + } + + // exit when task run failed + if task.IsFailed() { + var hostReason []kubekeyv1.PipelineFailedDetailHost + for _, tr := range task.Status.FailedDetail { + hostReason = append(hostReason, kubekeyv1.PipelineFailedDetailHost{ + Host: tr.Host, + Stdout: tr.Stdout, + StdErr: tr.StdErr, + }) + } + e.pipeline.Status.FailedDetail = append(e.pipeline.Status.FailedDetail, kubekeyv1.PipelineFailedDetail{ + Task: task.Spec.Name, + Hosts: hostReason, + }) + e.pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed + e.pipeline.Status.Reason = fmt.Sprintf("task %s run failed", task.Name) + return fmt.Errorf("task %s run failed", task.Name) + } + } + + } + return nil +} + +func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, 
options execBlockOptions) error { + cd := kubekeyv1alpha1.TaskCondition{ + StartTimestamp: metav1.Now(), + } + defer func() { + cd.EndTimestamp = metav1.Now() + task.Status.Conditions = append(task.Status.Conditions, cd) + }() + + // check task host results + wg := &wait.Group{} + dataChan := make(chan kubekeyv1alpha1.TaskHostResult, len(task.Spec.Hosts)) + for _, h := range task.Spec.Hosts { + host := h + wg.StartWithContext(ctx, func(ctx context.Context) { + var stdout, stderr string + defer func() { + if stderr != "" { + klog.Errorf("[Task %s] run failed: %s", ctrlclient.ObjectKeyFromObject(task), stderr) + } + + if task.Spec.Register != "" { + // set variable to parent location + if err := e.variable.Merge(variable.MergeRuntimeVariable(host, map[string]any{ + task.Spec.Register: map[string]string{ + "stdout": stdout, + "stderr": stderr, + }, + })); err != nil { + stderr = fmt.Sprintf("register task result to variable error: %v", err) + return + } + } + // fill result + dataChan <- kubekeyv1alpha1.TaskHostResult{ + Host: host, + Stdout: stdout, + StdErr: stderr, + } + }() + + ha, err := e.variable.Get(variable.GetAllVariable(host)) + if err != nil { + stderr = fmt.Sprintf("get variable error: %v", err) + return + } + // check when condition + if len(task.Spec.When) > 0 { + ok, err := tmpl.ParseBool(ha.(map[string]any), task.Spec.When) + if err != nil { + stderr = fmt.Sprintf("parse when condition error: %v", err) + return + } + if !ok { + stdout = "skip" + return + } + } + + // execute module with loop + loop, err := e.execLoop(ctx, ha.(map[string]any), task) + if err != nil { + stderr = fmt.Sprintf("parse loop vars error: %v", err) + return + } + + for _, item := range loop { + // set item to runtime variable + if err := e.variable.Merge(variable.MergeRuntimeVariable(host, map[string]any{ + "item": item, + })); err != nil { + stderr = fmt.Sprintf("set loop item to variable error: %v", err) + return + } + stdout, stderr = e.executeModule(ctx, task, modules.ExecOptions{ + Args: task.Spec.Module.Args, + Host: host, + Variable: e.variable, + Task: *task, + Pipeline: *e.pipeline, + }) + // delete item + if err := e.variable.Merge(variable.MergeRuntimeVariable(host, map[string]any{ + "item": nil, + })); err != nil { + stderr = fmt.Sprintf("clean loop item to variable error: %v", err) + return + } + } + }) + } + go func() { + wg.Wait() + close(dataChan) + }() + + task.Status.Phase = kubekeyv1alpha1.TaskPhaseSuccess + for data := range dataChan { + if data.StdErr != "" { + if task.Spec.IgnoreError { + task.Status.Phase = kubekeyv1alpha1.TaskPhaseIgnored + } else { + task.Status.Phase = kubekeyv1alpha1.TaskPhaseFailed + task.Status.FailedDetail = append(task.Status.FailedDetail, kubekeyv1alpha1.TaskFailedDetail{ + Host: data.Host, + Stdout: data.Stdout, + StdErr: data.StdErr, + }) + } + } + cd.HostResults = append(cd.HostResults, data) + } + + return nil +} + +func (e executor) execLoop(ctx context.Context, ha map[string]any, task *kubekeyv1alpha1.Task) ([]any, error) { + switch { + case task.Spec.Loop.Raw == nil: + // loop is not set. add one element to execute once module. 
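+		// Illustrative example (not part of this patch): a task with
+		//
+		//	loop: ["a", "b"]
+		//
+		// runs its module twice, with `item` set to "a" and then "b"; with
+		// no loop the module runs exactly once and `item` stays unset.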
+ return []any{nil}, nil + default: + return variable.Extension2Slice(ha, task.Spec.Loop), nil + } +} + +func (e executor) executeModule(ctx context.Context, task *kubekeyv1alpha1.Task, opts modules.ExecOptions) (string, string) { + lg, err := opts.Variable.Get(variable.GetAllVariable(opts.Host)) + if err != nil { + klog.V(5).ErrorS(err, "get location variable error", "task", ctrlclient.ObjectKeyFromObject(task)) + return "", err.Error() + } + + // check failed when condition + if len(task.Spec.FailedWhen) > 0 { + ok, err := tmpl.ParseBool(lg.(map[string]any), task.Spec.FailedWhen) + if err != nil { + klog.V(5).ErrorS(err, "validate FailedWhen condition error", "task", ctrlclient.ObjectKeyFromObject(task)) + return "", err.Error() + } + if ok { + return "", "failed by failedWhen" + } + } + + return modules.FindModule(task.Spec.Module.Name)(ctx, opts) +} + +// merge defined variable to host variable +func (e executor) mergeVariable(ctx context.Context, v variable.Variable, vd map[string]any, hosts ...string) error { + if len(vd) == 0 { + // skip + return nil + } + for _, host := range hosts { + + if err := v.Merge(variable.MergeRuntimeVariable(host, vd)); err != nil { + return err + } + } + return nil +} diff --git a/pkg/task/helper.go b/pkg/executor/helper.go similarity index 89% rename from pkg/task/helper.go rename to pkg/executor/helper.go index 91d02915..15072202 100644 --- a/pkg/task/helper.go +++ b/pkg/executor/helper.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package task +package executor import ( "bufio" @@ -29,13 +29,18 @@ import ( ) // getGatherFact get host info -func getGatherFact(ctx context.Context, hostname string, vars variable.Variable) (variable.VariableData, error) { - v, err := vars.Get(variable.HostVars{HostName: hostname}) +func getGatherFact(ctx context.Context, hostname string, vars variable.Variable) (map[string]any, error) { + v, err := vars.Get(variable.GetParamVariable(hostname)) if err != nil { klog.V(4).ErrorS(err, "Get host variable error", "hostname", hostname) return nil, err } - conn := connector.NewConnector(hostname, v.(variable.VariableData)) + + conn, err := connector.NewConnector(hostname, v.(map[string]any)) + if err != nil { + klog.V(4).ErrorS(err, "New connector error", "hostname", hostname) + return nil, err + } if err := conn.Init(ctx); err != nil { klog.V(4).ErrorS(err, "Init connection error", "hostname", hostname) return nil, err @@ -43,7 +48,7 @@ func getGatherFact(ctx context.Context, hostname string, vars variable.Variable) defer conn.Close(ctx) // os information - osVars := make(variable.VariableData) + osVars := make(map[string]any) var osRelease bytes.Buffer if err := conn.FetchFile(ctx, "/etc/os-release", &osRelease); err != nil { klog.V(4).ErrorS(err, "Fetch os-release error", "hostname", hostname) @@ -55,7 +60,7 @@ func getGatherFact(ctx context.Context, hostname string, vars variable.Variable) klog.V(4).ErrorS(err, "Get kernel version error", "hostname", hostname) return nil, err } - osVars["kernelVersion"] = string(bytes.TrimSuffix(kernel, []byte("\n"))) + osVars["kernel_version"] = string(bytes.TrimSuffix(kernel, []byte("\n"))) hn, err := conn.ExecuteCommand(ctx, "hostname") if err != nil { klog.V(4).ErrorS(err, "Get hostname error", "hostname", hostname) @@ -70,7 +75,7 @@ func getGatherFact(ctx context.Context, hostname string, vars variable.Variable) osVars["architecture"] = string(bytes.TrimSuffix(arch, []byte("\n"))) // process information - 
procVars := make(variable.VariableData) + procVars := make(map[string]any) var cpu bytes.Buffer if err := conn.FetchFile(ctx, "/proc/cpuinfo", &cpu); err != nil { klog.V(4).ErrorS(err, "Fetch cpuinfo error", "hostname", hostname) @@ -84,7 +89,7 @@ func getGatherFact(ctx context.Context, hostname string, vars variable.Variable) } procVars["memInfo"] = convertBytesToMap(mem.Bytes(), ":") - return variable.VariableData{ + return map[string]any{ "os": osVars, "process": procVars, }, nil diff --git a/pkg/task/helper_test.go b/pkg/executor/helper_test.go similarity index 99% rename from pkg/task/helper_test.go rename to pkg/executor/helper_test.go index 67f16499..b6fc5d34 100644 --- a/pkg/task/helper_test.go +++ b/pkg/executor/helper_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package task +package executor import ( "testing" diff --git a/pkg/manager/command_manager.go b/pkg/manager/command_manager.go index 46990dd7..c9a59869 100644 --- a/pkg/manager/command_manager.go +++ b/pkg/manager/command_manager.go @@ -18,21 +18,15 @@ package manager import ( "context" - "fmt" "os" - "syscall" - "time" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" _const "github.com/kubesphere/kubekey/v4/pkg/const" - "github.com/kubesphere/kubekey/v4/pkg/controllers" - "github.com/kubesphere/kubekey/v4/pkg/task" - "github.com/kubesphere/kubekey/v4/pkg/variable" + "github.com/kubesphere/kubekey/v4/pkg/executor" ) type commandManager struct { @@ -58,9 +52,11 @@ func (m *commandManager) Run(ctx context.Context) error { klog.ErrorS(err, "Create pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline)) return err } + klog.Infof("[Pipeline %s] start", ctrlclient.ObjectKeyFromObject(m.Pipeline)) defer func() { - klog.Infof("[Pipeline %s] finish", ctrlclient.ObjectKeyFromObject(m.Pipeline)) + klog.Infof("[Pipeline %s] finish. 
total: %v,success: %v,ignored: %v,failed: %v", ctrlclient.ObjectKeyFromObject(m.Pipeline), + m.Pipeline.Status.TaskResult.Total, m.Pipeline.Status.TaskResult.Success, m.Pipeline.Status.TaskResult.Ignored, m.Pipeline.Status.TaskResult.Failed) // update pipeline status if err := m.Client.Status().Update(ctx, m.Pipeline); err != nil { klog.ErrorS(err, "Update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline)) @@ -73,53 +69,13 @@ func (m *commandManager) Run(ctx context.Context) error { klog.ErrorS(err, "Clean runtime directory error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline), "runtime_dir", _const.GetRuntimeDir()) } } - // kill by signal - if err := syscall.Kill(os.Getpid(), syscall.SIGTERM); err != nil { - klog.ErrorS(err, "Kill process error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline)) - } }() klog.Infof("[Pipeline %s] start task controller", ctrlclient.ObjectKeyFromObject(m.Pipeline)) - kd, err := task.NewController(task.ControllerOptions{ - Scheme: m.Scheme, - VariableCache: variable.Cache, - Client: m.Client, - TaskReconciler: &controllers.TaskReconciler{ - Client: m.Client, - }, - }) - if err != nil { + if err := executor.NewTaskExecutor(m.Scheme, m.Client, m.Pipeline).Exec(ctx); err != nil { klog.ErrorS(err, "Create task controller error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline)) - m.Pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed - m.Pipeline.Status.Reason = fmt.Sprintf("create task controller failed: %v", err) return err } - // init pipeline status - m.Pipeline.Status.Phase = kubekeyv1.PipelinePhaseRunning - if err := kd.AddTasks(ctx, m.Pipeline); err != nil { - klog.ErrorS(err, "Add task error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline)) - m.Pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed - m.Pipeline.Status.Reason = fmt.Sprintf("add task to controller failed: %v", err) - return err - } - // update pipeline status - if err := m.Client.Status().Update(ctx, m.Pipeline); err != nil { - klog.ErrorS(err, "Update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline)) - return err - } - klog.Infof("[Pipeline %s] start deal task total %v", ctrlclient.ObjectKeyFromObject(m.Pipeline), m.Pipeline.Status.TaskResult.Total) - go kd.Start(ctx) - - _ = wait.PollUntilContextCancel(ctx, time.Millisecond*100, false, func(ctx context.Context) (done bool, err error) { - if err := m.Client.Get(ctx, ctrlclient.ObjectKeyFromObject(m.Pipeline), m.Pipeline); err != nil { - klog.ErrorS(err, "Get pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline)) - return false, nil - } - if m.Pipeline.Status.Phase == kubekeyv1.PipelinePhaseFailed || m.Pipeline.Status.Phase == kubekeyv1.PipelinePhaseSucceed { - return true, nil - } - return false, nil - }) return nil } diff --git a/pkg/manager/controller_manager.go b/pkg/manager/controller_manager.go index 8bad8791..72e53452 100644 --- a/pkg/manager/controller_manager.go +++ b/pkg/manager/controller_manager.go @@ -27,8 +27,6 @@ import ( _const "github.com/kubesphere/kubekey/v4/pkg/const" "github.com/kubesphere/kubekey/v4/pkg/controllers" "github.com/kubesphere/kubekey/v4/pkg/proxy" - "github.com/kubesphere/kubekey/v4/pkg/task" - "github.com/kubesphere/kubekey/v4/pkg/variable" ) type controllerManager struct { @@ -57,30 +55,10 @@ func (c controllerManager) Run(ctx context.Context) error { return err } - taskController, err := task.NewController(task.ControllerOptions{ - Scheme: mgr.GetScheme(), - VariableCache: variable.Cache, - MaxConcurrent: 
c.MaxConcurrentReconciles, - Client: mgr.GetClient(), - TaskReconciler: &controllers.TaskReconciler{ - Client: mgr.GetClient(), - }, - }) - if err != nil { - klog.ErrorS(err, "Create task controller error") - return err - } - - // add task controller to manager - if err := mgr.Add(taskController); err != nil { - klog.ErrorS(err, "Add task controller error") - return err - } - if err := (&controllers.PipelineReconciler{ - Client: mgr.GetClient(), - EventRecorder: mgr.GetEventRecorderFor("pipeline"), - TaskController: taskController, + Client: mgr.GetClient(), + EventRecorder: mgr.GetEventRecorderFor("pipeline"), + Scheme: mgr.GetScheme(), }).SetupWithManager(ctx, mgr, controllers.Options{ ControllerGates: c.ControllerGates, Options: ctrlcontroller.Options{ diff --git a/pkg/modules/assert.go b/pkg/modules/assert.go index 9b62eac9..56079a4b 100644 --- a/pkg/modules/assert.go +++ b/pkg/modules/assert.go @@ -19,35 +19,34 @@ package modules import ( "context" + "k8s.io/klog/v2" + "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" "github.com/kubesphere/kubekey/v4/pkg/variable" ) func ModuleAssert(ctx context.Context, options ExecOptions) (string, string) { - args := variable.Extension2Variables(options.Args) - that := variable.StringSliceVar(args, "that") - if that == nil { - st := variable.StringVar(args, "that") - if st == nil { - return "", "\"that\" should be []string or string" - } - that = []string{*st} - } - lg, err := options.Variable.Get(variable.LocationVars{ - HostName: options.Host, - LocationUID: string(options.Task.UID), - }) + // get host variable + ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) if err != nil { + klog.V(4).ErrorS(err, "failed to get host variable", "hostname", options.Host) return "", err.Error() } - ok, err := tmpl.ParseBool(lg.(variable.VariableData), that) + + args := variable.Extension2Variables(options.Args) + thatParam, err := variable.StringSliceVar(ha.(map[string]any), args, "that") + if err != nil { + return "", "\"that\" should be []string or string" + } + + ok, err := tmpl.ParseBool(ha.(map[string]any), thatParam) if err != nil { return "", err.Error() } if ok { - if v := variable.StringVar(args, "success_msg"); v != nil { - if r, err := tmpl.ParseString(lg.(variable.VariableData), *v); err != nil { + if successMsgParam, err := variable.StringVar(ha.(map[string]any), args, "success_msg"); err == nil { + if r, err := tmpl.ParseString(ha.(map[string]any), successMsgParam); err != nil { return "", err.Error() } else { return r, "" @@ -55,15 +54,15 @@ func ModuleAssert(ctx context.Context, options ExecOptions) (string, string) { } return "True", "" } else { - if v := variable.StringVar(args, "fail_msg"); v != nil { - if r, err := tmpl.ParseString(lg.(variable.VariableData), *v); err != nil { + if failMsgParam, err := variable.StringVar(ha.(map[string]any), args, "fail_msg"); err == nil { + if r, err := tmpl.ParseString(ha.(map[string]any), failMsgParam); err != nil { return "", err.Error() } else { return "False", r } } - if v := variable.StringVar(args, "msg"); v != nil { - if r, err := tmpl.ParseString(lg.(variable.VariableData), *v); err != nil { + if msgParam, err := variable.StringVar(ha.(map[string]any), args, "msg"); err == nil { + if r, err := tmpl.ParseString(ha.(map[string]any), msgParam); err != nil { return "", err.Error() } else { return "False", r diff --git a/pkg/modules/assert_test.go b/pkg/modules/assert_test.go index 0ab46691..8b1be2d1 100644 --- a/pkg/modules/assert_test.go +++ b/pkg/modules/assert_test.go @@ 
-35,8 +35,9 @@ func TestAssert(t *testing.T) { { name: "non-that", opt: ExecOptions{ - Host: "local", - Args: runtime.RawExtension{}, + Host: "local", + Variable: &testVariable{}, + Args: runtime.RawExtension{}, }, exceptStderr: "\"that\" should be []string or string", }, diff --git a/pkg/modules/command.go b/pkg/modules/command.go index d83ee234..6638636f 100644 --- a/pkg/modules/command.go +++ b/pkg/modules/command.go @@ -22,41 +22,36 @@ import ( "k8s.io/klog/v2" - "github.com/kubesphere/kubekey/v4/pkg/connector" "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" "github.com/kubesphere/kubekey/v4/pkg/variable" ) func ModuleCommand(ctx context.Context, options ExecOptions) (string, string) { - ha, _ := options.Variable.Get(variable.HostVars{HostName: options.Host}) - var conn connector.Connector - if v := ctx.Value("connector"); v != nil { - conn = v.(connector.Connector) - } else { - conn = connector.NewConnector(options.Host, ha.(variable.VariableData)) - } - if err := conn.Init(ctx); err != nil { - klog.V(4).ErrorS(err, "failed to init connector") + // get host variable + ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) + if err != nil { + klog.V(4).ErrorS(err, "failed to get host variable", "hostname", options.Host) return "", err.Error() } - defer conn.Close(ctx) - - // convert command template to string - arg := variable.Extension2String(options.Args) - lg, err := options.Variable.Get(variable.LocationVars{ - HostName: options.Host, - LocationUID: string(options.Task.UID), - }) + // args + commandParam, err := variable.Extension2String(ha.(map[string]any), options.Args) if err != nil { return "", err.Error() } - result, err := tmpl.ParseString(lg.(variable.VariableData), arg) + // get connector + conn, err := getConnector(ctx, options.Host, ha.(map[string]any)) + if err != nil { + return "", err.Error() + } + defer conn.Close(ctx) + // execute command + command, err := tmpl.ParseString(ha.(map[string]any), commandParam) if err != nil { return "", err.Error() } // execute command var stdout, stderr string - data, err := conn.ExecuteCommand(ctx, result) + data, err := conn.ExecuteCommand(ctx, command) if err != nil { stderr = err.Error() } diff --git a/pkg/modules/command_test.go b/pkg/modules/command_test.go index 22c84582..7e564cf0 100644 --- a/pkg/modules/command_test.go +++ b/pkg/modules/command_test.go @@ -40,7 +40,7 @@ func TestCommand(t *testing.T) { Variable: &testVariable{}, }, ctx: context.Background(), - exceptStderr: "host is not set", + exceptStderr: "cannot find variable \"ssh_host\"", }, { name: "exec command success", diff --git a/pkg/modules/copy.go b/pkg/modules/copy.go index 43d7eeeb..9631974a 100644 --- a/pkg/modules/copy.go +++ b/pkg/modules/copy.go @@ -18,6 +18,7 @@ package modules import ( "context" + "fmt" "io/fs" "os" "path/filepath" @@ -26,147 +27,162 @@ import ( "k8s.io/klog/v2" kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1" - "github.com/kubesphere/kubekey/v4/pkg/connector" - "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" "github.com/kubesphere/kubekey/v4/pkg/project" "github.com/kubesphere/kubekey/v4/pkg/variable" ) func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) { - // check args - args := variable.Extension2Variables(options.Args) - src := variable.StringVar(args, "src") - content := variable.StringVar(args, "content") - if src == nil && content == nil { - return "", "\"src\" or \"content\" in args should be string" - } - dest := variable.StringVar(args, "dest") - if 
dest == nil { - return "", "\"dest\" in args should be string" - } - lv, err := options.Variable.Get(variable.LocationVars{ - HostName: options.Host, - LocationUID: string(options.Task.UID), - }) + // get host variable + ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) if err != nil { - klog.V(4).ErrorS(err, "failed to get location vars") - return "", err.Error() - } - destStr, err := tmpl.ParseString(lv.(variable.VariableData), *dest) - if err != nil { - klog.V(4).ErrorS(err, "template parse dest error") + klog.V(4).ErrorS(err, "failed to get host variable", "hostname", options.Host) return "", err.Error() } - var conn connector.Connector - if v := ctx.Value("connector"); v != nil { - conn = v.(connector.Connector) - } else { - // get connector - ha, err := options.Variable.Get(variable.HostVars{HostName: options.Host}) - if err != nil { - klog.V(4).ErrorS(err, "failed to get host vars") - return "", err.Error() - } - conn = connector.NewConnector(options.Host, ha.(variable.VariableData)) + // check args + // todo should add policy? + args := variable.Extension2Variables(options.Args) + srcParam, _ := variable.StringVar(ha.(map[string]any), args, "src") + contentParam, _ := variable.StringVar(ha.(map[string]any), args, "content") + if srcParam == "" && contentParam == "" { + return "", "\"src\" or \"content\" in args should be string" } - if err := conn.Init(ctx); err != nil { - klog.V(4).ErrorS(err, "failed to init connector") + destParam, err := variable.StringVar(ha.(map[string]any), args, "dest") + if err != nil { + return "", "\"dest\" in args should be string" + } + + // get connector + conn, err := getConnector(ctx, options.Host, ha.(map[string]any)) + if err != nil { return "", err.Error() } defer conn.Close(ctx) - if src != nil { - // convert src - srcStr, err := tmpl.ParseString(lv.(variable.VariableData), *src) - if err != nil { - klog.V(4).ErrorS(err, "template parse src error") - return "", err.Error() - } - var baseFS fs.FS - if filepath.IsAbs(srcStr) { - baseFS = os.DirFS("/") - } else { - projectFs, err := project.New(project.Options{Pipeline: &options.Pipeline}).FS(ctx, false) + switch { + case srcParam != "": // copy local file to remote + if filepath.IsAbs(srcParam) { // if src is absolute path. 
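+	// An absolute src is read directly from the local filesystem; a
+	// relative src (the else branch below) is resolved from the project's
+	// files instead.
+	//
+	// Illustrative args (assumed JSON shape; values are hypothetical):
+	//
+	//	{"src": "/etc/ssl/ca.pem", "dest": "/etc/kubernetes/pki/"}
+	//
+	// A dest ending in "/" keeps the source file's base name.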
+		fileInfo, err := os.Stat(srcParam)
+		if err != nil {
+			return "", fmt.Sprintf("get src file %s in local path error: %v", srcParam, err)
+		}
+
+		if fileInfo.IsDir() { // src is dir
+			if err := filepath.WalkDir(srcParam, func(path string, d fs.DirEntry, err error) error {
+				if err != nil {
+					return fmt.Errorf("walk dir %s error: %v", srcParam, err)
+				}
+				if d.IsDir() { // only copy file
+					return nil
+				}
+
+				// keep the relative layout of src under dest
+				rel, err := filepath.Rel(srcParam, path)
+				if err != nil {
+					return fmt.Errorf("get relative path error: %v", err)
+				}
+				// get file old mode
+				info, err := d.Info()
+				if err != nil {
+					return fmt.Errorf("get file info error: %v", err)
+				}
+				mode := info.Mode()
+				if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
+					mode = os.FileMode(modeParam)
+				}
+				// read file
+				data, err := os.ReadFile(path)
+				if err != nil {
+					return fmt.Errorf("read file error: %v", err)
+				}
+				// copy file to remote
+				if err := conn.CopyFile(ctx, data, filepath.Join(destParam, rel), mode); err != nil {
+					return fmt.Errorf("copy file error: %v", err)
+				}
+				return nil
+			}); err != nil {
+				return "", fmt.Sprintf("walk dir %s in local path error: %v", srcParam, err)
+			}
+		} else { // src is file
+			data, err := os.ReadFile(srcParam)
+			if err != nil {
+				return "", fmt.Sprintf("read file error: %v", err)
+			}
+			if strings.HasSuffix(destParam, "/") {
+				destParam = destParam + filepath.Base(srcParam)
+			}
+			mode := fileInfo.Mode()
+			if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
+				mode = os.FileMode(modeParam)
+			}
+			if err := conn.CopyFile(ctx, data, destParam, mode); err != nil {
+				return "", fmt.Sprintf("copy file error: %v", err)
+			}
+		}
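+		// Note (illustrative): a numeric "mode" arg (e.g. 420, i.e. 0644)
+		// overrides the local file's permissions on the remote side; when
+		// omitted, each copied file keeps its local mode.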
+	} else { // src is a relative path: find the file in the project
+		pj, err := project.New(options.Pipeline, false)
+		if err != nil {
+			return "", fmt.Sprintf("get project error: %v", err)
+		}
+		fileInfo, err := pj.Stat(srcParam, project.GetFileOption{IsFile: true, Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]})
+		if err != nil {
+			return "", fmt.Sprintf("get file %s from project error: %v", srcParam, err)
+		}
+
+		if fileInfo.IsDir() {
+			if err := pj.WalkDir(srcParam, project.GetFileOption{IsFile: true, Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]}, func(path string, d fs.DirEntry, err error) error {
+				if err != nil {
+					return fmt.Errorf("walk dir %s error: %v", srcParam, err)
+				}
+				if d.IsDir() { // only copy file
+					return nil
+				}
+
+				// keep the relative layout of src under dest
+				rel, err := filepath.Rel(srcParam, path)
+				if err != nil {
+					return fmt.Errorf("get relative path error: %v", err)
+				}
+				info, err := d.Info()
+				if err != nil {
+					return fmt.Errorf("get file info error: %v", err)
+				}
+				mode := info.Mode()
+				if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
+					mode = os.FileMode(modeParam)
+				}
+				data, err := pj.ReadFile(path, project.GetFileOption{Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]})
+				if err != nil {
+					return fmt.Errorf("read file error: %v", err)
+				}
+				if err := conn.CopyFile(ctx, data, filepath.Join(destParam, rel), mode); err != nil {
+					return fmt.Errorf("copy file error: %v", err)
+				}
+				return nil
+			}); err != nil {
+				return "", fmt.Sprintf("walk dir %s in project error: %v", srcParam, err)
+			}
+		} else {
+			data, err := pj.ReadFile(srcParam, project.GetFileOption{IsFile: true, Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]})
+			if err != nil {
+				return "", fmt.Sprintf("read file error: %v", err)
+			}
+			if strings.HasSuffix(destParam, "/") {
+				destParam = destParam + filepath.Base(srcParam)
+			}
+			mode := fileInfo.Mode()
+			if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
+				mode = os.FileMode(modeParam)
+			}
+			if err := conn.CopyFile(ctx, data, destParam, mode); err != nil {
+				return "", fmt.Sprintf("copy file error: %v", err)
+			}
+		}
+	}
+
+	case contentParam != "": // convert content param and copy to remote
+		if strings.HasSuffix(destParam, "/") {
+			return "", "\"content\" should copy to a file"
+		}
+		mode := os.ModePerm
+		if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
+			mode = os.FileMode(modeParam)
+		}
+		if err := conn.CopyFile(ctx, []byte(contentParam), destParam, mode); err != nil {
+			return "", err.Error()
+		}
 	}
 	return "success", ""
+
 }
diff --git a/pkg/modules/copy_test.go b/pkg/modules/copy_test.go
index cb0187a5..f7e5b074 100644
--- a/pkg/modules/copy_test.go
+++ b/pkg/modules/copy_test.go
@@ -39,9 +39,11 @@ func TestCopy(t *testing.T) {
 			opt: ExecOptions{
 				Args:     runtime.RawExtension{},
 				Host:     "local",
-				Variable: nil,
+				Variable: &testVariable{},
 			},
-			ctx: context.Background(),
+			ctx: context.WithValue(context.Background(), "connector", &testConnector{
+				output: []byte("success"),
+			}),
 			exceptStderr: "\"src\" or \"content\" in args should be string",
 		},
 		{
@@ -51,9 +53,11 @@ func TestCopy(t *testing.T)
{ Raw: []byte(`{"content": "hello world"}`), }, Host: "local", - Variable: nil, + Variable: &testVariable{}, }, - ctx: context.Background(), + ctx: context.WithValue(context.Background(), "connector", &testConnector{ + output: []byte("success"), + }), exceptStderr: "\"dest\" in args should be string", }, { diff --git a/pkg/modules/debug.go b/pkg/modules/debug.go index 8699a73f..79b77690 100644 --- a/pkg/modules/debug.go +++ b/pkg/modules/debug.go @@ -27,39 +27,29 @@ import ( ) func ModuleDebug(ctx context.Context, options ExecOptions) (string, string) { + // get host variable + ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) + if err != nil { + klog.V(4).ErrorS(err, "failed to get host variable", "hostname", options.Host) + return "", err.Error() + } + args := variable.Extension2Variables(options.Args) - if v := variable.StringVar(args, "var"); v != nil { - lg, err := options.Variable.Get(variable.LocationVars{ - HostName: options.Host, - LocationUID: string(options.Task.UID), - }) + // var is defined. return the value of var + if varParam, err := variable.StringVar(ha.(map[string]any), args, "var"); err == nil { + result, err := tmpl.ParseString(ha.(map[string]any), fmt.Sprintf("{{ %s }}", varParam)) if err != nil { - klog.V(4).ErrorS(err, "Failed to get location vars") - return "", err.Error() - } - result, err := tmpl.ParseString(lg.(variable.VariableData), fmt.Sprintf("{{ %s }}", *v)) - if err != nil { - klog.V(4).ErrorS(err, "Failed to get var") + klog.V(4).ErrorS(err, "Failed to parse var") return "", err.Error() } return result, "" } - - if v := variable.StringVar(args, "msg"); v != nil { - lg, err := options.Variable.Get(variable.LocationVars{ - HostName: options.Host, - LocationUID: string(options.Task.UID), - }) - if err != nil { - klog.V(4).ErrorS(err, "Failed to get location vars") - return "", err.Error() - } - result, err := tmpl.ParseString(lg.(variable.VariableData), *v) - if err != nil { - klog.V(4).ErrorS(err, "Failed to get var") - return "", err.Error() - } - return result, "" + // msg is defined. return the actual msg + if msgParam, err := variable.StringVar(ha.(map[string]any), args, "msg"); err == nil { + return msgParam, "" + } + if err != nil { + return "", err.Error() } return "", "unknown args for debug. only support var or msg" diff --git a/pkg/modules/debug_test.go b/pkg/modules/debug_test.go index 5e07e0c0..0644be27 100644 --- a/pkg/modules/debug_test.go +++ b/pkg/modules/debug_test.go @@ -35,8 +35,9 @@ func TestDebug(t *testing.T) { { name: "non-var and non-msg", opt: ExecOptions{ - Args: runtime.RawExtension{}, - Host: "local", + Args: runtime.RawExtension{}, + Host: "local", + Variable: &testVariable{}, }, exceptStderr: "unknown args for debug. only support var or msg", }, diff --git a/pkg/modules/fetch.go b/pkg/modules/fetch.go new file mode 100644 index 00000000..7194e6b6 --- /dev/null +++ b/pkg/modules/fetch.go @@ -0,0 +1,73 @@ +/* +Copyright 2024 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package modules + +import ( + "context" + "os" + "path/filepath" + + "k8s.io/klog/v2" + + "github.com/kubesphere/kubekey/v4/pkg/variable" +) + +func ModuleFetch(ctx context.Context, options ExecOptions) (string, string) { + // get host variable + ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) + if err != nil { + klog.V(4).ErrorS(err, "failed to get host variable", "hostname", options.Host) + return "", err.Error() + } + // check args + args := variable.Extension2Variables(options.Args) + srcParam, err := variable.StringVar(ha.(map[string]any), args, "src") + if err != nil { + return "", "\"src\" in args should be string" + } + destParam, err := variable.StringVar(ha.(map[string]any), args, "dest") + if err != nil { + return "", "\"dest\" in args should be string" + } + + // get connector + conn, err := getConnector(ctx, options.Host, ha.(map[string]any)) + if err != nil { + return "", err.Error() + } + defer conn.Close(ctx) + + // fetch file + if _, err := os.Stat(filepath.Dir(destParam)); os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(destParam), 0755); err != nil { + klog.V(4).ErrorS(err, "failed to create dest dir") + return "", err.Error() + } + } + destFile, err := os.Create(destParam) + if err != nil { + klog.V(4).ErrorS(err, "failed to create dest file") + return "", err.Error() + } + defer destFile.Close() + + if err := conn.FetchFile(ctx, srcParam, destFile); err != nil { + klog.V(4).ErrorS(err, "failed to fetch file") + return "", err.Error() + } + return "success", "" +} diff --git a/pkg/modules/fetch_test.go b/pkg/modules/fetch_test.go new file mode 100644 index 00000000..fdf99132 --- /dev/null +++ b/pkg/modules/fetch_test.go @@ -0,0 +1,72 @@ +/* +Copyright 2024 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package modules
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	testassert "github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestFetch(t *testing.T) {
+	testcases := []struct {
+		name         string
+		opt          ExecOptions
+		ctx          context.Context
+		exceptStdout string
+		exceptStderr string
+	}{
+		{
+			name: "src is empty",
+			opt: ExecOptions{
+				Args:     runtime.RawExtension{},
+				Host:     "local",
+				Variable: &testVariable{},
+			},
+			ctx: context.WithValue(context.Background(), "connector", &testConnector{
+				output: []byte("success"),
+			}),
+			exceptStderr: "\"src\" in args should be string",
+		},
+		{
+			name: "dest is empty",
+			opt: ExecOptions{
+				Args: runtime.RawExtension{
+					Raw: []byte(`{"src": "/etc/test.txt"}`),
+				},
+				Host:     "local",
+				Variable: &testVariable{},
+			},
+			ctx: context.WithValue(context.Background(), "connector", &testConnector{
+				output: []byte("success"),
+			}),
+			exceptStderr: "\"dest\" in args should be string",
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(tc.ctx, time.Second*5)
+			defer cancel()
+			acStdout, acStderr := ModuleFetch(ctx, tc.opt)
+			testassert.Equal(t, tc.exceptStdout, acStdout)
+			testassert.Equal(t, tc.exceptStderr, acStderr)
+		})
+	}
+}
diff --git a/pkg/modules/gen_cert.go b/pkg/modules/gen_cert.go
new file mode 100644
index 00000000..a5c25e36
--- /dev/null
+++ b/pkg/modules/gen_cert.go
@@ -0,0 +1,429 @@
+package modules
+
+import (
+	"context"
+	"crypto"
+	"crypto/ecdsa"
+	cryptorand "crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"math"
+	"math/big"
+	"net"
+	"os"
+	"time"
+
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/validation"
+	certutil "k8s.io/client-go/util/cert"
+	"k8s.io/client-go/util/keyutil"
+	"k8s.io/klog/v2"
+	netutils "k8s.io/utils/net"
+
+	"github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+const (
+	// defaultSignCertAfter is the default validity period for signed certificates.
+	defaultSignCertAfter = time.Hour * 24 * 365 * 10
+	// certificateBlockType is a possible value for pem.Block.Type.
+	certificateBlockType = "CERTIFICATE"
+	rsaKeySize           = 2048
+
+	// policies controlling when cert files are generated:
+	// policyAlways always generates a new cert, overwriting any existing one.
+	policyAlways = "Always"
+	// policyIfNotPresent validates an existing cert if present, and generates a new one otherwise.
+	policyIfNotPresent = "IfNotPresent"
+)
+
+// ModuleGenCert generates certificate files.
+// If root_key and root_cert are empty, a self-signed certificate is generated.
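+//
+// Illustrative args (assumed JSON shape; values are hypothetical):
+//
+//	{"policy": "IfNotPresent", "cn": "etcd-admin",
+//	 "sans": ["192.168.0.2", "etcd.local"],
+//	 "root_key": "/etc/ssl/etcd/ca.key", "root_cert": "/etc/ssl/etcd/ca.crt",
+//	 "out_key": "/etc/ssl/etcd/admin.key", "out_cert": "/etc/ssl/etcd/admin.crt"}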
+func ModuleGenCert(ctx context.Context, options ExecOptions) (stdout string, stderr string) { + // get host variable + ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) + if err != nil { + klog.V(4).ErrorS(err, "failed to get host variable", "hostname", options.Host) + return "", err.Error() + } + // args + args := variable.Extension2Variables(options.Args) + rootKeyParam, _ := variable.StringVar(ha.(map[string]any), args, "root_key") + rootCertParam, _ := variable.StringVar(ha.(map[string]any), args, "root_cert") + dateParam, _ := variable.StringVar(ha.(map[string]any), args, "date") + policyParam, _ := variable.StringVar(ha.(map[string]any), args, "policy") + sansParam, _ := variable.StringSliceVar(ha.(map[string]any), args, "sans") + cnParam, _ := variable.StringVar(ha.(map[string]any), args, "cn") + outKeyParam, _ := variable.StringVar(ha.(map[string]any), args, "out_key") + outCertParam, _ := variable.StringVar(ha.(map[string]any), args, "out_cert") + // check args + if policyParam != policyAlways && policyParam != policyIfNotPresent { + return "", "\"policy\" should be one of [Always, IfNotPresent]" + } + if outKeyParam == "" || outCertParam == "" { + return "", "\"out_key\" or \"out_cert\" in args should be string" + } + if cnParam == "" { + return "", "\"cn\" in args should be string" + } + + altName := &certutil.AltNames{ + DNSNames: []string{"localhost"}, + IPs: []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback}, + } + appendSANsToAltNames(altName, sansParam, outCertParam) + cfg := &certutil.Config{ + CommonName: cnParam, + Organization: []string{"kubekey"}, + AltNames: *altName, + } + + var newKey *rsa.PrivateKey + var newCert *x509.Certificate + newKey, err = rsa.GenerateKey(cryptorand.Reader, rsaKeySize) + if err != nil { + return "", err.Error() + } + + var after time.Duration + // change expiration date + if dateParam != "" { + dur, err := time.ParseDuration(dateParam) + if err != nil { + klog.V(4).ErrorS(err, "Failed to parse duration") + return "", err.Error() + } + after = dur + } + + switch { + case rootKeyParam == "" || rootCertParam == "": // generate Self-signed certificate + newCert, err = NewSelfSignedCACert(*cfg, after, newKey) + if err != nil { + klog.V(4).ErrorS(err, "Failed to generate Self-signed certificate") + return "", err.Error() + } + default: // generate certificate signed by root certificate + parentKey, err := TryLoadKeyFromDisk(rootKeyParam) + if err != nil { + klog.V(4).ErrorS(err, "Failed to load root key") + return "", err.Error() + } + parentCert, _, err := TryLoadCertChainFromDisk(rootCertParam) + if err != nil { + klog.V(4).ErrorS(err, "Failed to load root certificate") + return "", err.Error() + } + if policyParam == policyIfNotPresent { + if _, err := TryLoadKeyFromDisk(outKeyParam); err != nil { + klog.V(4).InfoS("Failed to load out key, new it") + goto NEW + } + existCert, intermediates, err := TryLoadCertChainFromDisk(outCertParam) + if err != nil { + klog.V(4).InfoS("Failed to load out cert, new it") + goto NEW + } + // check if the existing key and cert match the root key and cert + if err := ValidateCertPeriod(existCert, 0); err != nil { + klog.V(4).ErrorS(err, "Failed to ValidateCertPeriod", "out_cert", outCertParam) + return "", err.Error() + } + if err := VerifyCertChain(existCert, intermediates, parentCert); err != nil { + klog.V(4).ErrorS(err, "Failed to VerifyCertChain", "out_cert", outCertParam) + return "", err.Error() + } + if err := validateCertificateWithConfig(existCert, outCertParam, cfg); err != nil { 
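+				// the existing cert is readable and unexpired but does not cover the
+				// requested SANs; report that instead of silently regenerating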
+ klog.V(4).ErrorS(err, "Failed to validateCertificateWithConfig", "out_cert", outCertParam) + return "", err.Error() + } + return "skip", "" + } + NEW: + newCert, err = NewSignedCert(*cfg, after, newKey, parentCert, parentKey, true) + if err != nil { + klog.V(4).ErrorS(err, "Failed to generate certificate") + return "", err.Error() + } + } + + // write key and cert to file + if err := WriteKey(outKeyParam, newKey, policyParam); err != nil { + klog.V(4).ErrorS(err, "Failed to write key") + return "", err.Error() + } + if err := WriteCert(outCertParam, newCert, policyParam); err != nil { + klog.V(4).ErrorS(err, "Failed to write certificate") + return "", err.Error() + } + return "success", "" +} + +// WriteKey stores the given key at the given location +func WriteKey(outKey string, key crypto.Signer, policy string) error { + if _, err := os.Stat(outKey); err == nil && policy == policyIfNotPresent { + // skip + return nil + } + if key == nil { + return errors.New("private key cannot be nil when writing to file") + } + + encoded, err := keyutil.MarshalPrivateKeyToPEM(key) + if err != nil { + return errors.Wrapf(err, "unable to marshal private key to PEM") + } + if err := keyutil.WriteKey(outKey, encoded); err != nil { + return errors.Wrapf(err, "unable to write private key to file %s", outKey) + } + + return nil +} + +// WriteCert stores the given certificate at the given location +func WriteCert(outCert string, cert *x509.Certificate, policy string) error { + if _, err := os.Stat(outCert); err == nil && policy == policyIfNotPresent { + // skip + return nil + } + if cert == nil { + return errors.New("certificate cannot be nil when writing to file") + } + + if err := certutil.WriteCert(outCert, EncodeCertPEM(cert)); err != nil { + return errors.Wrapf(err, "unable to write certificate to file %s", outCert) + } + + return nil +} + +// EncodeCertPEM returns PEM-endcoded certificate data +func EncodeCertPEM(cert *x509.Certificate) []byte { + block := pem.Block{ + Type: certificateBlockType, + Bytes: cert.Raw, + } + return pem.EncodeToMemory(&block) +} + +// TryLoadKeyFromDisk tries to load the key from the disk and validates that it is valid +func TryLoadKeyFromDisk(rootKey string) (crypto.Signer, error) { + // Parse the private key from a file + privKey, err := keyutil.PrivateKeyFromFile(rootKey) + if err != nil { + return nil, errors.Wrapf(err, "couldn't load the private key file %s", rootKey) + } + + // Allow RSA and ECDSA formats only + var key crypto.Signer + switch k := privKey.(type) { + case *rsa.PrivateKey: + key = k + case *ecdsa.PrivateKey: + key = k + default: + return nil, errors.Errorf("the private key file %s is neither in RSA nor ECDSA format", rootKey) + } + + return key, nil +} + +// TryLoadCertChainFromDisk tries to load the cert chain from the disk +func TryLoadCertChainFromDisk(rootCert string) (*x509.Certificate, []*x509.Certificate, error) { + + certs, err := certutil.CertsFromFile(rootCert) + if err != nil { + return nil, nil, errors.Wrapf(err, "couldn't load the certificate file %s", rootCert) + } + + cert := certs[0] + intermediates := certs[1:] + + return cert, intermediates, nil +} + +// appendSANsToAltNames parses SANs from as list of strings and adds them to altNames for use on a specific cert +// altNames is passed in with a pointer, and the struct is modified +// valid IP address strings are parsed and added to altNames.IPs as net.IP's +// RFC-1123 compliant DNS strings are added to altNames.DNSNames as strings +// RFC-1123 compliant wildcard DNS strings are added 
to altNames.DNSNames as strings +// certNames is used to print user facing warnings and should be the name of the cert the altNames will be used for +func appendSANsToAltNames(altNames *certutil.AltNames, SANs []string, certName string) { + for _, altname := range SANs { + if ip := netutils.ParseIPSloppy(altname); ip != nil { + altNames.IPs = append(altNames.IPs, ip) + } else if len(validation.IsDNS1123Subdomain(altname)) == 0 { + altNames.DNSNames = append(altNames.DNSNames, altname) + } else if len(validation.IsWildcardDNS1123Subdomain(altname)) == 0 { + altNames.DNSNames = append(altNames.DNSNames, altname) + } else { + klog.Warningf( + "[certificates] WARNING: '%s' was not added to the '%s' SAN, because it is not a valid IP or RFC-1123 compliant DNS entry\n", + altname, + certName, + ) + } + } +} + +// NewSelfSignedCACert creates a CA certificate +func NewSelfSignedCACert(cfg certutil.Config, after time.Duration, key crypto.Signer) (*x509.Certificate, error) { + now := time.Now() + // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). + serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) + if err != nil { + return nil, err + } + serial = new(big.Int).Add(serial, big.NewInt(1)) + notBefore := now.UTC() + if !cfg.NotBefore.IsZero() { + notBefore = cfg.NotBefore.UTC() + } + if after == 0 { // default 10 year + after = defaultSignCertAfter + } + + tmpl := x509.Certificate{ + SerialNumber: serial, + Subject: pkix.Name{ + CommonName: cfg.CommonName, + Organization: cfg.Organization, + }, + DNSNames: []string{cfg.CommonName}, + NotBefore: notBefore, + NotAfter: now.Add(after).UTC(), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + } + + certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) + if err != nil { + return nil, err + } + return x509.ParseCertificate(certDERBytes) +} + +// NewSignedCert creates a signed certificate using the given CA certificate and key +func NewSignedCert(cfg certutil.Config, after time.Duration, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer, isCA bool) (*x509.Certificate, error) { + // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). 
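+	// (x509 serial numbers must be positive per RFC 5280, hence the +1 shift below)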
+ serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) + if err != nil { + return nil, err + } + serial = new(big.Int).Add(serial, big.NewInt(1)) + if len(cfg.CommonName) == 0 { + return nil, errors.New("must specify a CommonName") + } + + keyUsage := x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature + if isCA { + keyUsage |= x509.KeyUsageCertSign + } + + RemoveDuplicateAltNames(&cfg.AltNames) + + if after == 0 { + after = defaultSignCertAfter + } + + certTmpl := x509.Certificate{ + Subject: pkix.Name{ + CommonName: cfg.CommonName, + Organization: cfg.Organization, + }, + DNSNames: cfg.AltNames.DNSNames, + IPAddresses: cfg.AltNames.IPs, + SerialNumber: serial, + NotBefore: caCert.NotBefore, + NotAfter: time.Now().Add(after).UTC(), + KeyUsage: keyUsage, + ExtKeyUsage: cfg.Usages, + BasicConstraintsValid: true, + IsCA: isCA, + } + certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey) + if err != nil { + return nil, err + } + return x509.ParseCertificate(certDERBytes) +} + +// RemoveDuplicateAltNames removes duplicate items in altNames. +func RemoveDuplicateAltNames(altNames *certutil.AltNames) { + if altNames == nil { + return + } + + if altNames.DNSNames != nil { + altNames.DNSNames = sets.List(sets.New(altNames.DNSNames...)) + } + + ipsKeys := make(map[string]struct{}) + var ips []net.IP + for _, one := range altNames.IPs { + if _, ok := ipsKeys[one.String()]; !ok { + ipsKeys[one.String()] = struct{}{} + ips = append(ips, one) + } + } + altNames.IPs = ips +} + +// ValidateCertPeriod checks if the certificate is valid relative to the current time +// (+/- offset) +func ValidateCertPeriod(cert *x509.Certificate, offset time.Duration) error { + period := fmt.Sprintf("NotBefore: %v, NotAfter: %v", cert.NotBefore, cert.NotAfter) + now := time.Now().Add(offset) + if now.Before(cert.NotBefore) { + return errors.Errorf("the certificate is not valid yet: %s", period) + } + if now.After(cert.NotAfter) { + return errors.Errorf("the certificate has expired: %s", period) + } + return nil +} + +// VerifyCertChain verifies that a certificate has a valid chain of +// intermediate CAs back to the root CA +func VerifyCertChain(cert *x509.Certificate, intermediates []*x509.Certificate, root *x509.Certificate) error { + rootPool := x509.NewCertPool() + rootPool.AddCert(root) + + intermediatePool := x509.NewCertPool() + for _, c := range intermediates { + intermediatePool.AddCert(c) + } + + verifyOptions := x509.VerifyOptions{ + Roots: rootPool, + Intermediates: intermediatePool, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + } + + if _, err := cert.Verify(verifyOptions); err != nil { + return err + } + + return nil +} + +// validateCertificateWithConfig makes sure that a given certificate is valid at +// least for the SANs defined in the configuration. 
+func validateCertificateWithConfig(cert *x509.Certificate, baseName string, cfg *certutil.Config) error { + for _, dnsName := range cfg.AltNames.DNSNames { + if err := cert.VerifyHostname(dnsName); err != nil { + return errors.Wrapf(err, "certificate %s is invalid", baseName) + } + } + for _, ipAddress := range cfg.AltNames.IPs { + if err := cert.VerifyHostname(ipAddress.String()); err != nil { + return errors.Wrapf(err, "certificate %s is invalid", baseName) + } + } + return nil +} diff --git a/pkg/modules/gen_cert_test.go b/pkg/modules/gen_cert_test.go new file mode 100644 index 00000000..41b91dfd --- /dev/null +++ b/pkg/modules/gen_cert_test.go @@ -0,0 +1,52 @@ +package modules + +import ( + "context" + "os" + "testing" + + testassert "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestModuleGenCert(t *testing.T) { + testcases := []struct { + name string + opt ExecOptions + exceptStdout string + exceptStderr string + }{ + { + name: "gen root cert", + opt: ExecOptions{ + Args: runtime.RawExtension{ + Raw: []byte(`{ +"policy": "IfNotPresent", +"sans": ["localhost"], +"cn": "test", +"out_key": "./test_gen_cert/test-key.pem", +"out_cert": "./test_gen_cert/test-crt.pem" + }`), + }, + Host: "local", + Variable: &testVariable{}, + }, + exceptStdout: "success", + }, + } + + if _, err := os.Stat("./test_gen_cert"); os.IsNotExist(err) { + if err := os.Mkdir("./test_gen_cert", 0755); err != nil { + t.Fatal(err) + } + } + defer os.RemoveAll("./test_gen_cert") + + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + stdout, stderr := ModuleGenCert(context.Background(), testcase.opt) + testassert.Equal(t, testcase.exceptStdout, stdout) + testassert.Equal(t, testcase.exceptStderr, stderr) + }) + } +} diff --git a/pkg/modules/module.go b/pkg/modules/module.go index 8cae1d73..d44b7e70 100644 --- a/pkg/modules/module.go +++ b/pkg/modules/module.go @@ -19,6 +19,8 @@ package modules import ( "context" "fmt" + "github.com/kubesphere/kubekey/v4/pkg/connector" + "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/runtime" @@ -61,7 +63,27 @@ func init() { RegisterModule("command", ModuleCommand) RegisterModule("shell", ModuleCommand) RegisterModule("copy", ModuleCopy) + RegisterModule("fetch", ModuleFetch) RegisterModule("debug", ModuleDebug) RegisterModule("template", ModuleTemplate) RegisterModule("set_fact", ModuleSetFact) + RegisterModule("gen_cert", ModuleGenCert) +} + +func getConnector(ctx context.Context, host string, data map[string]any) (connector.Connector, error) { + var conn connector.Connector + var err error + if v := ctx.Value("connector"); v != nil { + conn = v.(connector.Connector) + } else { + conn, err = connector.NewConnector(host, data) + if err != nil { + return conn, err + } + } + if err = conn.Init(ctx); err != nil { + klog.V(4).ErrorS(err, "failed to init connector") + return conn, err + } + return conn, nil } diff --git a/pkg/modules/module_test.go b/pkg/modules/module_test.go index d43a79c3..db5fb475 100644 --- a/pkg/modules/module_test.go +++ b/pkg/modules/module_test.go @@ -25,7 +25,7 @@ import ( ) type testVariable struct { - value variable.VariableData + value map[string]any err error } @@ -33,12 +33,12 @@ func (v testVariable) Key() string { return "testModule" } -func (v testVariable) Get(option variable.GetOption) (any, error) { +func (v testVariable) Get(f variable.GetFunc) (any, error) { return v.value, v.err } -func (v testVariable) Merge(option ...variable.MergeOption) error { - v.value = variable.VariableData{ 
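+// Merge is a test stub: testVariable has a value receiver, so the write below
+// is discarded and Merge simply reports success.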
+func (v testVariable) Merge(f variable.MergeFunc) error { + v.value = map[string]any{ "k": "v", } return nil diff --git a/pkg/modules/set_fact.go b/pkg/modules/set_fact.go index e645b173..e25f5399 100644 --- a/pkg/modules/set_fact.go +++ b/pkg/modules/set_fact.go @@ -18,45 +18,17 @@ package modules import ( "context" + "fmt" - "k8s.io/klog/v2" - - "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" "github.com/kubesphere/kubekey/v4/pkg/variable" ) func ModuleSetFact(ctx context.Context, options ExecOptions) (string, string) { + // get host variable args := variable.Extension2Variables(options.Args) - lv, err := options.Variable.Get(variable.LocationVars{ - HostName: options.Host, - LocationUID: string(options.Task.UID), - }) - if err != nil { - klog.V(4).ErrorS(err, "failed to get location vars") - return "", err.Error() - } - factVars := variable.VariableData{} - for k, v := range args { - switch v.(type) { - case string: - factVars[k], err = tmpl.ParseString(lv.(variable.VariableData), v.(string)) - if err != nil { - klog.V(4).ErrorS(err, "template parse error", "input", v) - return "", err.Error() - } - default: - factVars[k] = v - } - } - - if err := options.Variable.Merge(variable.HostMerge{ - HostNames: []string{options.Host}, - LocationUID: "", - Data: factVars, - }); err != nil { - klog.V(4).ErrorS(err, "merge fact error") - return "", err.Error() + if err := options.Variable.Merge(variable.MergeAllRuntimeVariable(options.Host, args)); err != nil { + return "", fmt.Sprintf("set_fact error: %v", err) } return "success", "" } diff --git a/pkg/modules/template.go b/pkg/modules/template.go index 4ac0c2d4..8969da46 100644 --- a/pkg/modules/template.go +++ b/pkg/modules/template.go @@ -18,115 +18,168 @@ package modules import ( "context" + "fmt" "io/fs" "os" "path/filepath" + "strings" "k8s.io/klog/v2" kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1" - "github.com/kubesphere/kubekey/v4/pkg/connector" "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" "github.com/kubesphere/kubekey/v4/pkg/project" "github.com/kubesphere/kubekey/v4/pkg/variable" ) func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) { + // get host variable + ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) + if err != nil { + klog.V(4).ErrorS(err, "failed to get host variable", "hostname", options.Host) + return "", err.Error() + } // check args args := variable.Extension2Variables(options.Args) - src := variable.StringVar(args, "src") - if src == nil { + srcParam, err := variable.StringVar(ha.(map[string]any), args, "src") + if err != nil { return "", "\"src\" should be string" } - dest := variable.StringVar(args, "dest") - if dest == nil { + destParam, err := variable.StringVar(ha.(map[string]any), args, "dest") + if err != nil { return "", "\"dest\" should be string" } - lv, err := options.Variable.Get(variable.LocationVars{ - HostName: options.Host, - LocationUID: string(options.Task.UID), - }) + // get connector + conn, err := getConnector(ctx, options.Host, ha.(map[string]any)) if err != nil { - klog.V(4).ErrorS(err, "failed to get location vars") - return "", err.Error() - } - srcStr, err := tmpl.ParseString(lv.(variable.VariableData), *src) - if err != nil { - klog.V(4).ErrorS(err, "template parse src error", "input", *src) - return "", err.Error() - } - destStr, err := tmpl.ParseString(lv.(variable.VariableData), *dest) - if err != nil { - klog.V(4).ErrorS(err, "template parse dest error", "input", *dest) - return "", err.Error() - 
}
-
-	var baseFS fs.FS
-	if filepath.IsAbs(srcStr) {
-		baseFS = os.DirFS("/")
-	} else {
-		projectFs, err := project.New(project.Options{Pipeline: &options.Pipeline}).FS(ctx, false)
-		if err != nil {
-			klog.V(4).ErrorS(err, "failed to get project fs")
-			return "", err.Error()
-		}
-		baseFS = projectFs
-	}
-	roleName := options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]
-	flPath := project.GetTemplatesFromPlayBook(baseFS, options.Pipeline.Spec.Playbook, roleName, srcStr)
-	if _, err := fs.Stat(baseFS, flPath); err != nil {
-		klog.V(4).ErrorS(err, "find src error")
-		return "", err.Error()
-	}
-
-	var conn connector.Connector
-	if v := ctx.Value("connector"); v != nil {
-		conn = v.(connector.Connector)
-	} else {
-		// get connector
-		ha, err := options.Variable.Get(variable.HostVars{HostName: options.Host})
-		if err != nil {
-			klog.V(4).ErrorS(err, "failed to get host vars")
-			return "", err.Error()
-		}
-		conn = connector.NewConnector(options.Host, ha.(variable.VariableData))
-	}
-	if err := conn.Init(ctx); err != nil {
-		klog.V(4).ErrorS(err, "failed to init connector")
 		return "", err.Error()
 	}
 	defer conn.Close(ctx)
 
-	// find src file
-	lg, err := options.Variable.Get(variable.LocationVars{
-		HostName:    options.Host,
-		LocationUID: string(options.Task.UID),
-	})
-	if err != nil {
-		klog.V(4).ErrorS(err, "failed to get location vars")
-		return "", err.Error()
-	}
+	if filepath.IsAbs(srcParam) {
+		fileInfo, err := os.Stat(srcParam)
+		if err != nil {
+			return "", fmt.Sprintf("get src file %s in local path error: %v", srcParam, err)
+		}
 
-	data, err := fs.ReadFile(baseFS, flPath)
-	if err != nil {
-		klog.V(4).ErrorS(err, "failed to read src file", "file_path", flPath)
-		return "", err.Error()
-	}
-	result, err := tmpl.ParseFile(lg.(variable.VariableData), data)
-	if err != nil {
-		klog.V(4).ErrorS(err, "failed to parse file", "file_path", flPath)
-		return "", err.Error()
-	}
+		if fileInfo.IsDir() { // src is dir
+			if err := filepath.WalkDir(srcParam, func(path string, d fs.DirEntry, err error) error {
+				if err != nil { // check the walk error before touching d
+					return fmt.Errorf("walk dir %s error: %v", srcParam, err)
+				}
+				if d.IsDir() { // only copy files
+					return nil
+				}
 
-	// copy file
-	mode := fs.ModePerm
-	if v := variable.IntVar(args, "mode"); v != nil {
-		mode = fs.FileMode(*v)
-	}
-	if err := conn.CopyFile(ctx, []byte(result), destStr, mode); err != nil {
-		klog.V(4).ErrorS(err, "failed to copy file", "src", flPath, "dest", destStr)
-		return "", err.Error()
+				// get file old mode
+				info, err := d.Info()
+				if err != nil {
+					return fmt.Errorf("get file info error: %v", err)
+				}
+				mode := info.Mode()
+				if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
+					mode = os.FileMode(modeParam)
+				}
+				// read file
+				data, err := os.ReadFile(path)
+				if err != nil {
+					return fmt.Errorf("read file error: %v", err)
+				}
+				result, err := tmpl.ParseFile(ha.(map[string]any), data)
+				if err != nil {
+					return fmt.Errorf("parse file error: %v", err)
+				}
+				// copy the rendered file to the remote host, preserving the layout below src
+				rel, err := filepath.Rel(srcParam, path)
+				if err != nil {
+					return fmt.Errorf("relativize path %s error: %v", path, err)
+				}
+				if err := conn.CopyFile(ctx, []byte(result), filepath.Join(destParam, rel), mode); err != nil {
+					return fmt.Errorf("copy file error: %v", err)
+				}
+				return nil
+			}); err != nil {
+				return "", fmt.Sprintf("walk dir %s in local path error: %v", srcParam, err)
+			}
+		} else { // src is file
+			data, err := os.ReadFile(srcParam)
+			if err != nil {
+				return "", fmt.Sprintf("read file error: %v", err)
+			}
+			result, err := tmpl.ParseFile(ha.(map[string]any), data)
+			if err != nil {
+				return "", fmt.Sprintf("parse file error: %v", err)
+			}
+			if strings.HasSuffix(destParam, "/") {
+				destParam += filepath.Base(srcParam)
+			}
+			mode := fileInfo.Mode()
+			if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
+				mode = os.FileMode(modeParam)
+			}
+			if err := conn.CopyFile(ctx, []byte(result), destParam, mode); err != nil {
+				return "", fmt.Sprintf("copy file error: %v", err)
+			}
+		}
+	} else {
+		pj, err := project.New(options.Pipeline, false)
+		if err != nil {
+			return "", fmt.Sprintf("get project error: %v", err)
+		}
+		fileInfo, err := pj.Stat(srcParam, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]})
+		if err != nil {
+			return "", fmt.Sprintf("get file %s from project error: %v", srcParam, err)
+		}
+
+		if fileInfo.IsDir() {
+			if err := pj.WalkDir(srcParam, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]}, func(path string, d fs.DirEntry, err error) error {
+				if err != nil { // check the walk error before touching d
+					return fmt.Errorf("walk dir %s error: %v", srcParam, err)
+				}
+				if d.IsDir() { // only copy files
+					return nil
+				}
+
+				info, err := d.Info()
+				if err != nil {
+					return fmt.Errorf("get file info error: %v", err)
+				}
+				mode := info.Mode()
+				if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
+					mode = os.FileMode(modeParam)
+				}
+				data, err := pj.ReadFile(path, project.GetFileOption{Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]})
+				if err != nil {
+					return fmt.Errorf("read file error: %v", err)
+				}
+				result, err := tmpl.ParseFile(ha.(map[string]any), data)
+				if err != nil {
+					return fmt.Errorf("parse file error: %v", err)
+				}
+				// the walked path is rooted at the resolved template dir, not at src;
+				// rebase it onto destParam before copying
+				dest := filepath.Join(destParam, filepath.Base(path))
+				if idx := strings.Index(path, srcParam); idx >= 0 {
+					dest = filepath.Join(destParam, path[idx+len(srcParam):])
+				}
+				if err := conn.CopyFile(ctx, []byte(result), dest, mode); err != nil {
+					return fmt.Errorf("copy file error: %v", err)
+				}
+				return nil
+			}); err != nil {
+				return "", fmt.Sprintf("walk dir %s in project error: %v", srcParam, err)
+			}
+		} else {
+			data, err := pj.ReadFile(srcParam, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]})
+			if err != nil {
+				return "", fmt.Sprintf("read file error: %v", err)
+			}
+			result, err := tmpl.ParseFile(ha.(map[string]any), data)
+			if err != nil {
+				return "", fmt.Sprintf("parse file error: %v", err)
+			}
+			if strings.HasSuffix(destParam, "/") {
+				destParam += filepath.Base(srcParam)
+			}
+			mode := fileInfo.Mode()
+			if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
+				mode = os.FileMode(modeParam)
+			}
+			if err := conn.CopyFile(ctx, []byte(result), destParam, mode); err != nil {
+				return "", fmt.Sprintf("copy file error: %v", err)
+			}
+		}
 	}
 	return "success", ""
 }
diff --git a/pkg/modules/template_test.go b/pkg/modules/template_test.go
index 4b9d4b44..e1e8914b 100644
--- a/pkg/modules/template_test.go
+++ b/pkg/modules/template_test.go
@@ -47,7 +47,7 @@ func TestTemplate(t *testing.T) {
 			opt: ExecOptions{
 				Args:     runtime.RawExtension{},
 				Host:     "local",
-				Variable: nil,
+				Variable: &testVariable{},
 			},
 			ctx:          context.Background(),
 			exceptStderr: "\"src\" should be string",
@@ -59,7 +59,7 @@ func TestTemplate(t *testing.T) {
 				Raw: []byte(fmt.Sprintf(`{"src": %s}`, absPath)),
 			},
 			Host:     "local",
-			Variable: nil,
+			Variable: &testVariable{},
 		},
 		ctx:          context.Background(),
 		exceptStderr: "\"dest\" should be string",
diff --git a/pkg/project/builtin.go b/pkg/project/builtin.go
new file mode 100644
index 00000000..c57ec218
--- /dev/null
+++ b/pkg/project/builtin.go
@@ -0,0 +1,98 @@
+//go:build builtin
+// +build builtin
+
+/*
+Copyright 2024 The KubeSphere Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package project + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + + "github.com/kubesphere/kubekey/v4/builtin" + kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" + kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" + _const "github.com/kubesphere/kubekey/v4/pkg/const" +) + +func init() { + builtinProjectFunc = func(pipeline kubekeyv1.Pipeline) (Project, error) { + if pipeline.Spec.Playbook == "" { + return nil, fmt.Errorf("playbook should not be empty") + } + if filepath.IsAbs(pipeline.Spec.Playbook) { + return nil, fmt.Errorf("playbook should be relative path base on project.addr") + } + return &builtinProject{Pipeline: pipeline, FS: builtin.BuiltinPipeline, playbook: pipeline.Spec.Playbook}, nil + } +} + +type builtinProject struct { + kubekeyv1.Pipeline + + fs.FS + // playbook relpath base on projectDir + playbook string +} + +func (p builtinProject) getFilePath(path string, o GetFileOption) string { + var find []string + switch { + case o.IsFile: + if o.Role != "" { + // find from project/roles/roleName + find = append(find, filepath.Join(_const.ProjectRolesDir, o.Role, _const.ProjectRolesFilesDir, path)) + // find from pbPath dir like: current_playbook/roles/roleName + find = append(find, filepath.Join(p.playbook, _const.ProjectRolesDir, o.Role, _const.ProjectRolesFilesDir, path)) + } + find = append(find, filepath.Join(_const.ProjectRolesFilesDir, path)) + case o.IsTemplate: + // find from project/roles/roleName + if o.Role != "" { + find = append(find, filepath.Join(_const.ProjectRolesDir, o.Role, _const.ProjectRolesTemplateDir, path)) + // find from pbPath dir like: current_playbook/roles/roleName + find = append(find, filepath.Join(p.playbook, _const.ProjectRolesDir, o.Role, _const.ProjectRolesTemplateDir, path)) + } + find = append(find, filepath.Join(_const.ProjectRolesTemplateDir, path)) + default: + find = append(find, filepath.Join(path)) + } + for _, s := range find { + if _, err := fs.Stat(p.FS, s); err == nil { + return s + } + } + return "" +} + +func (p builtinProject) Stat(path string, option GetFileOption) (os.FileInfo, error) { + return fs.Stat(p.FS, p.getFilePath(path, option)) +} + +func (p builtinProject) WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error { + return fs.WalkDir(p.FS, p.getFilePath(path, option), f) +} + +func (p builtinProject) ReadFile(path string, option GetFileOption) ([]byte, error) { + return fs.ReadFile(p.FS, p.getFilePath(path, option)) +} + +func (p builtinProject) MarshalPlaybook() (*kkcorev1.Playbook, error) { + return marshalPlaybook(p.FS, p.playbook) +} diff --git a/pkg/project/git.go b/pkg/project/git.go new file mode 100644 index 00000000..b3ba6d78 --- /dev/null +++ b/pkg/project/git.go @@ -0,0 +1,161 @@ +/* +Copyright 2023 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package project + +import ( + "context" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + + "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/transport/http" + "k8s.io/klog/v2" + + kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" + kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" + _const "github.com/kubesphere/kubekey/v4/pkg/const" +) + +func newGitProject(pipeline kubekeyv1.Pipeline, update bool) (Project, error) { + if pipeline.Spec.Playbook == "" || pipeline.Spec.Project.Addr == "" { + return nil, fmt.Errorf("playbook and project.addr should not be empty") + } + if filepath.IsAbs(pipeline.Spec.Playbook) { + return nil, fmt.Errorf("playbook should be relative path base on project.addr") + } + + // git clone to project dir + if pipeline.Spec.Project.Name == "" { + pipeline.Spec.Project.Name = strings.TrimSuffix(pipeline.Spec.Project.Addr[strings.LastIndex(pipeline.Spec.Project.Addr, "/")+1:], ".git") + } + p := &gitProject{ + Pipeline: pipeline, + projectDir: filepath.Join(_const.GetWorkDir(), _const.ProjectDir, pipeline.Spec.Project.Name), + playbook: pipeline.Spec.Playbook, + } + if _, err := os.Stat(p.projectDir); os.IsNotExist(err) { + // git clone + if err := p.gitClone(context.Background()); err != nil { + return nil, fmt.Errorf("clone git project error: %v", err) + } + } else if update { + // git pull + if err := p.gitPull(context.Background()); err != nil { + return nil, fmt.Errorf("pull git project error: %v", err) + } + } + return p, nil +} + +// gitProject from git +type gitProject struct { + kubekeyv1.Pipeline + + projectDir string + // playbook relpath base on projectDir + playbook string +} + +func (p gitProject) getFilePath(path string, o GetFileOption) string { + var find []string + switch { + case o.IsFile: + if o.Role != "" { + // find from project/roles/roleName + find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesDir, o.Role, _const.ProjectRolesFilesDir, path)) + // find from pbPath dir like: current_playbook/roles/roleName + find = append(find, filepath.Join(p.projectDir, p.playbook, _const.ProjectRolesDir, o.Role, _const.ProjectRolesFilesDir, path)) + } + find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesFilesDir, path)) + case o.IsTemplate: + // find from project/roles/roleName + if o.Role != "" { + find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesDir, o.Role, _const.ProjectRolesTemplateDir, path)) + // find from pbPath dir like: current_playbook/roles/roleName + find = append(find, filepath.Join(p.projectDir, p.playbook, _const.ProjectRolesDir, o.Role, _const.ProjectRolesTemplateDir, path)) + } + find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesTemplateDir, path)) + default: + find = append(find, filepath.Join(p.projectDir, path)) + } + for _, s := range find { + if _, err := os.Stat(s); err == nil { + return s + } + } + return "" +} + +func (p gitProject) Stat(path string, option GetFileOption) (os.FileInfo, error) { + return os.Stat(p.getFilePath(path, option)) +} + +func (p gitProject) WalkDir(path 
string, option GetFileOption, f fs.WalkDirFunc) error { + return filepath.WalkDir(p.getFilePath(path, option), f) +} + +func (p gitProject) ReadFile(path string, option GetFileOption) ([]byte, error) { + return os.ReadFile(p.getFilePath(path, option)) +} + +func (p gitProject) MarshalPlaybook() (*kkcorev1.Playbook, error) { + return marshalPlaybook(os.DirFS(p.projectDir), p.Pipeline.Spec.Playbook) +} + +func (p gitProject) gitClone(ctx context.Context) error { + if _, err := git.PlainCloneContext(ctx, p.projectDir, false, &git.CloneOptions{ + URL: p.Pipeline.Spec.Project.Addr, + Progress: nil, + ReferenceName: plumbing.NewBranchReferenceName(p.Pipeline.Spec.Project.Branch), + SingleBranch: true, + Auth: &http.TokenAuth{p.Pipeline.Spec.Project.Token}, + InsecureSkipTLS: false, + }); err != nil { + klog.Errorf("clone project %s failed: %v", p.Pipeline.Spec.Project.Addr, err) + return err + } + return nil +} + +func (p gitProject) gitPull(ctx context.Context) error { + open, err := git.PlainOpen(p.projectDir) + if err != nil { + klog.V(4).ErrorS(err, "git open error", "local_dir", p.projectDir) + return err + } + wt, err := open.Worktree() + if err != nil { + klog.V(4).ErrorS(err, "git open worktree error", "local_dir", p.projectDir) + return err + } + if err := wt.PullContext(ctx, &git.PullOptions{ + RemoteURL: p.Pipeline.Spec.Project.Addr, + ReferenceName: plumbing.NewBranchReferenceName(p.Pipeline.Spec.Project.Branch), + SingleBranch: true, + Auth: &http.TokenAuth{p.Pipeline.Spec.Project.Token}, + InsecureSkipTLS: false, + }); err != nil && err != git.NoErrAlreadyUpToDate { + klog.V(4).ErrorS(err, "git pull error", "local_dir", p.projectDir) + return err + } + + return nil +} diff --git a/pkg/project/helper.go b/pkg/project/helper.go index e600774b..9ca68d1a 100644 --- a/pkg/project/helper.go +++ b/pkg/project/helper.go @@ -22,14 +22,212 @@ import ( "os" "path/filepath" + "gopkg.in/yaml.v3" + "k8s.io/klog/v2" + + kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" _const "github.com/kubesphere/kubekey/v4/pkg/const" ) -// GetPlaybookBaseFromPlaybook +// marshalPlaybook kkcorev1.Playbook from a playbook file +func marshalPlaybook(baseFS fs.FS, pbPath string) (*kkcorev1.Playbook, error) { + // convert playbook to kkcorev1.Playbook + pb := &kkcorev1.Playbook{} + if err := loadPlaybook(baseFS, pbPath, pb); err != nil { + klog.V(4).ErrorS(err, "Load playbook failed", "playbook", pbPath) + return nil, err + } + + // convertRoles + if err := convertRoles(baseFS, pbPath, pb); err != nil { + klog.V(4).ErrorS(err, "ConvertRoles error", "playbook", pbPath) + return nil, err + } + + if err := convertIncludeTasks(baseFS, pbPath, pb); err != nil { + klog.V(4).ErrorS(err, "ConvertIncludeTasks error", "playbook", pbPath) + return nil, err + } + + if err := pb.Validate(); err != nil { + klog.V(4).ErrorS(err, "Validate playbook failed", "playbook", pbPath) + return nil, err + } + return pb, nil +} + +// loadPlaybook with include_playbook. 
It joins all imported playbooks into a single playbook.
+func loadPlaybook(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
+	// read the playbook file from the project filesystem
+	pbData, err := fs.ReadFile(baseFS, pbPath)
+	if err != nil {
+		klog.V(4).ErrorS(err, "Read playbook failed", "playbook", pbPath)
+		return err
+	}
+	var plays []kkcorev1.Play
+	if err := yaml.Unmarshal(pbData, &plays); err != nil {
+		klog.V(4).ErrorS(err, "Unmarshal playbook failed", "playbook", pbPath)
+		return err
+	}
+
+	for _, p := range plays {
+		if p.ImportPlaybook != "" {
+			importPlaybook := getPlaybookBaseFromPlaybook(baseFS, pbPath, p.ImportPlaybook)
+			if importPlaybook == "" {
+				return fmt.Errorf("cannot find import playbook %s", p.ImportPlaybook)
+			}
+			if err := loadPlaybook(baseFS, importPlaybook, pb); err != nil {
+				return err
+			}
+		}
+
+		// fill blocks in roles
+		for i, r := range p.Roles {
+			roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
+			if roleBase == "" {
+				return fmt.Errorf("cannot find Role %s", r.Role)
+			}
+			mainTask := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
+			if mainTask == "" {
+				return fmt.Errorf("cannot find main task for Role %s", r.Role)
+			}
+
+			rdata, err := fs.ReadFile(baseFS, mainTask)
+			if err != nil {
+				klog.V(4).ErrorS(err, "Read Role failed", "playbook", pbPath, "Role", r.Role)
+				return err
+			}
+			var blocks []kkcorev1.Block
+			if err := yaml.Unmarshal(rdata, &blocks); err != nil {
+				klog.V(4).ErrorS(err, "Unmarshal Role failed", "playbook", pbPath, "Role", r.Role)
+				return err
+			}
+			p.Roles[i].Block = blocks
+		}
+		pb.Play = append(pb.Play, p)
+	}
+
+	return nil
+}
+
+// convertRoles converts each role name into its task blocks and default vars.
+func convertRoles(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
+	for i, p := range pb.Play {
+		for j, r := range p.Roles {
+			roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
+			if roleBase == "" {
+				return fmt.Errorf("cannot find Role %s", r.Role)
+			}
+
+			// load block
+			mainTask := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
+			if mainTask == "" {
+				return fmt.Errorf("cannot find main task for Role %s", r.Role)
+			}
+
+			rdata, err := fs.ReadFile(baseFS, mainTask)
+			if err != nil {
+				klog.V(4).ErrorS(err, "Read Role failed", "playbook", pbPath, "Role", r.Role)
+				return err
+			}
+			var blocks []kkcorev1.Block
+			if err := yaml.Unmarshal(rdata, &blocks); err != nil {
+				klog.V(4).ErrorS(err, "Unmarshal Role failed", "playbook", pbPath, "Role", r.Role)
+				return err
+			}
+			p.Roles[j].Block = blocks
+
+			// load defaults (optional)
+			mainDefault := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesDefaultsDir, _const.ProjectRolesDefaultsMainFile))
+			if mainDefault != "" {
+				mainData, err := fs.ReadFile(baseFS, mainDefault)
+				if err != nil {
+					klog.V(4).ErrorS(err, "Read defaults variable for Role error", "playbook", pbPath, "Role", r.Role)
+					return err
+				}
+
+				var vars map[string]any
+				var node yaml.Node
+				if err := yaml.Unmarshal(mainData, &node); err != nil {
+					klog.V(4).ErrorS(err, "Unmarshal defaults variable for Role error", "playbook", pbPath, "Role", r.Role)
+					return err
+				}
+				if err := node.Decode(&vars); err != nil {
+					return err
+				}
+				p.Roles[j].Vars = vars
+			}
+		}
+		pb.Play[i] = p
+	}
+	return nil
+}
+
+// convertIncludeTasks expands include_tasks files into blocks.
+func convertIncludeTasks(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
+	var pbBase = filepath.Dir(filepath.Dir(pbPath))
+	for 
_, play := range pb.Play { + if err := fileToBlock(baseFS, pbBase, play.PreTasks); err != nil { + klog.V(4).ErrorS(err, "Convert pre_tasks error", "playbook", pbPath) + return err + } + if err := fileToBlock(baseFS, pbBase, play.Tasks); err != nil { + klog.V(4).ErrorS(err, "Convert tasks error", "playbook", pbPath) + return err + } + if err := fileToBlock(baseFS, pbBase, play.PostTasks); err != nil { + klog.V(4).ErrorS(err, "Convert post_tasks error", "playbook", pbPath) + return err + } + + for _, r := range play.Roles { + roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role) + if err := fileToBlock(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir), r.Block); err != nil { + klog.V(4).ErrorS(err, "Convert Role error", "playbook", pbPath, "Role", r.Role) + return err + } + } + } + return nil +} + +func fileToBlock(baseFS fs.FS, baseDir string, blocks []kkcorev1.Block) error { + for i, b := range blocks { + if b.IncludeTasks != "" { + data, err := fs.ReadFile(baseFS, filepath.Join(baseDir, b.IncludeTasks)) + if err != nil { + klog.V(4).ErrorS(err, "Read includeTask file error", "name", b.Name, "file_path", filepath.Join(baseDir, b.IncludeTasks)) + return err + } + var bs []kkcorev1.Block + if err := yaml.Unmarshal(data, &bs); err != nil { + klog.V(4).ErrorS(err, "Unmarshal includeTask data error", "name", b.Name, "file_path", filepath.Join(baseDir, b.IncludeTasks)) + return err + } + b.Block = bs + blocks[i] = b + } + if err := fileToBlock(baseFS, baseDir, b.Block); err != nil { + klog.V(4).ErrorS(err, "Convert block error", "name", b.Name) + return err + } + if err := fileToBlock(baseFS, baseDir, b.Rescue); err != nil { + klog.V(4).ErrorS(err, "Convert rescue error", "name", b.Name) + return err + } + if err := fileToBlock(baseFS, baseDir, b.Always); err != nil { + klog.V(4).ErrorS(err, "Convert always error", "name", b.Name) + return err + } + } + return nil +} + +// getPlaybookBaseFromPlaybook // find from project/playbooks/playbook if exists. // find from current_playbook/playbooks/playbook if exists. // find current_playbook/playbook -func GetPlaybookBaseFromPlaybook(baseFS fs.FS, pbPath string, playbook string) string { +func getPlaybookBaseFromPlaybook(baseFS fs.FS, pbPath string, playbook string) string { var find []string // find from project/playbooks/playbook find = append(find, filepath.Join(filepath.Dir(filepath.Dir(pbPath)), _const.ProjectPlaybooksDir, playbook)) @@ -58,11 +256,11 @@ func GetPlaybookBaseFromPlaybook(baseFS fs.FS, pbPath string, playbook string) s return "" } -// GetRoleBaseFromPlaybook +// getRoleBaseFromPlaybook // find from project/roles/roleName if exists. // find from current_playbook/roles/roleName if exists. 
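+// (the first existing candidate wins, so project-level roles take precedence over playbook-local ones)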
// find current_playbook/playbook -func GetRoleBaseFromPlaybook(baseFS fs.FS, pbPath string, roleName string) string { +func getRoleBaseFromPlaybook(baseFS fs.FS, pbPath string, roleName string) string { var find []string // find from project/roles/roleName find = append(find, filepath.Join(filepath.Dir(filepath.Dir(pbPath)), _const.ProjectRolesDir, roleName)) @@ -84,38 +282,10 @@ func GetRoleBaseFromPlaybook(baseFS fs.FS, pbPath string, roleName string) strin return "" } -// GetFilesFromPlayBook -func GetFilesFromPlayBook(baseFS fs.FS, pbPath string, roleName string, filePath string) string { - if filepath.IsAbs(filePath) { - return filePath - } - - if roleName != "" { - return filepath.Join(GetRoleBaseFromPlaybook(baseFS, pbPath, roleName), _const.ProjectRolesFilesDir, filePath) - } else { - // find from pbPath dir like: project/playbooks/templates/tmplPath - return filepath.Join(filepath.Dir(pbPath), _const.ProjectRolesFilesDir, filePath) - } -} - -// GetTemplatesFromPlayBook -func GetTemplatesFromPlayBook(baseFS fs.FS, pbPath string, roleName string, tmplPath string) string { - if filepath.IsAbs(tmplPath) { - return tmplPath - } - - if roleName != "" { - return filepath.Join(GetRoleBaseFromPlaybook(baseFS, pbPath, roleName), _const.ProjectRolesTemplateDir, tmplPath) - } else { - // find from pbPath dir like: project/playbooks/templates/tmplPath - return filepath.Join(filepath.Dir(pbPath), _const.ProjectRolesTemplateDir, tmplPath) - } -} - -// GetYamlFile +// getYamlFile // return *.yaml if exists // return *.yml if exists. -func GetYamlFile(baseFS fs.FS, base string) string { +func getYamlFile(baseFS fs.FS, base string) string { var find []string find = append(find, fmt.Sprintf("%s.yaml", base), diff --git a/pkg/project/helper_test.go b/pkg/project/helper_test.go index 081b0e94..cb281a96 100644 --- a/pkg/project/helper_test.go +++ b/pkg/project/helper_test.go @@ -22,6 +22,8 @@ import ( "testing" "github.com/stretchr/testify/assert" + + kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" ) func TestGetPlaybookBaseFromAbsPlaybook(t *testing.T) { @@ -53,7 +55,7 @@ func TestGetPlaybookBaseFromAbsPlaybook(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - assert.Equal(t, tc.except, GetPlaybookBaseFromPlaybook(os.DirFS("testdata"), tc.basePlaybook, tc.playbook)) + assert.Equal(t, tc.except, getPlaybookBaseFromPlaybook(os.DirFS("testdata"), tc.basePlaybook, tc.playbook)) }) } } @@ -87,82 +89,11 @@ func TestGetRoleBaseFromAbsPlaybook(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - assert.Equal(t, tc.except, GetRoleBaseFromPlaybook(os.DirFS("testdata"), tc.basePlaybook, tc.roleName)) + assert.Equal(t, tc.except, getRoleBaseFromPlaybook(os.DirFS("testdata"), tc.basePlaybook, tc.roleName)) }) } } -func TestGetFilesFromPlayBook(t *testing.T) { - testcases := []struct { - name string - pbPath string - role string - filePath string - excepted string - }{ - { - name: "absolute filePath", - filePath: "/tmp", - excepted: "/tmp", - }, - { - name: "empty role", - pbPath: "playbooks/test.yaml", - filePath: "tmp", - excepted: "playbooks/files/tmp", - }, - { - name: "not empty role", - pbPath: "playbooks/test.yaml", - role: "role1", - filePath: "tmp", - excepted: "roles/role1/files/tmp", - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - assert.Equal(t, tc.excepted, GetFilesFromPlayBook(os.DirFS("testdata"), tc.pbPath, tc.role, tc.filePath)) - }) - } -} - -func TestGetTemplatesFromPlayBook(t 
*testing.T) { - testcases := []struct { - name string - pbPath string - role string - filePath string - excepted string - }{ - { - name: "absolute filePath", - filePath: "/tmp", - excepted: "/tmp", - }, - { - name: "empty role", - pbPath: "playbooks/test.yaml", - filePath: "tmp", - excepted: "playbooks/templates/tmp", - }, - { - name: "not empty role", - pbPath: "playbooks/test.yaml", - role: "role1", - filePath: "tmp", - excepted: "roles/role1/templates/tmp", - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - assert.Equal(t, tc.excepted, GetTemplatesFromPlayBook(os.DirFS("testdata"), tc.pbPath, tc.role, tc.filePath)) - }) - } - -} - func TestGetYamlFile(t *testing.T) { testcases := []struct { name string @@ -188,7 +119,114 @@ func TestGetYamlFile(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - assert.Equal(t, tc.except, GetYamlFile(os.DirFS("testdata"), tc.base)) + assert.Equal(t, tc.except, getYamlFile(os.DirFS("testdata"), tc.base)) + }) + } +} + +func TestMarshalPlaybook(t *testing.T) { + testcases := []struct { + name string + file string + except *kkcorev1.Playbook + }{ + { + name: "marshal playbook", + file: "playbooks/playbook1.yaml", + except: &kkcorev1.Playbook{[]kkcorev1.Play{ + { + Base: kkcorev1.Base{Name: "play1"}, + PlayHost: kkcorev1.PlayHost{Hosts: []string{"localhost"}}, + Roles: []kkcorev1.Role{ + {kkcorev1.RoleInfo{ + Role: "role1", + Block: []kkcorev1.Block{ + { + BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "role1 | block1"}}, + Task: kkcorev1.Task{UnknownFiled: map[string]any{ + "debug": map[string]any{ + "msg": "echo \"hello world\"", + }, + }}, + }, + }, + }}, + }, + Handlers: nil, + PreTasks: []kkcorev1.Block{ + { + BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | pre_block1"}}, + Task: kkcorev1.Task{UnknownFiled: map[string]any{ + "debug": map[string]any{ + "msg": "echo \"hello world\"", + }, + }}, + }, + }, + PostTasks: []kkcorev1.Block{ + { + BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | post_block1"}}, + Task: kkcorev1.Task{UnknownFiled: map[string]any{ + "debug": map[string]any{ + "msg": "echo \"hello world\"", + }, + }}, + }, + }, + Tasks: []kkcorev1.Block{ + { + BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block1"}}, + BlockInfo: kkcorev1.BlockInfo{Block: []kkcorev1.Block{ + { + BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block1 | block1"}}, + Task: kkcorev1.Task{UnknownFiled: map[string]any{ + "debug": map[string]any{ + "msg": "echo \"hello world\"", + }, + }}, + }, + { + BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block1 | block2"}}, + Task: kkcorev1.Task{UnknownFiled: map[string]any{ + "debug": map[string]any{ + "msg": "echo \"hello world\"", + }, + }}, + }, + }}, + }, + { + BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block2"}}, + Task: kkcorev1.Task{UnknownFiled: map[string]any{ + "debug": map[string]any{ + "msg": "echo \"hello world\"", + }, + }}, + }, + }, + }, + { + Base: kkcorev1.Base{Name: "play2"}, + PlayHost: kkcorev1.PlayHost{Hosts: []string{"localhost"}}, + Tasks: []kkcorev1.Block{ + { + BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play2 | block1"}}, + Task: kkcorev1.Task{UnknownFiled: map[string]any{ + "debug": map[string]any{ + "msg": "echo \"hello world\"", + }, + }}, + }, + }, + }, + }}, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + pb, err := marshalPlaybook(os.DirFS("testdata"), tc.file) + 
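+			// marshalPlaybook expands import_playbook, roles and include_tasks,
+			// so the expected value above is the fully resolved playbook tree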
assert.NoError(t, err) + assert.Equal(t, tc.except, pb) }) } } diff --git a/pkg/project/local.go b/pkg/project/local.go new file mode 100644 index 00000000..bb69b6a3 --- /dev/null +++ b/pkg/project/local.go @@ -0,0 +1,111 @@ +/* +Copyright 2023 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package project + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + + kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" + kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" + _const "github.com/kubesphere/kubekey/v4/pkg/const" +) + +var builtinProjectFS fs.FS + +func newLocalProject(pipeline kubekeyv1.Pipeline) (Project, error) { + if !filepath.IsAbs(pipeline.Spec.Playbook) { + if pipeline.Spec.Project.Addr == "" { + wd, err := os.Getwd() + if err != nil { + return nil, err + } + pipeline.Spec.Project.Addr = wd + } + pipeline.Spec.Playbook = filepath.Join(pipeline.Spec.Project.Addr, pipeline.Spec.Playbook) + } + + if _, err := os.Stat(pipeline.Spec.Playbook); err != nil { + return nil, fmt.Errorf("cannot find playbook %s", pipeline.Spec.Playbook) + } + if filepath.Base(filepath.Dir(pipeline.Spec.Playbook)) != _const.ProjectPlaybooksDir { + // the format of playbook is not correct + return nil, fmt.Errorf("playbook should be projectDir/playbooks/playbookfile") + } + projectDir := filepath.Dir(filepath.Dir(pipeline.Spec.Playbook)) + playbook, err := filepath.Rel(projectDir, pipeline.Spec.Playbook) + if err != nil { + return nil, err + } + return &localProject{Pipeline: pipeline, projectDir: projectDir, playbook: playbook}, nil +} + +type localProject struct { + kubekeyv1.Pipeline + + projectDir string + // playbook relpath base on projectDir + playbook string +} + +func (p localProject) getFilePath(path string, o GetFileOption) string { + var find []string + switch { + case o.IsFile: + if o.Role != "" { + // find from project/roles/roleName + find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesDir, o.Role, _const.ProjectRolesFilesDir, path)) + // find from pbPath dir like: current_playbook/roles/roleName + find = append(find, filepath.Join(p.projectDir, p.playbook, _const.ProjectRolesDir, o.Role, _const.ProjectRolesFilesDir, path)) + } + find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesFilesDir, path)) + case o.IsTemplate: + // find from project/roles/roleName + if o.Role != "" { + find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesDir, o.Role, _const.ProjectRolesTemplateDir, path)) + // find from pbPath dir like: current_playbook/roles/roleName + find = append(find, filepath.Join(p.projectDir, p.playbook, _const.ProjectRolesDir, o.Role, _const.ProjectRolesTemplateDir, path)) + } + find = append(find, filepath.Join(p.projectDir, _const.ProjectRolesTemplateDir, path)) + default: + find = append(find, filepath.Join(p.projectDir, path)) + } + for _, s := range find { + if _, err := os.Stat(s); err == nil { + return s + } + } + return "" +} + +func (p localProject) Stat(path string, option GetFileOption) (os.FileInfo, 
error) {
+	return os.Stat(p.getFilePath(path, option))
+}
+
+func (p localProject) WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error {
+	return filepath.WalkDir(p.getFilePath(path, option), f)
+}
+
+func (p localProject) ReadFile(path string, option GetFileOption) ([]byte, error) {
+	return os.ReadFile(p.getFilePath(path, option))
+}
+
+func (p localProject) MarshalPlaybook() (*kkcorev1.Playbook, error) {
+	return marshalPlaybook(os.DirFS(p.projectDir), p.playbook)
+}
diff --git a/pkg/project/project.go b/pkg/project/project.go
index 800aa5c3..b59a2db4 100644
--- a/pkg/project/project.go
+++ b/pkg/project/project.go
@@ -17,33 +17,43 @@ limitations under the License.
 package project
 
 import (
-	"context"
 	"io/fs"
-	"path/filepath"
+	"os"
 	"strings"
 
+	kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
 	kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
-	_const "github.com/kubesphere/kubekey/v4/pkg/const"
 )
 
+var builtinProjectFunc func(kubekeyv1.Pipeline) (Project, error)
+
+// TODO: the Project interface should be defined more clearly.
+// A Project represents the location of an actual project, and
+// project files should be looked up relative to it.
 type Project interface {
-	FS(ctx context.Context, update bool) (fs.FS, error)
+	MarshalPlaybook() (*kkcorev1.Playbook, error)
+	Stat(path string, option GetFileOption) (os.FileInfo, error)
+	WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error
+	ReadFile(path string, option GetFileOption) ([]byte, error)
 }
 
-type Options struct {
-	*kubekeyv1.Pipeline
+type GetFileOption struct {
+	Role       string
+	IsTemplate bool
+	IsFile     bool
 }
 
-func New(o Options) Project {
-	if strings.HasPrefix(o.Pipeline.Spec.Project.Addr, "https://") ||
-		strings.HasPrefix(o.Pipeline.Spec.Project.Addr, "http://") ||
-		strings.HasPrefix(o.Pipeline.Spec.Project.Addr, "git@") {
-		// git clone to project dir
-		if o.Pipeline.Spec.Project.Name == "" {
-			o.Pipeline.Spec.Project.Name = strings.TrimSuffix(o.Pipeline.Spec.Project.Addr[strings.LastIndex(o.Pipeline.Spec.Project.Addr, "/")+1:], ".git")
-		}
-		return &gitProject{Pipeline: *o.Pipeline, localDir: filepath.Join(_const.GetWorkDir(), _const.ProjectDir, o.Spec.Project.Name)}
+func New(pipeline kubekeyv1.Pipeline, update bool) (Project, error) {
+	if strings.HasPrefix(pipeline.Spec.Project.Addr, "https://") ||
+		strings.HasPrefix(pipeline.Spec.Project.Addr, "http://") ||
+		strings.HasPrefix(pipeline.Spec.Project.Addr, "git@") {
+		return newGitProject(pipeline, update)
 	}
-	return &localProject{Pipeline: *o.Pipeline}
+	if _, ok := pipeline.Annotations[kubekeyv1.BuiltinsProjectAnnotation]; ok {
+		return builtinProjectFunc(pipeline)
+	}
+
+	return newLocalProject(pipeline)
 }
diff --git a/pkg/project/project_git.go b/pkg/project/project_git.go
deleted file mode 100644
index 49a6f662..00000000
--- a/pkg/project/project_git.go
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
-Copyright 2023 The KubeSphere Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package project - -import ( - "context" - "io/fs" - "os" - - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/transport/http" - "k8s.io/klog/v2" - - kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" -) - -// gitProject from git -type gitProject struct { - kubekeyv1.Pipeline - - localDir string -} - -func (r gitProject) FS(ctx context.Context, update bool) (fs.FS, error) { - if !update { - return os.DirFS(r.localDir), nil - } - if err := r.init(ctx); err != nil { - klog.V(4).ErrorS(err, "Init git project error", "project_addr", r.Pipeline.Spec.Project.Addr) - return nil, err - } - return os.DirFS(r.localDir), nil -} - -func (r gitProject) init(ctx context.Context) error { - if _, err := os.Stat(r.localDir); err != nil { - // git clone - return r.gitClone(ctx) - } else { - // git pull - return r.gitPull(ctx) - } -} - -func (r gitProject) gitClone(ctx context.Context) error { - if _, err := git.PlainCloneContext(ctx, r.localDir, false, &git.CloneOptions{ - URL: r.Pipeline.Spec.Project.Addr, - Progress: nil, - ReferenceName: plumbing.NewBranchReferenceName(r.Pipeline.Spec.Project.Branch), - SingleBranch: true, - Auth: &http.TokenAuth{r.Pipeline.Spec.Project.Token}, - InsecureSkipTLS: false, - }); err != nil { - klog.Errorf("clone project %s failed: %v", r.Pipeline.Spec.Project.Addr, err) - return err - } - return nil -} - -func (r gitProject) gitPull(ctx context.Context) error { - open, err := git.PlainOpen(r.localDir) - if err != nil { - klog.V(4).ErrorS(err, "git open error", "local_dir", r.localDir) - return err - } - wt, err := open.Worktree() - if err != nil { - klog.V(4).ErrorS(err, "git open worktree error", "local_dir", r.localDir) - return err - } - if err := wt.PullContext(ctx, &git.PullOptions{ - RemoteURL: r.Pipeline.Spec.Project.Addr, - ReferenceName: plumbing.NewBranchReferenceName(r.Pipeline.Spec.Project.Branch), - SingleBranch: true, - Auth: &http.TokenAuth{r.Pipeline.Spec.Project.Token}, - InsecureSkipTLS: false, - }); err != nil && err != git.NoErrAlreadyUpToDate { - klog.V(4).ErrorS(err, "git pull error", "local_dir", r.localDir) - return err - } - - return nil -} diff --git a/pkg/project/project_local.go b/pkg/project/project_local.go deleted file mode 100644 index c57f3080..00000000 --- a/pkg/project/project_local.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2023 The KubeSphere Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
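The replacement newGitProject is added elsewhere in this patch and presumably keeps the same clone-or-pull flow as the file deleted here. For reference, a condensed sketch of that flow with go-git (auth, TLS, and logging omitted):

```go
package main

import (
	"context"
	"errors"
	"os"

	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

// syncRepo clones url into dir on first use and pulls on subsequent runs,
// mirroring the clone-or-pull pattern of the deleted gitProject.
func syncRepo(ctx context.Context, dir, url, branch string) error {
	if _, err := os.Stat(dir); err != nil {
		// no local copy yet: clone a single branch
		_, err := git.PlainCloneContext(ctx, dir, false, &git.CloneOptions{
			URL:           url,
			ReferenceName: plumbing.NewBranchReferenceName(branch),
			SingleBranch:  true,
		})
		return err
	}
	repo, err := git.PlainOpen(dir)
	if err != nil {
		return err
	}
	wt, err := repo.Worktree()
	if err != nil {
		return err
	}
	err = wt.PullContext(ctx, &git.PullOptions{
		RemoteURL:     url,
		ReferenceName: plumbing.NewBranchReferenceName(branch),
		SingleBranch:  true,
	})
	if errors.Is(err, git.NoErrAlreadyUpToDate) {
		return nil // an up-to-date worktree is not an error
	}
	return err
}
```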
-*/ - -package project - -import ( - "context" - "fmt" - "io/fs" - "os" - "path/filepath" - - kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" - "github.com/kubesphere/kubekey/v4/project" -) - -type localProject struct { - kubekeyv1.Pipeline - - fs fs.FS -} - -func (r localProject) FS(ctx context.Context, update bool) (fs.FS, error) { - if _, ok := r.Pipeline.Annotations[kubekeyv1.BuiltinsProjectAnnotation]; ok { - return project.InternalPipeline, nil - } - if filepath.IsAbs(r.Pipeline.Spec.Playbook) { - return os.DirFS("/"), nil - } - - if r.fs != nil { - return r.fs, nil - } - - if r.Pipeline.Spec.Project.Addr != "" { - return os.DirFS(r.Pipeline.Spec.Project.Addr), nil - } - - return nil, fmt.Errorf("cannot get filesystem from absolute project %s", r.Pipeline.Spec.Project.Addr) -} diff --git a/pkg/proxy/api_resources.go b/pkg/proxy/api_resources.go index 1d77229c..e8b1b6b9 100644 --- a/pkg/proxy/api_resources.go +++ b/pkg/proxy/api_resources.go @@ -67,19 +67,19 @@ func newApiIResources(gv schema.GroupVersion) *apiResources { } } -func (a *apiResources) AddResource(o resourceOptions) error { +func (r *apiResources) AddResource(o resourceOptions) error { if o.admit == nil { // set default admit o.admit = newAlwaysAdmit() } - a.resourceOptions = append(a.resourceOptions, o) + r.resourceOptions = append(r.resourceOptions, o) storageVersionProvider, isStorageVersionProvider := o.storage.(apirest.StorageVersionProvider) var apiResource metav1.APIResource if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionHash) && isStorageVersionProvider && storageVersionProvider.StorageVersion() != nil { versioner := storageVersionProvider.StorageVersion() - gvk, err := getStorageVersionKind(versioner, o.storage, a.typer) + gvk, err := getStorageVersionKind(versioner, o.storage, r.typer) if err != nil { klog.V(4).ErrorS(err, "failed to get storage version kind", "storage", reflect.TypeOf(o.storage)) return err @@ -109,14 +109,14 @@ func (a *apiResources) AddResource(o resourceOptions) error { } apiResource.SingularName = singularNameProvider.GetSingularName() } - a.list = append(a.list, apiResource) + r.list = append(r.list, apiResource) return nil } -func (s *apiResources) handlerApiResources() http.HandlerFunc { +func (r *apiResources) handlerApiResources() http.HandlerFunc { return func(writer http.ResponseWriter, request *http.Request) { - responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, writer, request, http.StatusOK, - &metav1.APIResourceList{GroupVersion: s.gv.String(), APIResources: s.list}, false) + responsewriters.WriteObjectNegotiated(r.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, writer, request, http.StatusOK, + &metav1.APIResourceList{GroupVersion: r.gv.String(), APIResources: r.list}, false) } } diff --git a/pkg/proxy/internal/watcher.go b/pkg/proxy/internal/watcher.go index 9162e414..1d86c017 100644 --- a/pkg/proxy/internal/watcher.go +++ b/pkg/proxy/internal/watcher.go @@ -103,10 +103,11 @@ func (w *fileWatcher) watch() { select { case event := <-w.watcher.Events: klog.V(6).InfoS("receive watcher event", "event", event) - // if the change is namespace dir. + // Adjust the listening range. a watcher for a namespace. + // the watcher contains all resources in the namespace. 
entry, err := os.Stat(event.Name) if err != nil { - klog.V(4).ErrorS(err, "failed to stat resource file", "event", event) + klog.V(6).ErrorS(err, "failed to stat resource file", "event", event) continue } if entry.IsDir() && len(filepath.SplitList(strings.TrimPrefix(event.Name, w.prefix))) == 1 { @@ -114,11 +115,11 @@ func (w *fileWatcher) watch() { switch event.Op { case fsnotify.Create: if err := w.watcher.Add(event.Name); err != nil { - klog.V(4).ErrorS(err, "failed to add namespace dir to file watcher", "event", event) + klog.V(6).ErrorS(err, "failed to add namespace dir to file watcher", "event", event) } case fsnotify.Remove: if err := w.watcher.Remove(event.Name); err != nil { - klog.V(4).ErrorS(err, "failed to remove namespace dir to file watcher", "event", event) + klog.V(6).ErrorS(err, "failed to remove namespace dir to file watcher", "event", event) } } continue @@ -128,7 +129,7 @@ func (w *fileWatcher) watch() { if strings.HasSuffix(event.Name, yamlSuffix) { data, err := os.ReadFile(event.Name) if err != nil { - klog.V(4).ErrorS(err, "failed to read resource file", "event", event) + klog.V(6).ErrorS(err, "failed to read resource file", "event", event) continue } @@ -136,16 +137,16 @@ func (w *fileWatcher) watch() { case fsnotify.Create: obj, _, err := w.codec.Decode(data, nil, w.newFunc()) if err != nil { - klog.V(4).ErrorS(err, "failed to decode resource file", "event", event) + klog.V(6).ErrorS(err, "failed to decode resource file", "event", event) continue } metaObj, err := meta.Accessor(obj) if err != nil { - klog.V(4).ErrorS(err, "failed to convert to metaObject", "event", event) + klog.V(6).ErrorS(err, "failed to convert to metaObject", "event", event) continue } if metaObj.GetName() == "" && metaObj.GetGenerateName() == "" { // ignore unknown file - klog.V(4).InfoS("name is empty. ignore", "event", event) + klog.V(6).InfoS("name is empty. ignore", "event", event) continue } w.watchEvents <- watch.Event{ @@ -155,16 +156,16 @@ func (w *fileWatcher) watch() { case fsnotify.Write: obj, _, err := w.codec.Decode(data, nil, w.newFunc()) if err != nil { - klog.V(4).ErrorS(err, "failed to decode resource file", "event", event) + klog.V(6).ErrorS(err, "failed to decode resource file", "event", event) continue } metaObj, err := meta.Accessor(obj) if err != nil { - klog.V(4).ErrorS(err, "failed to convert to metaObject", "event", event) + klog.V(6).ErrorS(err, "failed to convert to metaObject", "event", event) continue } if metaObj.GetName() == "" && metaObj.GetGenerateName() == "" { // ignore unknown file - klog.V(4).InfoS("name is empty. ignore", "event", event) + klog.V(6).InfoS("name is empty. ignore", "event", event) continue } if strings.HasSuffix(filepath.Base(event.Name), deleteTagSuffix) { diff --git a/pkg/proxy/router.go b/pkg/proxy/router.go index 692d0ba6..b92b7c60 100644 --- a/pkg/proxy/router.go +++ b/pkg/proxy/router.go @@ -1,3 +1,19 @@ +/* +Copyright 2024 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
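The watcher hunk above adjusts how namespace directories are tracked: first-level directories under the watch prefix represent namespaces, and each one is added to or removed from the watcher so that all resource files inside it are observed. A stripped-down sketch of that pattern (illustrative names; not the patch's code):

```go
package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// watchNamespaceDirs keeps one watch per first-level (namespace) directory
// under prefix, so every resource file inside a namespace is observed.
func watchNamespaceDirs(w *fsnotify.Watcher, prefix string) {
	for event := range w.Events {
		rel, err := filepath.Rel(prefix, event.Name)
		if err != nil || filepath.Dir(rel) != "." {
			continue // only react to entries directly under the prefix
		}
		switch event.Op {
		case fsnotify.Create:
			if info, err := os.Stat(event.Name); err == nil && info.IsDir() {
				if err := w.Add(event.Name); err != nil {
					log.Printf("add namespace dir %s: %v", event.Name, err)
				}
			}
		case fsnotify.Remove:
			// the kernel watch may already be gone; best-effort removal
			_ = w.Remove(event.Name)
		}
	}
}
```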
+*/ + package proxy import "net/http" diff --git a/pkg/proxy/transport.go b/pkg/proxy/transport.go index a487238b..4de05aa7 100644 --- a/pkg/proxy/transport.go +++ b/pkg/proxy/transport.go @@ -78,6 +78,10 @@ func NewLocalClient() (ctrlclient.Client, error) { }) } +// NewProxyTransport returns a new http.RoundTripper for use in ctrl.client. +// if the resource's group version is kubekey.kubesphere.io/v1alpha1, store it locally. +// if the resource's group version is kubekey.kubesphere.io/v1 and isLocal is true, store it locally. +// if the resource's group version is kubekey.kubesphere.io/v1 and isLocal is false, send a remote http request. func NewProxyTransport(isLocal bool) (http.RoundTripper, error) { lt := &transport{ isLocal: isLocal, @@ -108,6 +112,7 @@ func NewProxyTransport(isLocal bool) (http.RoundTripper, error) { if err := lt.registerResources(kkv1alpha1); err != nil { klog.V(4).ErrorS(err, "failed to register resources") } + if isLocal { // register kubekeyv1 resources kkv1 := newApiIResources(kubekeyv1.SchemeGroupVersion) diff --git a/pkg/task/controller.go b/pkg/task/controller.go deleted file mode 100644 index ef1843e0..00000000 --- a/pkg/task/controller.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2023 The KubeSphere Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package task - -import ( - "context" - - "golang.org/x/time/rate" - "k8s.io/apimachinery/pkg/runtime" - cgcache "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" - "github.com/kubesphere/kubekey/v4/pkg/proxy" -) - -// Controller is the interface for running tasks -type Controller interface { - // Start the controller - Start(ctx context.Context) error - // AddTasks adds tasks to the controller - AddTasks(ctx context.Context, pipeline *kubekeyv1.Pipeline) error -} - -type ControllerOptions struct { - *runtime.Scheme - MaxConcurrent int - ctrlclient.Client - TaskReconciler reconcile.Reconciler - VariableCache cgcache.Store -} - -func NewController(o ControllerOptions) (Controller, error) { - if o.MaxConcurrent == 0 { - o.MaxConcurrent = 1 - } - if o.Client == nil { - var err error - o.Client, err = proxy.NewLocalClient() - if err != nil { - return nil, err - } - } - - return &taskController{ - schema: o.Scheme, - MaxConcurrent: o.MaxConcurrent, - taskqueue: workqueue.NewRateLimitingQueue(&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}), - client: o.Client, - taskReconciler: o.TaskReconciler, - }, nil -} diff --git a/pkg/task/internal.go b/pkg/task/internal.go deleted file mode 100644 index 7af05839..00000000 --- a/pkg/task/internal.go +++ /dev/null @@ -1,472 +0,0 @@ -/* -Copyright 2023 The KubeSphere Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
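The doc comment on NewProxyTransport describes a three-way routing rule. The transport type itself is outside this hunk, so the following is only an illustrative sketch of such a dispatching RoundTripper; the field names are assumptions:

```go
package main

import (
	"net/http"
	"strings"
)

// routingTransport illustrates the rule documented on NewProxyTransport:
// v1alpha1 resources are always served from local storage; v1 resources are
// served locally only when isLocal is true, otherwise proxied to the remote
// apiserver. Field names here are illustrative, not the patch's actual type.
type routingTransport struct {
	isLocal bool
	local   http.RoundTripper // file-backed storage handler
	remote  http.RoundTripper // real cluster transport
}

func (t *routingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	path := req.URL.Path
	switch {
	case strings.Contains(path, "kubekey.kubesphere.io/v1alpha1"):
		return t.local.RoundTrip(req) // v1alpha1: always local
	case strings.Contains(path, "kubekey.kubesphere.io/v1") && t.isLocal:
		return t.local.RoundTrip(req) // v1 in local mode
	default:
		return t.remote.RoundTrip(req) // everything else goes remote
	}
}
```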
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package task - -import ( - "context" - "fmt" - "sync" - - "github.com/google/uuid" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/json" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - ctrl "sigs.k8s.io/controller-runtime" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" - kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" - kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1" - "github.com/kubesphere/kubekey/v4/pkg/converter" - "github.com/kubesphere/kubekey/v4/pkg/modules" - "github.com/kubesphere/kubekey/v4/pkg/project" - "github.com/kubesphere/kubekey/v4/pkg/variable" -) - -type taskController struct { - schema *runtime.Scheme - client ctrlclient.Client - taskReconciler reconcile.Reconciler - - taskqueue workqueue.RateLimitingInterface - MaxConcurrent int -} - -// AddTasks to taskqueue if Tasks is not completed -func (c *taskController) AddTasks(ctx context.Context, pipeline *kubekeyv1.Pipeline) error { - var nsTasks = &kubekeyv1alpha1.TaskList{} - - if err := c.client.List(ctx, nsTasks, ctrlclient.InNamespace(pipeline.Namespace), ctrlclient.MatchingFields{ - kubekeyv1alpha1.TaskOwnerField: ctrlclient.ObjectKeyFromObject(pipeline).String(), - }); err != nil { - klog.V(4).ErrorS(err, "List tasks error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) - return err - } - defer func() { - for _, task := range nsTasks.Items { - c.taskqueue.Add(ctrl.Request{NamespacedName: ctrlclient.ObjectKeyFromObject(&task)}) - } - converter.CalculatePipelineStatus(nsTasks, pipeline) - }() - - if len(nsTasks.Items) != 0 { - // task has generated. add exist generated task to taskqueue. - return nil - } - // generate tasks - v, err := variable.GetVariable(variable.Options{ - Ctx: ctx, - Client: c.client, - Pipeline: *pipeline, - }) - if err != nil { - return err - } - - klog.V(6).InfoS("deal project", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) - projectFs, err := project.New(project.Options{Pipeline: pipeline}).FS(ctx, true) - if err != nil { - klog.V(4).ErrorS(err, "Deal project error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) - return err - } - - // convert to transfer.Playbook struct - pb, err := converter.MarshalPlaybook(projectFs, pipeline.Spec.Playbook) - if err != nil { - return err - } - - // set pipeline location - if err := v.Merge(variable.LocationMerge{ - UID: string(pipeline.UID), - Name: pipeline.Name, - }); err != nil { - klog.V(4).ErrorS(err, "set top location for pipeline", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) - return err - } - - for _, play := range pb.Play { - if !play.Taggable.IsEnabled(pipeline.Spec.Tags, pipeline.Spec.SkipTags) { - // if not match the tags. skip - continue - } - // hosts should contain all host's name. hosts should not be empty. 
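The host-resolution and gather-facts logic deleted here has counterparts in the new functional variable API added later in this patch (GetHostnames, MergeRemoteVariable). A rough fragment of how the same flow could read against the new API, assuming getGatherFact keeps its signature across the refactor:

```go
// Hypothetical rewrite of the deleted flow on top of the new variable API.
hostsAny, err := v.Get(variable.GetHostnames(play.PlayHost.Hosts))
if err != nil {
	return err
}
hosts, _ := hostsAny.([]string)
if len(hosts) == 0 {
	return fmt.Errorf("hosts is empty")
}
if play.GatherFacts {
	for _, h := range hosts {
		facts, err := getGatherFact(ctx, h, v) // assumed to survive the refactor
		if err != nil {
			return err
		}
		// gathered facts become the host's immutable RemoteVars
		if err := v.Merge(variable.MergeRemoteVariable(h, facts)); err != nil {
			return err
		}
	}
}
```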
- var hosts []string - if ahn, err := v.Get(variable.Hostnames{Name: play.PlayHost.Hosts}); err == nil { - hosts = ahn.([]string) - } - if len(hosts) == 0 { - klog.V(4).ErrorS(nil, "Hosts is empty", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) - return fmt.Errorf("hosts is empty") - } - - // when gather_fact is set. get host's information from remote. - if play.GatherFacts { - for _, h := range hosts { - gfv, err := getGatherFact(ctx, h, v) - if err != nil { - klog.V(4).ErrorS(err, "Get gather fact error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline), "host", h) - return err - } - // merge host information to runtime variable - if err := v.Merge(variable.HostMerge{ - HostNames: []string{h}, - Data: gfv, - }); err != nil { - klog.V(4).ErrorS(err, "Merge gather fact error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline), "host", h) - return err - } - } - } - - // Batch execution, with each batch being a group of hosts run in serial. - var batchHosts [][]string - if play.RunOnce { - // runOnce only run in first node - batchHosts = [][]string{{hosts[0]}} - } else { - // group hosts by serial. run the playbook by serial - batchHosts, err = converter.GroupHostBySerial(hosts, play.Serial.Data) - if err != nil { - klog.V(4).ErrorS(err, "Group host by serial error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) - return err - } - } - - // generate task by each batch. - for _, serials := range batchHosts { - // each batch hosts should not be empty. - if len(serials) == 0 { - klog.V(4).ErrorS(nil, "Host is empty", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) - return fmt.Errorf("host is empty") - } - - // generate playbook uid. - uid := uuid.NewString() - // set play location - if err := v.Merge(variable.LocationMerge{ - ParentUID: string(pipeline.UID), - UID: uid, - Name: play.Name, - Type: variable.BlockLocation, - Vars: play.Vars, - }); err != nil { - klog.V(4).ErrorS(err, "set block location for play", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline), "play", play.Name) - return err - } - // generate task from pre tasks - preTasks, err := c.createTasks(ctx, createTasksOptions{ - variable: v, - pipeline: pipeline, - hosts: serials, - blocks: play.PreTasks, - uid: uid, - role: "", - when: nil, - locationType: variable.BlockLocation, - }) - if err != nil { - klog.V(4).ErrorS(err, "Get pre task from play error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline), "play", play.Name) - return err - } - - nsTasks.Items = append(nsTasks.Items, preTasks...) - // generate task from role - for _, role := range play.Roles { - roleuid := uuid.NewString() - if err := v.Merge(variable.LocationMerge{ - ParentUID: uid, - UID: roleuid, - Name: role.Role, - Type: variable.BlockLocation, - Vars: role.Vars, - }); err != nil { - klog.V(4).ErrorS(err, "set block location for role", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline), "play", play.Name, "role", role.Role) - return err - } - roleTasks, err := c.createTasks(ctx, createTasksOptions{ - variable: v, - pipeline: pipeline, - hosts: serials, - blocks: role.Block, - uid: roleuid, - role: role.Role, - when: role.When.Data, - locationType: variable.BlockLocation, - }) - if err != nil { - klog.V(4).ErrorS(err, "Get role task from play error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline), "play", play.Name, "role", role.Role) - return err - } - nsTasks.Items = append(nsTasks.Items, roleTasks...) 
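The batching step above delegates to converter.GroupHostBySerial, which is not shown in this patch. As a simplified illustration of the idea (plain ints only; the real helper presumably also handles percentages and mixed lists, as Ansible's serial does):

```go
package main

import "fmt"

// groupBySerial splits hosts into batches that run one after another,
// a simplified take on what converter.GroupHostBySerial does above.
func groupBySerial(hosts []string, serial int) [][]string {
	if serial <= 0 {
		return [][]string{hosts} // no batching: one batch with every host
	}
	var batches [][]string
	for i := 0; i < len(hosts); i += serial {
		end := i + serial
		if end > len(hosts) {
			end = len(hosts)
		}
		batches = append(batches, hosts[i:end])
	}
	return batches
}

func main() {
	fmt.Println(groupBySerial([]string{"n1", "n2", "n3", "n4", "n5"}, 2))
	// [[n1 n2] [n3 n4] [n5]]
}
```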
- } - // generate task from tasks - tasks, err := c.createTasks(ctx, createTasksOptions{ - variable: v, - pipeline: pipeline, - hosts: serials, - blocks: play.Tasks, - uid: uid, - role: "", - when: nil, - locationType: variable.BlockLocation, - }) - if err != nil { - klog.V(4).ErrorS(err, "Get task from play error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline), "play", play.Name) - return err - } - nsTasks.Items = append(nsTasks.Items, tasks...) - // generate task from post tasks - postTasks, err := c.createTasks(ctx, createTasksOptions{ - variable: v, - pipeline: pipeline, - hosts: serials, - blocks: play.Tasks, - uid: uid, - role: "", - when: nil, - locationType: variable.BlockLocation, - }) - if err != nil { - klog.V(4).ErrorS(err, "Get post task from play error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline), "play", play.Name) - return err - } - nsTasks.Items = append(nsTasks.Items, postTasks...) - } - } - - return nil -} - -type createTasksOptions struct { - // pipeline level config - variable variable.Variable - pipeline *kubekeyv1.Pipeline - // playbook level config - hosts []string // which hosts will run playbook - // blocks level config - blocks []kkcorev1.Block - uid string // the parent location uid for blocks - role string // role name of blocks - when []string // when condition for blocks - locationType variable.LocationType // location type for blocks -} - -// createTasks convert ansible block to task -func (k *taskController) createTasks(ctx context.Context, options createTasksOptions) ([]kubekeyv1alpha1.Task, error) { - var tasks []kubekeyv1alpha1.Task - for _, at := range options.blocks { - if !at.Taggable.IsEnabled(options.pipeline.Spec.Tags, options.pipeline.Spec.SkipTags) { - continue - } - - switch { - case len(at.Block) != 0: - uid := uuid.NewString() - // set block location - if err := options.variable.Merge(variable.LocationMerge{ - UID: uid, - ParentUID: options.uid, - Type: options.locationType, - Name: at.Name, - Vars: at.Vars, - }); err != nil { - klog.V(4).ErrorS(err, "set block location for block", "pipeline", ctrlclient.ObjectKeyFromObject(options.pipeline), "block", at.Name) - return nil, err - } - - // add block - blockTasks, err := k.createTasks(ctx, createTasksOptions{ - hosts: options.hosts, - role: options.role, - variable: options.variable, - pipeline: options.pipeline, - blocks: at.Block, - when: append(options.when, at.When.Data...), - uid: uid, - locationType: variable.BlockLocation, - }) - if err != nil { - klog.V(4).ErrorS(err, "Get block task from block error", "pipeline", ctrlclient.ObjectKeyFromObject(options.pipeline), "block", at.Name) - return nil, err - } - tasks = append(tasks, blockTasks...) - - if len(at.Always) != 0 { - alwaysTasks, err := k.createTasks(ctx, createTasksOptions{ - variable: options.variable, - pipeline: options.pipeline, - hosts: options.hosts, - blocks: at.Always, - uid: uid, - role: options.role, - when: append(options.when, at.When.Data...), - locationType: variable.AlwaysLocation, - }) - if err != nil { - klog.V(4).ErrorS(err, "Get always task from block error", "pipeline", ctrlclient.ObjectKeyFromObject(options.pipeline), "block", at.Name) - return nil, err - } - tasks = append(tasks, alwaysTasks...) 
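The recursion above encodes Ansible's block semantics into task location types. For readers unfamiliar with those semantics, a minimal illustration (not the patch's execution code): rescue runs only when a block task fails, and always runs regardless of the outcome.

```go
package main

import "fmt"

// runBlock illustrates the Ansible-style semantics the deleted code encodes
// as location types: rescue only on failure, always unconditionally after.
func runBlock(block, rescue, always []func() error) error {
	var failed error
	for _, task := range block {
		if failed = task(); failed != nil {
			break
		}
	}
	if failed != nil {
		for _, task := range rescue {
			if err := task(); err != nil {
				return err // rescue itself failed
			}
		}
		failed = nil // a successful rescue handles the failure
	}
	for _, task := range always {
		if err := task(); err != nil {
			return err
		}
	}
	return failed
}

func main() {
	err := runBlock(
		[]func() error{func() error { return fmt.Errorf("boom") }},
		[]func() error{func() error { fmt.Println("rescued"); return nil }},
		[]func() error{func() error { fmt.Println("cleanup"); return nil }},
	)
	fmt.Println("err:", err)
}
```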
- } - if len(at.Rescue) != 0 { - rescueTasks, err := k.createTasks(ctx, createTasksOptions{ - variable: options.variable, - pipeline: options.pipeline, - hosts: options.hosts, - blocks: at.Rescue, - uid: uid, - role: options.role, - when: append(options.when, at.When.Data...), - locationType: variable.RescueLocation, - }) - if err != nil { - klog.V(4).ErrorS(err, "Get rescue task from block error", "pipeline", ctrlclient.ObjectKeyFromObject(options.pipeline), "block", at.Name) - return nil, err - } - tasks = append(tasks, rescueTasks...) - } - case at.IncludeTasks != "": - // do nothing - // set includeTask location - if err := options.variable.Merge(variable.LocationMerge{ - UID: uuid.NewString(), - ParentUID: options.uid, - Type: options.locationType, - Name: at.IncludeTasks, - Vars: at.Vars, - }); err != nil { - klog.V(4).ErrorS(err, "set block location for includeTask", "pipeline", ctrlclient.ObjectKeyFromObject(options.pipeline), "block", at.Name) - return nil, err - } - default: - task := converter.MarshalBlock(ctx, options.role, options.hosts, append(options.when, at.When.Data...), at) - // complete by pipeline - task.GenerateName = options.pipeline.Name + "-" - task.Namespace = options.pipeline.Namespace - if err := controllerutil.SetControllerReference(options.pipeline, task, k.schema); err != nil { - klog.V(4).ErrorS(err, "Set controller reference error", "pipeline", ctrlclient.ObjectKeyFromObject(options.pipeline), "block", at.Name) - return nil, err - } - // complete module by unknown field - for n, a := range at.UnknownFiled { - data, err := json.Marshal(a) - if err != nil { - klog.V(4).ErrorS(err, "Marshal unknown field error", "pipeline", ctrlclient.ObjectKeyFromObject(options.pipeline), "block", at.Name, "field", n) - return nil, err - } - if m := modules.FindModule(n); m != nil { - task.Spec.Module.Name = n - task.Spec.Module.Args = runtime.RawExtension{Raw: data} - break - } - } - if task.Spec.Module.Name == "" { // action is necessary for a task - klog.V(4).ErrorS(nil, "No module/action detected in task", "pipeline", ctrlclient.ObjectKeyFromObject(options.pipeline), "block", at.Name) - return nil, fmt.Errorf("no module/action detected in task: %s", task.Name) - } - // create task - if err := k.client.Create(ctx, task); err != nil { - klog.V(4).ErrorS(err, "Create task error", "pipeline", ctrlclient.ObjectKeyFromObject(options.pipeline), "block", at.Name) - return nil, err - } - tasks = append(tasks, *task) - - // set task location - if err := options.variable.Merge(variable.LocationMerge{ - UID: string(task.UID), - ParentUID: options.uid, - Type: options.locationType, - Name: at.Name, - Vars: at.Vars, - }); err != nil { - klog.V(4).ErrorS(err, "set block location for task", "pipeline", ctrlclient.ObjectKeyFromObject(options.pipeline), "block", at.Name) - return nil, err - } - } - } - return tasks, nil -} - -// Start task controller, deal task in work taskqueue -func (k *taskController) Start(ctx context.Context) error { - go func() { - <-ctx.Done() - k.taskqueue.ShutDown() - }() - // deal work taskqueue - wg := &sync.WaitGroup{} - for i := 0; i < k.MaxConcurrent; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for k.processNextWorkItem(ctx) { - } - }() - } - <-ctx.Done() - wg.Wait() - return nil -} - -func (k *taskController) processNextWorkItem(ctx context.Context) bool { - obj, shutdown := k.taskqueue.Get() - if shutdown { - return false - } - - defer k.taskqueue.Done(obj) - - req, ok := obj.(ctrl.Request) - if !ok { - // As the item in the workqueue is actually 
invalid, we call - // Forget here else we'd go into a loop of attempting to - // process a work item that is invalid. - k.taskqueue.Forget(obj) - klog.V(4).ErrorS(nil, "Queue item was not a Request", "request", req) - // Return true, don't take a break - return true - } - - result, err := k.taskReconciler.Reconcile(ctx, req) - switch { - case err != nil: - k.taskqueue.AddRateLimited(req) - klog.V(4).ErrorS(err, "Reconciler error", "request", req) - case result.RequeueAfter > 0: - // The result.RequeueAfter request will be lost, if it is returned - // along with a non-nil error. But this is intended as - // We need to drive to stable reconcile loops before queuing due - // to result.RequestAfter - k.taskqueue.Forget(obj) - k.taskqueue.AddAfter(req, result.RequeueAfter) - case result.Requeue: - k.taskqueue.AddRateLimited(req) - default: - // Finally, if no error occurs we Forget this item so it does not - // get queued again until another change happens. - k.taskqueue.Forget(obj) - } - return true -} - -func (k *taskController) NeedLeaderElection() bool { - return true -} diff --git a/pkg/variable/helper.go b/pkg/variable/helper.go index 9bee54fb..b97a09e2 100644 --- a/pkg/variable/helper.go +++ b/pkg/variable/helper.go @@ -17,8 +17,13 @@ limitations under the License. package variable import ( + "encoding/json" + "fmt" "path/filepath" + "reflect" + "regexp" "strconv" + "strings" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" @@ -26,49 +31,41 @@ import ( kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" _const "github.com/kubesphere/kubekey/v4/pkg/const" + "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" ) -// mergeVariables merge multiple variables into one variable +// combineVariables merge multiple variables into one variable // v2 will override v1 if variable is repeated -func mergeVariables(v1, v2 VariableData) VariableData { - mergedVars := make(VariableData) +func combineVariables(v1, v2 map[string]any) map[string]any { + var f func(val1, val2 any) any + f = func(val1, val2 any) any { + if val1 != nil && reflect.TypeOf(val1).Kind() == reflect.Map && + val2 != nil && reflect.TypeOf(val2).Kind() == reflect.Map { + mergedVars := make(map[string]any) + for _, k := range reflect.ValueOf(val1).MapKeys() { + mergedVars[k.String()] = reflect.ValueOf(val1).MapIndex(k).Interface() + } + for _, k := range reflect.ValueOf(val2).MapKeys() { + mergedVars[k.String()] = f(mergedVars[k.String()], reflect.ValueOf(val2).MapIndex(k).Interface()) + } + return mergedVars + } + return val2 + } + mv := make(map[string]any) for k, v := range v1 { - mergedVars[k] = v + mv[k] = v } for k, v := range v2 { - mergedVars[k] = v + mv[k] = f(mv[k], v) } - return mergedVars + return mv + } -func findLocation(loc *location, uid string) *location { - if uid == loc.UID { - return loc - } - // find from block - for i := range loc.Block { - if r := findLocation(&loc.Block[i], uid); r != nil { - return r - } - } - // find from always - for i := range loc.Always { - if r := findLocation(&loc.Always[i], uid); r != nil { - return r - } - } - // find from rescue - for i := range loc.Rescue { - if r := findLocation(&loc.Rescue[i], uid); r != nil { - return r - } - } - return nil -} - -func convertGroup(inv kubekeyv1.Inventory) VariableData { - groups := make(VariableData) - all := make([]string, 0) +func convertGroup(inv kubekeyv1.Inventory) map[string]any { + groups := make(map[string]any) + all := []string{"localhost"} // set default host for hn := range inv.Spec.Hosts { all = append(all, hn) } @@ 
-90,113 +87,228 @@ func hostsInGroup(inv kubekeyv1.Inventory, groupName string) []string { return nil } +// mergeSlice with skip repeat value +func mergeSlice(g1, g2 []string) []string { + uniqueValues := make(map[string]bool) + mg := []string{} + + // Add values from the first slice + for _, v := range g1 { + if !uniqueValues[v] { + uniqueValues[v] = true + mg = append(mg, v) + } + } + + // Add values from the second slice + for _, v := range g2 { + if !uniqueValues[v] { + uniqueValues[v] = true + mg = append(mg, v) + } + } + + return mg +} + // StringVar get string value by key -func StringVar(vars VariableData, key string) *string { - value, ok := vars[key] +func StringVar(d map[string]any, args map[string]any, key string) (string, error) { + val, ok := args[key] if !ok { - klog.V(6).InfoS("cannot find variable", "key", key) - return nil + return "", fmt.Errorf("cannot find variable \"%s\"", key) } - sv, ok := value.(string) - if !ok { - klog.V(6).InfoS("variable is not string", "key", key) - return nil - } - return &sv -} -// IntVar get int value by key -func IntVar(vars VariableData, key string) *int { - value, ok := vars[key] + sv, ok := val.(string) if !ok { - klog.V(6).InfoS("cannot find variable", "key", key) - return nil + return "", fmt.Errorf("variable \"%s\" is not string", key) } - // default convert to float64 - number, ok := value.(float64) - if !ok { - klog.V(6).InfoS("variable is not number", "key", key) - return nil - } - vi := int(number) - return &vi -} - -func BoolVar(vars VariableData, key string) *bool { - value, ok := vars[key] - if !ok { - klog.V(6).InfoS("cannot find variable", "key", key) - return nil - } - // default convert to float64 - b, ok := value.(bool) - if !ok { - klog.V(6).InfoS("variable is not bool", "key", key) - return nil - } - return &b + return tmpl.ParseString(d, sv) } // StringSliceVar get string slice value by key -func StringSliceVar(vars VariableData, key string) []string { - value, ok := vars[key] +func StringSliceVar(d map[string]any, vars map[string]any, key string) ([]string, error) { + val, ok := vars[key] if !ok { - klog.V(6).InfoS("cannot find variable", "key", key) - return nil + return nil, fmt.Errorf("cannot find variable \"%s\"", key) } - sv, ok := value.([]any) - if !ok { - klog.V(6).InfoS("variable is not string slice", "key", key) - return nil - } - var ss []string - for _, a := range sv { - av, ok := a.(string) - if !ok { - klog.V(6).InfoS("variable is not string", "key", key) - return nil + switch val.(type) { + case []any: + var ss []string + for _, a := range val.([]any) { + av, ok := a.(string) + if !ok { + klog.V(6).InfoS("variable is not string", "key", key) + return nil, nil + } + as, err := tmpl.ParseString(d, av) + if err != nil { + return nil, err + } + ss = append(ss, as) } - ss = append(ss, av) + return ss, nil + case string: + as, err := tmpl.ParseString(d, val.(string)) + if err != nil { + return nil, err + } + var ss []string + if err := json.Unmarshal([]byte(as), &ss); err != nil { + // if is not json format. 
treat it as a plain string and return a single-element slice + return []string{as}, nil + } + return ss, nil + default: + return nil, fmt.Errorf("unsupported variable \"%s\" type", key) } - return ss } -func Extension2Variables(ext runtime.RawExtension) VariableData { +// IntVar gets an int value by key +func IntVar(d map[string]any, vars map[string]any, key string) (int, error) { + val, ok := vars[key] + if !ok { + return 0, fmt.Errorf("cannot find variable \"%s\"", key) + } + // default convert to float64 + switch val.(type) { + case float64: + return int(val.(float64)), nil + case string: + vs, err := tmpl.ParseString(d, val.(string)) + if err != nil { + return 0, err + } + return strconv.Atoi(vs) + default: + return 0, fmt.Errorf("unsupported variable \"%s\" type", key) + } +} + +// Extension2Variables converts an extension to variables +func Extension2Variables(ext runtime.RawExtension) map[string]any { if len(ext.Raw) == 0 { return nil } - var data VariableData + var data map[string]any if err := yaml.Unmarshal(ext.Raw, &data); err != nil { klog.V(4).ErrorS(err, "failed to unmarshal extension to variables") } return data } -func Extension2Slice(ext runtime.RawExtension) []any { +// Extension2Slice converts an extension to a slice +func Extension2Slice(d map[string]any, ext runtime.RawExtension) []any { if len(ext.Raw) == 0 { return nil } var data []any - if err := yaml.Unmarshal(ext.Raw, &data); err != nil { - klog.V(4).ErrorS(err, "failed to unmarshal extension to slice") + if err := yaml.Unmarshal(ext.Raw, &data); err == nil { + return data } + + val, err := Extension2String(d, ext) + if err != nil { + klog.ErrorS(err, "extension2string error", "input", string(ext.Raw)) + } + // parse value by pongo2. if the rendered result is pongo2's marker for a Go slice, resolve the actual slice from the variable data. + switch { + case regexp.MustCompile(`^<\[\](.*?) Value>$`).MatchString(val): + // pongo2 renders a Go slice as a marker string like "<[]string Value>"; look up the original expression in the variable data to recover the slice. + var input = string(ext.Raw) + // try to escape string + if ns, err := strconv.Unquote(string(ext.Raw)); err == nil { + input = ns + } + vv := GetValue(d, input) + if _, ok := vv.([]any); ok { + return vv.([]any) + } + default: + // value is a simple string + return []any{val} + } + return data } -func Extension2String(ext runtime.RawExtension) string { +func Extension2String(d map[string]any, ext runtime.RawExtension) (string, error) { if len(ext.Raw) == 0 { - return "" + return "", nil } + var input = string(ext.Raw) // try to escape string if ns, err := strconv.Unquote(string(ext.Raw)); err == nil { - return ns + input = ns } - return string(ext.Raw) + + result, err := tmpl.ParseString(d, input) + if err != nil { + return "", err + } + + return result, nil } func RuntimeDirFromPipeline(obj kubekeyv1.Pipeline) string { return filepath.Join(_const.GetRuntimeDir(), kubekeyv1.SchemeGroupVersion.String(), _const.RuntimePipelineDir, obj.Namespace, obj.Name, _const.RuntimePipelineVariableDir) } + +// GetValue gets a value from the variable data by a key path +func GetValue(value map[string]any, keys string) any { + switch { + case strings.HasPrefix(keys, "{{") && strings.HasSuffix(keys, "}}"): + // keys like {{ a.b.c }} return value[a][b][c] + var result any = value + for _, k := range strings.Split(strings.TrimSpace(strings.TrimSuffix(strings.TrimPrefix(keys, "{{"), "}}")), ".") { + result = result.(map[string]any)[k] + } + return result + default: + return nil + } +} + +// parseVariable parses all string values to their actual values.
+func parseVariable(v any, parseTmplFunc func(string) (string, error)) error { + switch reflect.ValueOf(v).Kind() { + case reflect.Map: + for _, kv := range reflect.ValueOf(v).MapKeys() { + val := reflect.ValueOf(v).MapIndex(kv) + if vv, ok := val.Interface().(string); ok { + if tmpl.IsTmplSyntax(vv) { + newValue, err := parseTmplFunc(vv) + if err != nil { + return err + } + reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(newValue)) + } + } else { + if err := parseVariable(val.Interface(), parseTmplFunc); err != nil { + return err + } + } + } + case reflect.Slice, reflect.Array: + for i := 0; i < reflect.ValueOf(v).Len(); i++ { + val := reflect.ValueOf(v).Index(i) + if vv, ok := val.Interface().(string); ok { + if tmpl.IsTmplSyntax(vv) { + newValue, err := parseTmplFunc(vv) + if err != nil { + return err + } + val.Set(reflect.ValueOf(newValue)) + } + } else { + if err := parseVariable(val.Interface(), parseTmplFunc); err != nil { + return err + } + } + } + } + + return nil +} diff --git a/pkg/variable/helper_test.go b/pkg/variable/helper_test.go index 86ddcc51..0406520a 100644 --- a/pkg/variable/helper_test.go +++ b/pkg/variable/helper_test.go @@ -20,47 +20,49 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" ) func TestMergeVariable(t *testing.T) { testcases := []struct { name string - v1 VariableData - v2 VariableData - excepted VariableData + v1 map[string]any + v2 map[string]any + excepted map[string]any }{ { name: "primary variables value is empty", v1: nil, - v2: VariableData{ + v2: map[string]any{ "a1": "v1", }, - excepted: VariableData{ + excepted: map[string]any{ "a1": "v1", }, }, { name: "auxiliary variables value is empty", - v1: VariableData{ + v1: map[string]any{ "p1": "v1", }, v2: nil, - excepted: VariableData{ + excepted: map[string]any{ "p1": "v1", }, }, { name: "non-repeat value", - v1: VariableData{ + v1: map[string]any{ "p1": "v1", "p2": map[string]any{ "p21": "v21", }, }, - v2: VariableData{ + v2: map[string]any{ "a1": "v1", }, - excepted: VariableData{ + excepted: map[string]any{ "p1": "v1", "p2": map[string]any{ "p21": "v21", @@ -70,14 +72,14 @@ func TestMergeVariable(t *testing.T) { }, { name: "repeat value", - v1: VariableData{ + v1: map[string]any{ "p1": "v1", "p2": map[string]any{ "p21": "v21", "p22": "v22", }, }, - v2: VariableData{ + v2: map[string]any{ "a1": "v1", "p1": "v2", "p2": map[string]any{ @@ -85,12 +87,49 @@ func TestMergeVariable(t *testing.T) { "a21": "v21", }, }, - excepted: VariableData{ + excepted: map[string]any{ "a1": "v1", "p1": "v2", "p2": map[string]any{ "p21": "v22", "a21": "v21", + "p22": "v22", + }, + }, + }, + { + name: "repeat deep value", + v1: map[string]any{ + "p1": map[string]string{ + "p11": "v11", + }, + "p2": map[string]any{ + "p21": "v21", + "p22": "v22", + }, + }, + v2: map[string]any{ + "p1": map[string]string{ + "p21": "v21", + }, + "p2": map[string]any{ + "p21": map[string]any{ + "p211": "v211", + }, + "a21": "v21", + }, + }, + excepted: map[string]any{ + "p1": map[string]any{ + "p11": "v11", + "p21": "v21", + }, + "p2": map[string]any{ + "p21": map[string]any{ + "p211": "v211", + }, + "p22": "v22", + "a21": "v21", }, }, }, @@ -98,8 +137,130 @@ func TestMergeVariable(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - v := mergeVariables(tc.v1, tc.v2) + v := combineVariables(tc.v1, tc.v2) assert.Equal(t, tc.excepted, v) }) } } + +func TestMergeGroup(t *testing.T) { + testcases := []struct { + name string + g1 []string + g2 
[]string + except []string + }{ + { + name: "non-repeat", + g1: []string{ + "h1", "h2", "h3", + }, + g2: []string{ + "h4", "h5", + }, + except: []string{ + "h1", "h2", "h3", "h4", "h5", + }, + }, + { + name: "repeat value", + g1: []string{ + "h1", "h2", "h3", + }, + g2: []string{ + "h3", "h4", "h5", + }, + except: []string{ + "h1", "h2", "h3", "h4", "h5", + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + ac := mergeSlice(tc.g1, tc.g2) + assert.Equal(t, tc.except, ac) + }) + } +} + +func TestParseVariable(t *testing.T) { + testcases := []struct { + name string + data map[string]any + base map[string]any + except map[string]any + }{ + { + name: "parse string", + data: map[string]any{ + "a": "{{ a }}", + }, + base: map[string]any{ + "a": "b", + }, + except: map[string]any{ + "a": "b", + }, + }, + { + name: "parse map", + data: map[string]any{ + "a": "{{ a.b }}", + }, + base: map[string]any{ + "a": map[string]any{ + "b": "c", + }, + }, + except: map[string]any{ + "a": "c", + }, + }, + { + name: "parse slice", + data: map[string]any{ + "a": []string{"{{ b }}"}, + }, + base: map[string]any{ + "b": "c", + }, + except: map[string]any{ + "a": []string{"c"}, + }, + }, + { + name: "parse map in slice", + data: map[string]any{ + "a": []map[string]any{ + { + "a1": []any{"{{ b }}"}, + }, + }, + }, + base: map[string]any{ + "b": "c", + }, + except: map[string]any{ + "a": []map[string]any{ + { + "a1": []any{"c"}, + }, + }, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + err := parseVariable(tc.data, func(s string) (string, error) { + // parse use total variable. the task variable should not contain template syntax. + return tmpl.ParseString(combineVariables(tc.data, tc.base), s) + }) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, tc.except, tc.data) + }) + } +} diff --git a/pkg/variable/internal.go b/pkg/variable/internal.go index 1d4b832c..1f3c509c 100644 --- a/pkg/variable/internal.go +++ b/pkg/variable/internal.go @@ -20,13 +20,18 @@ import ( "encoding/json" "fmt" "reflect" + "regexp" + "strconv" + "strings" "sync" + "k8s.io/apimachinery/pkg/util/rand" "k8s.io/klog/v2" "k8s.io/utils/strings/slices" kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" _const "github.com/kubesphere/kubekey/v4/pkg/const" + "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" "github.com/kubesphere/kubekey/v4/pkg/variable/source" ) @@ -47,10 +52,6 @@ type value struct { kubekeyv1.Inventory `json:"-"` // Hosts store the variable for running tasks on specific hosts Hosts map[string]host `json:"hosts"` - // Location is the complete location index. - // This index can help us determine the specific location of the task, - // enabling us to retrieve the task's parameters and establish the execution order. - Location *location `json:"location"` } func (v value) deepCopy() value { @@ -62,100 +63,72 @@ func (v value) deepCopy() value { if err := json.Unmarshal(data, &nv); err != nil { return value{} } + return nv } -// getGlobalVars get defined variable from inventory and config -func (v *value) getGlobalVars(hostname string) VariableData { - // get host vars - hostVars := Extension2Variables(v.Inventory.Spec.Hosts[hostname]) - // set inventory_hostname to hostVars - // inventory_hostname" is the hostname configured in the inventory file. 
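The merge tests above pin down the new deep-merge semantics: maps merge recursively per key, and any other value from v2 wins. A compact in-package illustration (hypothetical; combineVariables is unexported, so this would sit next to the tests in package variable, with fmt imported):

```go
func ExampleCombineVariables() {
	v1 := map[string]any{"a": map[string]any{"x": 1, "y": 2}, "b": "old"}
	v2 := map[string]any{"a": map[string]any{"y": 20, "z": 30}, "b": "new"}
	merged := combineVariables(v1, v2)
	fmt.Println(merged["b"])                       // scalars: v2 wins
	fmt.Println(merged["a"].(map[string]any)["x"]) // untouched nested keys survive
	fmt.Println(merged["a"].(map[string]any)["y"]) // nested maps merge per key
	// Output:
	// new
	// 1
	// 20
}
```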
- hostVars = mergeVariables(hostVars, VariableData{ - "inventory_hostname": hostname, - }) - // merge group vars to host vars - for _, gv := range v.Inventory.Spec.Groups { - if slices.Contains(gv.Hosts, hostname) { - hostVars = mergeVariables(hostVars, Extension2Variables(gv.Vars)) +// getParameterVariable get defined variable from inventory and config +func (v value) getParameterVariable() map[string]any { + globalHosts := make(map[string]any) + for hostname := range v.Hosts { + // get host vars + hostVars := Extension2Variables(v.Inventory.Spec.Hosts[hostname]) + // set inventory_name to hostVars + // "inventory_name" is the hostname configured in the inventory file. + hostVars = combineVariables(hostVars, map[string]any{ + _const.VariableHostName: hostname, + }) + // merge group vars to host vars + for _, gv := range v.Inventory.Spec.Groups { + if slices.Contains(gv.Hosts, hostname) { + hostVars = combineVariables(hostVars, Extension2Variables(gv.Vars)) + } } - } - // merge inventory vars to host vars - hostVars = mergeVariables(hostVars, Extension2Variables(v.Inventory.Spec.Vars)) - // merge config vars to host vars - hostVars = mergeVariables(hostVars, Extension2Variables(v.Config.Spec)) + // merge inventory vars to host vars + hostVars = combineVariables(hostVars, Extension2Variables(v.Inventory.Spec.Vars)) + // merge config vars to host vars + hostVars = combineVariables(hostVars, Extension2Variables(v.Config.Spec)) + globalHosts[hostname] = hostVars + } + var externalVal = make(map[string]any) // external vars - hostVars = mergeVariables(hostVars, VariableData{ - "groups": convertGroup(v.Inventory), - }) - - return hostVars -} - -type location struct { - // UID is current location uid - UID string `json:"uid"` - // PUID is the parent uid for current location - PUID string `json:"puid"` - // Name is the name of current location - Name string `json:"name"` - // Vars is the variable of current location - Vars VariableData `json:"vars,omitempty"` - - Block []location `json:"block,omitempty"` - Always []location `json:"always,omitempty"` - Rescue []location `json:"rescue,omitempty"` -} - -// VariableData is the variable data -type VariableData map[string]any - -func (v VariableData) String() string { - data, err := json.Marshal(v) - if err != nil { - klog.V(4).ErrorS(err, "marshal in error", "data", v) - return "" + for hostname := range globalHosts { + var val = make(map[string]any) + val = combineVariables(val, map[string]any{ + _const.VariableGlobalHosts: globalHosts, + }) + val = combineVariables(val, map[string]any{ + _const.VariableGroups: convertGroup(v.Inventory), + }) + externalVal[hostname] = val } - return string(data) -} -func (v VariableData) DeepCopy() VariableData { - nv := make(VariableData) - for k, vv := range v { - nv[k] = vv - } - return nv + return combineVariables(globalHosts, externalVal) } type host struct { - Vars VariableData `json:"vars"` - RuntimeVars map[string]VariableData `json:"runtime"` + // RemoteVars sources from remote node config. as gather_fact.scope all tasks. it should not be changed. + RemoteVars map[string]any `json:"remote"` + // RuntimeVars sources from runtime. store which defined in each appeared block vars. 
+ RuntimeVars map[string]any `json:"runtime"` } func (v *variable) Key() string { return v.key } -func (v *variable) Get(option GetOption) (any, error) { - return option.filter(*v.value) +func (v *variable) Get(f GetFunc) (any, error) { + return f(v) } -func (v *variable) Merge(mo ...MergeOption) error { +func (v *variable) Merge(f MergeFunc) error { v.Lock() defer v.Unlock() old := v.value.deepCopy() - for _, o := range mo { - if err := o.mergeTo(v.value); err != nil { - return err - } - } - - if !reflect.DeepEqual(old.Location, v.value.Location) { - if err := v.syncLocation(); err != nil { - klog.ErrorS(err, "sync location error") - } + if err := f(v); err != nil { + return err } for hn, hv := range v.value.Hosts { @@ -169,19 +142,6 @@ func (v *variable) Merge(mo ...MergeOption) error { return nil } -func (v *variable) syncLocation() error { - data, err := json.MarshalIndent(v.value.Location, "", " ") - if err != nil { - klog.ErrorS(err, "marshal location data error") - return err - } - if err := v.source.Write(data, _const.RuntimePipelineVariableLocationFile); err != nil { - klog.V(4).ErrorS(err, "write location data to local file error", "filename", _const.RuntimePipelineVariableLocationFile) - return err - } - return nil -} - // syncHosts sync hosts data to local file. If hostname is empty, sync all hosts func (v *variable) syncHosts(hostname ...string) error { for _, hn := range hostname { @@ -196,29 +156,174 @@ func (v *variable) syncHosts(hostname ...string) error { } } } + return nil } -// mergeSlice with skip repeat value -func mergeSlice(g1, g2 []string) []string { - uniqueValues := make(map[string]bool) - mg := []string{} - - // Add values from the first slice - for _, v := range g1 { - if !uniqueValues[v] { - uniqueValues[v] = true - mg = append(mg, v) +// GetHostnames gets all hostnames from a group or host +var GetHostnames = func(name []string) GetFunc { + return func(v Variable) (any, error) { + if _, ok := v.(*variable); !ok { + return nil, fmt.Errorf("variable type error") } - } + data := v.(*variable).value - // Add values from the second slice - for _, v := range g2 { - if !uniqueValues[v] { - uniqueValues[v] = true - mg = append(mg, v) + var hs []string + for _, n := range name { + // add host to hs + if _, ok := data.Hosts[n]; ok { + hs = append(hs, n) + } + // add the group's hosts to hs + for gn, gv := range convertGroup(data.Inventory) { + if gn == n { + hs = mergeSlice(hs, gv.([]string)) + break + } + } + + // Add the host at a given index of a group to hs, e.g. "master[0]" picks the first host of group "master". + regexForIndex := regexp.MustCompile(`^(.*)\[(\d+)\]$`) + if match := regexForIndex.FindStringSubmatch(strings.TrimSpace(n)); match != nil { + index, err := strconv.Atoi(match[2]) + if err != nil { + klog.V(4).ErrorS(err, "convert index to int error", "index", match[2]) + return nil, err + } + if group, ok := convertGroup(data.Inventory)[match[1]].([]string); ok { + if index >= len(group) { + return nil, fmt.Errorf("index %v out of range for group %s", index, match[1]) + } + hs = append(hs, group[index]) + } + } + + // add a random host of a group, e.g. "master | random" + regexForRandom := regexp.MustCompile(`^(.+?)\s*\|\s*random$`) + if match := regexForRandom.FindStringSubmatch(strings.TrimSpace(n)); match != nil { + if group, ok := convertGroup(data.Inventory)[match[1]].([]string); ok { + hs = append(hs, group[rand.Intn(len(group))]) + } + } } - } - return mg + return hs, nil + } +} + +// GetParamVariable gets the param variable, which is a combination of inventory and config.
+var GetParamVariable = func(hostname string) GetFunc { + return func(v Variable) (any, error) { + if _, ok := v.(*variable); !ok { + return nil, fmt.Errorf("variable type error") + } + data := v.(*variable).value + if hostname == "" { + return data.getParameterVariable(), nil + } + return data.getParameterVariable()[hostname], nil + } +} + +// MergeRemoteVariable merge variable to remote. +var MergeRemoteVariable = func(hostname string, data map[string]any) MergeFunc { + return func(v Variable) error { + if _, ok := v.(*variable); !ok { + return fmt.Errorf("variable type error") + } + vv := v.(*variable).value + + if hostname == "" { + return fmt.Errorf("when merge source is remote. HostName cannot be empty") + } + if _, ok := vv.Hosts[hostname]; !ok { + return fmt.Errorf("when merge source is remote. HostName %s not exist", hostname) + } + + // it should not be changed + if hv := vv.Hosts[hostname]; len(hv.RemoteVars) == 0 { + hv.RemoteVars = data + vv.Hosts[hostname] = hv + } + + return nil + } +} + +// MergeRuntimeVariable parse variable by specific host and merge to the host. +var MergeRuntimeVariable = func(hostName string, vd map[string]any) MergeFunc { + return func(v Variable) error { + vv := v.(*variable).value + // merge to specify host + curVariable, err := v.Get(GetAllVariable(hostName)) + if err != nil { + return err + } + // parse variable + if err := parseVariable(vd, func(s string) (string, error) { + // parse use total variable. the task variable should not contain template syntax. + return tmpl.ParseString(combineVariables(vd, curVariable.(map[string]any)), s) + }); err != nil { + return err + } + + if _, ok := v.(*variable); !ok { + return fmt.Errorf("variable type error") + } + hv := vv.Hosts[hostName] + hv.RuntimeVars = combineVariables(hv.RuntimeVars, vd) + vv.Hosts[hostName] = hv + + return nil + } +} + +// MergeAllRuntimeVariable parse variable by specific host and merge to all hosts. +var MergeAllRuntimeVariable = func(hostName string, vd map[string]any) MergeFunc { + return func(v Variable) error { + vv := v.(*variable).value + // merge to specify host + curVariable, err := v.Get(GetAllVariable(hostName)) + if err != nil { + return err + } + // parse variable + if err := parseVariable(vd, func(s string) (string, error) { + // parse use total variable. the task variable should not contain template syntax. + return tmpl.ParseString(combineVariables(vd, curVariable.(map[string]any)), s) + }); err != nil { + return err + } + + for h := range vv.Hosts { + if _, ok := v.(*variable); !ok { + return fmt.Errorf("variable type error") + } + hv := vv.Hosts[h] + hv.RuntimeVars = combineVariables(hv.RuntimeVars, vd) + vv.Hosts[h] = hv + } + + return nil + } +} + +var GetAllVariable = func(hostName string) GetFunc { + return func(v Variable) (any, error) { + if _, ok := v.(*variable); !ok { + return nil, fmt.Errorf("variable type error") + } + data := v.(*variable).value + result := make(map[string]any) + // find from runtime + result = combineVariables(result, data.Hosts[hostName].RuntimeVars) + // find from remote + result = combineVariables(result, data.Hosts[hostName].RemoteVars) + // find from global. 
+ if vv, ok := data.getParameterVariable()[hostName]; ok { + result = combineVariables(result, vv.(map[string]any)) + } + + return result, nil + } } diff --git a/pkg/variable/internal_test.go b/pkg/variable/internal_test.go index 3cd6da0a..2c6263c8 100644 --- a/pkg/variable/internal_test.go +++ b/pkg/variable/internal_test.go @@ -20,45 +20,77 @@ import ( "testing" "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/runtime" + + kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" ) -func TestMergeGroup(t *testing.T) { +func TestGetAllVariable(t *testing.T) { testcases := []struct { name string - g1 []string - g2 []string - except []string + value value + except map[string]any }{ { - name: "non-repeat", - g1: []string{ - "h1", "h2", "h3", + name: "global override runtime variable", + value: value{ + Config: kubekeyv1.Config{ + Spec: runtime.RawExtension{ + Raw: []byte(` +artifact: + images: + - abc +`), + }, + }, + Inventory: kubekeyv1.Inventory{}, + Hosts: map[string]host{ + "test": { + RuntimeVars: map[string]any{ + "artifact": map[string]any{ + "k1": "v1", + "k2": 2, + "k3": true, + "k4": map[string]any{ + "k41": "v41", + }, + }, + }, + }, + }, }, - g2: []string{ - "h4", "h5", - }, - except: []string{ - "h1", "h2", "h3", "h4", "h5", - }, - }, - { - name: "repeat value", - g1: []string{ - "h1", "h2", "h3", - }, - g2: []string{ - "h3", "h4", "h5", - }, - except: []string{ - "h1", "h2", "h3", "h4", "h5", + except: map[string]any{ + "artifact": map[string]any{ + "k1": "v1", + "k2": 2, + "k3": true, + "k4": map[string]any{ + "k41": "v41", + }, + "images": []any{"abc"}, + }, + "groups": map[string]interface{}{"all": []string{"localhost"}}, + "inventory_hosts": map[string]interface{}{ + "test": map[string]interface{}{ + "artifact": map[string]interface{}{ + "images": []interface{}{"abc"}, + }, + "inventory_name": "test", + }, + }, + "inventory_name": "test", }, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - ac := mergeSlice(tc.g1, tc.g2) - assert.Equal(t, tc.except, ac) + v := variable{value: &tc.value} + result, err := v.Get(GetAllVariable("test")) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, tc.except, result) }) } } diff --git a/pkg/variable/variable.go b/pkg/variable/variable.go index a2f56f4c..99f7ea4c 100644 --- a/pkg/variable/variable.go +++ b/pkg/variable/variable.go @@ -21,8 +21,6 @@ import ( "encoding/json" "fmt" "path/filepath" - "regexp" - "strconv" "strings" "k8s.io/apimachinery/pkg/types" @@ -31,510 +29,74 @@ import ( ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" - kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1" - _const "github.com/kubesphere/kubekey/v4/pkg/const" "github.com/kubesphere/kubekey/v4/pkg/variable/source" ) +type GetFunc func(Variable) (any, error) + +type MergeFunc func(Variable) error + type Variable interface { Key() string - Get(option GetOption) (any, error) - Merge(option ...MergeOption) error -} - -type Options struct { - Ctx context.Context - Client ctrlclient.Client - Pipeline kubekeyv1.Pipeline + Get(GetFunc) (any, error) + Merge(MergeFunc) error } // New variable. generate value from config args. and render to source. 
-func New(o Options) (Variable, error) { +func New(client ctrlclient.Client, pipeline kubekeyv1.Pipeline) (Variable, error) { // new source - s, err := source.New(RuntimeDirFromPipeline(o.Pipeline)) + s, err := source.New(RuntimeDirFromPipeline(pipeline)) if err != nil { - klog.V(4).ErrorS(err, "create file source failed", "path", filepath.Join(RuntimeDirFromPipeline(o.Pipeline)), "pipeline", ctrlclient.ObjectKeyFromObject(&o.Pipeline)) + klog.V(4).ErrorS(err, "create file source failed", "path", filepath.Join(RuntimeDirFromPipeline(pipeline)), "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) return nil, err } // get config var config = &kubekeyv1.Config{} - if err := o.Client.Get(o.Ctx, types.NamespacedName{o.Pipeline.Spec.ConfigRef.Namespace, o.Pipeline.Spec.ConfigRef.Name}, config); err != nil { - klog.V(4).ErrorS(err, "get config from pipeline error", "config", o.Pipeline.Spec.ConfigRef, "pipeline", ctrlclient.ObjectKeyFromObject(&o.Pipeline)) + if err := client.Get(context.Background(), types.NamespacedName{Namespace: pipeline.Spec.ConfigRef.Namespace, Name: pipeline.Spec.ConfigRef.Name}, config); err != nil { + klog.V(4).ErrorS(err, "get config from pipeline error", "config", pipeline.Spec.ConfigRef, "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) return nil, err } // get inventory var inventory = &kubekeyv1.Inventory{} - if err := o.Client.Get(o.Ctx, types.NamespacedName{o.Pipeline.Spec.InventoryRef.Namespace, o.Pipeline.Spec.InventoryRef.Name}, inventory); err != nil { - klog.V(4).ErrorS(err, "get inventory from pipeline error", "inventory", o.Pipeline.Spec.InventoryRef, "pipeline", ctrlclient.ObjectKeyFromObject(&o.Pipeline)) + if err := client.Get(context.Background(), types.NamespacedName{Namespace: pipeline.Spec.InventoryRef.Namespace, Name: pipeline.Spec.InventoryRef.Name}, inventory); err != nil { + klog.V(4).ErrorS(err, "get inventory from pipeline error", "inventory", pipeline.Spec.InventoryRef, "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) return nil, err } v := &variable{ - key: string(o.Pipeline.UID), + key: string(pipeline.UID), source: s, value: &value{ Config: *config, Inventory: *inventory, - Hosts: map[string]host{ - _const.LocalHostName: {}, // set default host - }, + Hosts: make(map[string]host), }, } + for _, hostname := range convertGroup(*inventory)["all"].([]string) { + v.value.Hosts[hostname] = host{ + RemoteVars: make(map[string]any), + RuntimeVars: make(map[string]any), + } + } + // read data from source data, err := v.source.Read() if err != nil { - klog.V(4).ErrorS(err, "read data from source error", "pipeline", ctrlclient.ObjectKeyFromObject(&o.Pipeline)) + klog.V(4).ErrorS(err, "read data from source error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) return nil, err } for k, d := range data { - if k == _const.RuntimePipelineVariableLocationFile { - // set location - if err := json.Unmarshal(d, &v.value.Location); err != nil { - klog.V(4).ErrorS(err, "unmarshal location error", "pipeline", ctrlclient.ObjectKeyFromObject(&o.Pipeline)) - return nil, err - } - } else { - // set hosts - h := host{} - if err := json.Unmarshal(d, &h); err != nil { - klog.V(4).ErrorS(err, "unmarshal host error", "pipeline", ctrlclient.ObjectKeyFromObject(&o.Pipeline)) - return nil, err - } - v.value.Hosts[strings.TrimSuffix(k, ".json")] = h + // set hosts + h := host{} + if err := json.Unmarshal(d, &h); err != nil { + klog.V(4).ErrorS(err, "unmarshal host error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) + return nil, err } + 
v.value.Hosts[strings.TrimSuffix(k, ".json")] = h } + return v, nil } -type GetOption interface { - filter(data value) (any, error) -} - -// KeyPath get a key path variable -type KeyPath struct { - // HostName which host obtain the variable - HostName string - // LocationUID locate which variable belong to - LocationUID string - // Path base top variable. - Path []string -} - -func (k KeyPath) filter(data value) (any, error) { - // find value from location - var getLocationFunc func(uid string) any - getLocationFunc = func(uid string) any { - if loc := findLocation(data.Location, uid); loc != nil { - // find value from task - if v, ok := data.Hosts[k.HostName].RuntimeVars[uid]; ok { - if result := k.getValue(v, k.Path...); result != nil { - return result - } - } - if result := k.getValue(loc.Vars, k.Path...); result != nil { - return result - } - if loc.PUID != "" { - return getLocationFunc(loc.PUID) - } - } - return nil - } - if result := getLocationFunc(k.LocationUID); result != nil { - return result, nil - } - - // find value from host - if result := k.getValue(data.Hosts[k.HostName].Vars, k.Path...); result != nil { - return result, nil - } - - // find value from global - if result := k.getValue(data.getGlobalVars(k.HostName), k.Path...); result != nil { - return result, nil - } - return nil, nil -} - -// getValue from variable.VariableData use key path. if key path is empty return nil -func (k KeyPath) getValue(value VariableData, key ...string) any { - if len(key) == 0 { - return nil - } - var result any - result = value - for _, s := range key { - result = result.(VariableData)[s] - } - return result -} - -// ParentLocation UID for current location -type ParentLocation struct { - LocationUID string -} - -func (p ParentLocation) filter(data value) (any, error) { - loc := findLocation(data.Location, p.LocationUID) - if loc != nil { - return loc.PUID, nil - } - return nil, fmt.Errorf("cannot find location %s", p.LocationUID) -} - -// LocationVars get all variable for location -type LocationVars struct { - // HostName which host obtain the variable - // if HostName is empty. 
get value from global - HostName string - // LocationUID locate which variable belong to - LocationUID string -} - -func (b LocationVars) filter(data value) (any, error) { - var result VariableData - if b.HostName != "" { - // find from host runtime - if v, ok := data.Hosts[b.HostName].RuntimeVars[b.LocationUID]; ok { - result = mergeVariables(result, v) - } - - // merge location variable - var mergeLocationVarsFunc func(uid string) - mergeLocationVarsFunc = func(uid string) { - // find value from task - if v, ok := data.Hosts[b.HostName].RuntimeVars[uid]; ok { - result = mergeVariables(result, v) - } - if loc := findLocation(data.Location, uid); loc != nil { - result = mergeVariables(result, loc.Vars) - if loc.PUID != "" { - mergeLocationVarsFunc(loc.PUID) - } - } - } - mergeLocationVarsFunc(b.LocationUID) - - // get value from host - result = mergeVariables(result, data.Hosts[b.HostName].Vars) - } - - // get value from global - result = mergeVariables(result, data.getGlobalVars(b.HostName)) - - return result, nil -} - -// HostVars get all top variable for a host -type HostVars struct { - HostName string -} - -func (k HostVars) filter(data value) (any, error) { - return mergeVariables(data.getGlobalVars(k.HostName), data.Hosts[k.HostName].Vars), nil -} - -// Hostnames from array contains group name or host name -type Hostnames struct { - Name []string -} - -func (g Hostnames) filter(data value) (any, error) { - var hs []string - for _, n := range g.Name { - // add host to hs - if _, ok := data.Hosts[n]; ok { - hs = append(hs, n) - } - // add group's host to gs - for gn, gv := range convertGroup(data.Inventory) { - if gn == n { - hs = mergeSlice(hs, gv.([]string)) - break - } - } - - // Add the specified host in the specified group to the hs. - regex := regexp.MustCompile(`^(.*)\[\d\]$`) - if match := regex.FindStringSubmatch(n); match != nil { - index, err := strconv.Atoi(match[2]) - if err != nil { - klog.V(4).ErrorS(err, "convert index to int error", "index", match[2]) - return nil, err - } - for gn, gv := range data.Inventory.Spec.Groups { - if gn == match[1] { - hs = append(hs, gv.Hosts[index]) - break - } - } - } - } - return hs, nil -} - -// Infer the next phase of the task.by it dependency. -// NOTE: To optimize performance, check only one dependency of a task instead of all dependencies. -// Therefore, do not assign roles without tasks. -type InferPhase struct { - LocationUID string - Tasks []kubekeyv1alpha1.Task -} - -func (f InferPhase) filter(data value) (any, error) { - loc := findLocation(data.Location, f.LocationUID) - if loc == nil { - return nil, fmt.Errorf("cannot found location %s", f.LocationUID) - } - return f.getDependencyLocationUIDS(data, loc) -} - -// If dependency tasks is not complete. waiting. -var succeedExecuteStrategy = func(tasks []kubekeyv1alpha1.Task) kubekeyv1alpha1.TaskPhase { - if len(tasks) == 0 { // non-dependency - return kubekeyv1alpha1.TaskPhaseRunning - } - skip := true - for _, t := range tasks { - if !t.IsComplete() { - return kubekeyv1alpha1.TaskPhasePending - } - if t.IsFailed() { - return kubekeyv1alpha1.TaskPhaseSkipped - } - if !t.IsSkipped() { - skip = false - } - } - if skip { - return kubekeyv1alpha1.TaskPhaseSkipped - } - return kubekeyv1alpha1.TaskPhaseRunning -} - -// if tasks has failed. execute current task. 
-var failedExecuteStrategy = func(tasks []kubekeyv1alpha1.Task) kubekeyv1alpha1.TaskPhase { - if len(tasks) == 0 { // non-dependency - return kubekeyv1alpha1.TaskPhaseRunning - } - skip := true - for _, t := range tasks { - if !t.IsComplete() { - return kubekeyv1alpha1.TaskPhasePending - } - if t.IsFailed() { - return kubekeyv1alpha1.TaskPhaseRunning - } - if !t.IsSkipped() { - skip = false - } - } - if skip { - return kubekeyv1alpha1.TaskPhaseRunning - } - return kubekeyv1alpha1.TaskPhaseSkipped -} - -// If dependency tasks is skipped. skip. -var alwaysExecuteStrategy = func(tasks []kubekeyv1alpha1.Task) kubekeyv1alpha1.TaskPhase { - if len(tasks) == 0 { // non-dependency - return kubekeyv1alpha1.TaskPhaseRunning - } - skip := true - for _, t := range tasks { - if !t.IsComplete() { - return kubekeyv1alpha1.TaskPhasePending - } - if !t.IsSkipped() { - skip = false - } - } - if skip { - return kubekeyv1alpha1.TaskPhaseSkipped - } - return kubekeyv1alpha1.TaskPhaseRunning -} - -func (f InferPhase) getDependencyLocationUIDS(data value, loc *location) (kubekeyv1alpha1.TaskPhase, error) { - if loc.PUID == "" { - return kubekeyv1alpha1.TaskPhaseRunning, nil - } - - // Find the parent location and, based on where the current location is within the parent location, retrieve the dependent tasks. - ploc := findLocation(data.Location, loc.PUID) - - // location in Block. - for i, l := range ploc.Block { - if l.UID == loc.UID { - // When location is the first element, it is necessary to check the dependency of its parent location. - if i == 0 { - return f.getDependencyLocationUIDS(data, ploc) - } - - // When location is not the first element, dependency location is the preceding element in the same array. - return succeedExecuteStrategy(f.findAllTasks(ploc.Block[i-1], f.Tasks)), nil - } - } - - // location in Rescue - for i, l := range ploc.Rescue { - if l.UID == loc.UID { - // When location is the first element, dependency location is all task of sibling block array. - if i == 0 { - return failedExecuteStrategy(f.findAllTasks(ploc.Block[len(ploc.Block)-1], f.Tasks)), nil - } - // When location is not the first element, dependency location is the preceding element in the same array - return failedExecuteStrategy(f.findAllTasks(ploc.Rescue[i-1], f.Tasks)), nil - } - } - - // If location in Always - for i, l := range ploc.Always { - if l.UID == loc.UID { - // When location is the first element, dependency location is all task of sibling block array - if i == 0 { - return alwaysExecuteStrategy(f.findAllTasks(ploc.Block[len(ploc.Block)-1], f.Tasks)), nil - } - // When location is not the first element, dependency location is the preceding element in the same array - return alwaysExecuteStrategy(f.findAllTasks(ploc.Always[i-1], f.Tasks)), nil - } - } - - return "", fmt.Errorf("connot find location %s in parent %s", loc.UID, loc.PUID) -} - -func (f InferPhase) findAllTasks(loc location, allTasks []kubekeyv1alpha1.Task) []kubekeyv1alpha1.Task { - if len(loc.Block) == 0 { // if block is empty the location is task graph - for _, task := range allTasks { - if string(task.UID) == loc.UID { - return []kubekeyv1alpha1.Task{task} - } - } - } - var result = make([]kubekeyv1alpha1.Task, 0) - for _, l := range loc.Block { - result = append(result, f.findAllTasks(l, allTasks)...) - } - for _, l := range loc.Rescue { - result = append(result, f.findAllTasks(l, allTasks)...) - } - for _, l := range loc.Always { - result = append(result, f.findAllTasks(l, allTasks)...) 
- } - - return result -} - -type MergeOption interface { - mergeTo(data *value) error -} - -// HostMerge merge variable to host -type HostMerge struct { - // HostName of host - HostNames []string - // LocationVars to find block. Only merge the last level block. - //LocationVars []string - LocationUID string - // Data to merge - Data VariableData -} - -func (h HostMerge) mergeTo(v *value) error { - for _, name := range h.HostNames { - hv := v.Hosts[name] - if h.LocationUID == "" { // merge to host var - hv.Vars = mergeVariables(h.Data, v.Hosts[name].Vars) - } else { // merge to host runtime - if hv.RuntimeVars == nil { - hv.RuntimeVars = make(map[string]VariableData) - } - hv.RuntimeVars[h.LocationUID] = mergeVariables(v.Hosts[name].RuntimeVars[h.LocationUID], h.Data) - } - v.Hosts[name] = hv - } - return nil -} - -type LocationType string - -const ( - BlockLocation LocationType = "block" - AlwaysLocation LocationType = "always" - RescueLocation LocationType = "rescue" -) - -// LocationMerge merge variable to location -type LocationMerge struct { - UID string - ParentUID string - Type LocationType - Name string - Vars VariableData -} - -func (t LocationMerge) mergeTo(v *value) error { - if t.ParentUID == "" { // set the top location - v.Location = &location{ - Name: t.Name, - PUID: t.ParentUID, - UID: t.UID, - Vars: t.Vars, - } - return nil - } - // find parent graph - parentLocation := findLocation(v.Location, t.ParentUID) - if parentLocation == nil { - return fmt.Errorf("cannot find parent location %s", t.ParentUID) - } - - switch t.Type { - case BlockLocation: - for _, loc := range parentLocation.Block { - if loc.UID == t.UID { - klog.Warningf("task graph %s already exist", t.UID) - return nil - } - } - parentLocation.Block = append(parentLocation.Block, location{ - Name: t.Name, - PUID: t.ParentUID, - UID: t.UID, - Vars: t.Vars, - }) - case AlwaysLocation: - for _, loc := range parentLocation.Always { - if loc.UID == t.UID { - klog.Warningf("task graph %s already exist", t.UID) - return nil - } - } - parentLocation.Always = append(parentLocation.Always, location{ - Name: t.Name, - PUID: t.ParentUID, - UID: t.UID, - Vars: t.Vars, - }) - case RescueLocation: - for _, loc := range parentLocation.Rescue { - if loc.UID == t.UID { - klog.Warningf("task graph %s already exist", t.UID) - return nil - } - } - parentLocation.Rescue = append(parentLocation.Rescue, location{ - Name: t.Name, - PUID: t.ParentUID, - UID: t.UID, - Vars: t.Vars, - }) - default: - return fmt.Errorf("unknown LocationType. 
only support block,always,rescue ") - } - - return nil -} - // Cache is a cache for variable var Cache = cgcache.NewStore(func(obj interface{}) (string, error) { v, ok := obj.(Variable) @@ -544,23 +106,23 @@ var Cache = cgcache.NewStore(func(obj interface{}) (string, error) { return v.Key(), nil }) -func GetVariable(o Options) (Variable, error) { - vars, ok, err := Cache.GetByKey(string(o.Pipeline.UID)) +func GetVariable(client ctrlclient.Client, pipeline kubekeyv1.Pipeline) (Variable, error) { + vars, ok, err := Cache.GetByKey(string(pipeline.UID)) if err != nil { - klog.V(5).ErrorS(err, "get variable error", "pipeline", ctrlclient.ObjectKeyFromObject(&o.Pipeline)) + klog.V(5).ErrorS(err, "get variable error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) return nil, err } if ok { return vars.(Variable), nil } // add new variable to cache - nv, err := New(o) + nv, err := New(client, pipeline) if err != nil { - klog.V(5).ErrorS(err, "create variable error", "pipeline", ctrlclient.ObjectKeyFromObject(&o.Pipeline)) + klog.V(5).ErrorS(err, "create variable error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) return nil, err } if err := Cache.Add(nv); err != nil { - klog.V(5).ErrorS(err, "add variable to store error", "pipeline", ctrlclient.ObjectKeyFromObject(&o.Pipeline)) + klog.V(5).ErrorS(err, "add variable to store error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) return nil, err } return nv, nil @@ -568,6 +130,8 @@ func GetVariable(o Options) (Variable, error) { func CleanVariable(p *kubekeyv1.Pipeline) { if _, ok, err := Cache.GetByKey(string(p.UID)); err == nil && ok { - Cache.Delete(string(p.UID)) + if err := Cache.Delete(string(p.UID)); err != nil { + klog.ErrorS(err, "delete variable from cache error", "pipeline", ctrlclient.ObjectKeyFromObject(p)) + } } } diff --git a/project/inventory/config.yaml b/project/inventory/config.yaml deleted file mode 100644 index d3e69b34..00000000 --- a/project/inventory/config.yaml +++ /dev/null @@ -1,72 +0,0 @@ -apiVersion: kubekey.kubesphere.io/v1 -kind: Config -metadata: - name: example -spec: -### - # zone for kk. how to download files - kkzone: cn -### [ variable for precheck ] - # It is possible to deploy etcd with three methods. - # external: Deploy etcd cluster with external etcd cluster. - # internal: Deploy etcd cluster by static pod. - etcd_deployment_type: external - # the support os.Release nodes which to be installed - # support ubuntu, centos. - supported_os_distributions: [ ubuntu ] - # the network plugin to be installed. - # support: flannel, calico - kube_network_plugin: calico - # the version of kubernetes to be installed. - # should be greater than or equal to kube_version_min_required. - kube_version: 1.23.15 - # the minimal version of kubernetes to be installed. - kube_version_min_required: 1.19.10 - # memory size for each kube_control_plane node.(unit kB) - # should be greater than or equal to minimal_master_memory_mb. - minimal_master_memory_mb: 10 - # memory size for each kube_node node.(unit kB) - # should be greater than or equal to minimal_node_memory_mb. - minimal_node_memory_mb: 10 - # Set the Pod CIDR size of a node. - kube_network_node_prefix: 24 - # the cri to be installed. - # support: containerd,docker,crio - container_manager: containerd - # the minimal required version of containerd to be installed. - containerd_min_version_required: v1.6.0 -### [ artifact ] - # offline artifact package for kk. -# artifact_file: /tmp/kubekey.tar.gz - # the md5_file of artifact_file. 
- artifact_MD5: /tmp/artifact.md5 - # work_dir is the directory where the artifact is extracted. - work_dir: /var/lib/kubekey/ - # the binary's arch container in artifact - artifact_arch: [ "amd64" ] - # etcd binary - etcd_version: v3.5.6 - # cni binary - cni_version: v1.2.0 - # helm binary - helm_version: v3.14.2 - # crictl binary - crictl_version: v1.29.0 - # docker binary - docker_version: 24.0.6 - # cridockerd - cridockerd_version: v0.3.10 - # containerd binary - containerd_version: v1.7.0 - # runc binary - runc_version: 1.1.11 - # calicoctl binary - calicoctl_version: v3.26.1 - # harbor binary - harbor_version: v2.10.0 - # docker-compose binary - dockercompose_version: v2.24.6 -### [ install ] - # registry type. support: harbor, registry - registry_type: harbor - diff --git a/project/roles/precheck/artifact_check/tasks/main.yaml b/project/roles/precheck/artifact_check/tasks/main.yaml deleted file mode 100644 index 3f473f54..00000000 --- a/project/roles/precheck/artifact_check/tasks/main.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Check artifact is exits - command: - if [ ! -f "{{ artifact_file }}" ]; then - exit 1 - fi - -- name: Check artifact file type - command: - if [[ "{{ artifact_file }}" != *{{ item }} ]]; then - exit 1 - fi - loop: ['.tgz','.tar.gz'] - -- name: Check md5 of artifact - command: - if [[ $(md5sum {{ artifact_file }}) != $(cat {{ artifact_MD5 }}) ]]; then - exit 1 - fi - when: - - artifact_MD5 | defined diff --git a/project/roles/precheck/env_check/tasks/main.yaml b/project/roles/precheck/env_check/tasks/main.yaml deleted file mode 100644 index 6d06d115..00000000 --- a/project/roles/precheck/env_check/tasks/main.yaml +++ /dev/null @@ -1,114 +0,0 @@ ---- -- name: Stop if either kube_control_plane or kube_node group is empty - assert: - that: "'{{ item }}' in groups" - loop: - - kube_control_plane - - kube_node - run_once: true - -- name: Stop if etcd group is empty in external etcd mode - assert: - that: "'etcd' in groups" - fail_msg: "Group 'etcd' cannot be empty in external etcd mode" - run_once: true - when: - - etcd_deployment_type != "kubeadm" - -- name: Stop if the os does not support - assert: - that: (allow_unsupported_distribution_setup | default:false) or os.release.ID in supported_os_distributions - fail_msg: "{{ os.release.ID }} is not a known OS" - -- name: Stop if unknown network plugin - vars: - require_network_plugin: ['calico', 'flannel', 'weave', 'cloud', 'cilium', 'cni', 'kube-ovn', 'kube-router', 'macvlan', 'custom_cni'] - assert: - that: kube_network_plugin in require_network_plugin - fail_msg: "{{ kube_network_plugin }} is not supported" - when: - - kube_network_plugin | defined - -- name: Stop if unsupported version of Kubernetes - assert: - that: kube_version | version:'>=,{{kube_version_min_required}}' - fail_msg: "The current release of Kubespray only support newer version of Kubernetes than {{ kube_version_min_required }} - You are trying to apply {{ kube_version }}" - -- name: Stop if even number of etcd hosts - assert: - that: not groups.etcd | length | divisibleby:2 - when: - - inventory_hostname in groups['etcd'] - -- name: Stop if memory is too small for masters - assert: - that: process.memInfo.MemTotal | cut:' kB' >= minimal_master_memory_mb - when: - - inventory_hostname in groups['kube_control_plane'] - -- name: Stop if memory is too small for nodes - assert: - that: process.memInfo.MemTotal | cut:' kB' >= minimal_node_memory_mb - when: - - inventory_hostname in groups['kube_node'] - -# This assertion will fail on the safe 
side: One can indeed schedule more pods -# on a node than the CIDR-range has space for when additional pods use the host -# network namespace. It is impossible to ascertain the number of such pods at -# provisioning time, so to establish a guarantee, we factor these out. -# NOTICE: the check blatantly ignores the inet6-case -- name: Guarantee that enough network address space is available for all pods - assert: - that: "(kubelet_max_pods | default_if_none:110 | integer) <= (2 | pow: {{ 32 - kube_network_node_prefix | integer }} - 2)" - fail_msg: "Do not schedule more pods on a node than inet addresses are available." - when: - - inventory_hostname in groups['k8s_cluster'] - - kube_network_node_prefix | defined - - kube_network_plugin != 'calico' - -- name: Stop if access_ip is not pingable - command: ping -c1 {{ access_ip }} - when: - - access_ip | defined - - ping_access_ip - changed_when: false - -- name: Stop if kernel version is too low - assert: - that: os.kernelVersion | split:'-' | first | version:'>=,4.9.17' - when: - - kube_network_plugin == 'cilium' or (cilium_deploy_additionally | default:false) - -- name: Stop if bad hostname - vars: - regex: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$' - assert: - that: inventory_hostname | match:regex - fail_msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character" - -- name: Stop if etcd deployment type is not kubekey, external or kubeadm - vars: - require_etcd_deployment_type: ['kubekey', 'external', 'kubeadm'] - assert: - that: etcd_deployment_type in require_etcd_deployment_type - fail_msg: "The etcd deployment type, 'etcd_deployment_type', must be kubekey, external or kubeadm" - when: - - inventory_hostname in groups['etcd'] - -- name: Stop if container manager is not docker, crio or containerd - vars: - require_container_manager: ['docker', 'crio', 'containerd'] - assert: - that: container_manager in require_container_manager - fail_msg: "The container manager, 'container_manager', must be docker, crio or containerd" - run_once: true - -- name: Ensure minimum containerd version - require_containerd_version: ['latest', 'edge', 'stable'] - assert: - that: containerd_version | version:'>=,{{containerd_min_version_required}}' - fail_msg: "containerd_version is too low. Minimum version {{ containerd_min_version_required }}" - run_once: yes - when: - - not containerd_version in require_containerd_version - - container_manager == 'containerd'
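Taken together, the reworked entry points reduce the caller's surface to a controller-runtime client and the pipeline object. A minimal end-to-end sketch, assuming a caller that already holds both; the package name, function name, host name "node1", and the fact payload are hypothetical, not part of this change:

package example

import (
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
	"github.com/kubesphere/kubekey/v4/pkg/variable"
)

// runPipelineVariables resolves the pipeline's cached Variable (created from
// the referenced Config and Inventory on first use), records facts gathered
// from a host, and drops the cache entry once the pipeline is finished.
func runPipelineVariables(client ctrlclient.Client, pipeline kubekeyv1.Pipeline) error {
	v, err := variable.GetVariable(client, pipeline)
	if err != nil {
		return err
	}

	// Remote facts are accepted once per host, and the host must already
	// exist in the inventory-derived Hosts map.
	if err := v.Merge(variable.MergeRemoteVariable("node1", map[string]any{
		"os": map[string]any{"release": map[string]any{"ID": "ubuntu"}},
	})); err != nil {
		return err
	}

	// ... schedule and run tasks against the merged variables ...

	variable.CleanVariable(&pipeline)
	return nil
}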