diff --git a/api/project/v1/base.go b/api/project/v1/base.go index a33d88f7..6f4a6001 100644 --- a/api/project/v1/base.go +++ b/api/project/v1/base.go @@ -28,8 +28,7 @@ type Base struct { RemoteUser string `yaml:"remote_user,omitempty"` // variables - Vars []yaml.Node `yaml:"-"` - VarsFromMarshal yaml.Node `yaml:"vars,omitempty"` + Vars Vars `yaml:"vars,omitempty"` // module default params //ModuleDefaults []map[string]map[string]any `yaml:"module_defaults,omitempty"` @@ -55,3 +54,16 @@ type Base struct { BecomeFlags string `yaml:"become_flags,omitempty"` BecomeExe string `yaml:"become_exe,omitempty"` } + +// Vars is a custom type to hold a list of YAML nodes representing variables. +// This allows for flexible unmarshalling of various YAML structures into Vars. +type Vars struct { + Nodes []yaml.Node +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface for Vars. +// It appends the unmarshalled YAML node to the Vars.Nodes slice. +func (v *Vars) UnmarshalYAML(node *yaml.Node) error { + v.Nodes = append(v.Nodes, *node) + return nil +} diff --git a/api/project/v1/taggable.go b/api/project/v1/taggable.go index 7f36d84b..b5d5355e 100644 --- a/api/project/v1/taggable.go +++ b/api/project/v1/taggable.go @@ -72,10 +72,6 @@ func (t Taggable) IsEnabled(onlyTags []string, skipTags []string) bool { // JoinTag the child block should inherit tag for parent block func JoinTag(child, parent Taggable) Taggable { for _, tag := range parent.Tags { - if tag == AlwaysTag { // skip inherit "always" tag - continue - } - if !slices.Contains(child.Tags, tag) { child.Tags = append(child.Tags, tag) } diff --git a/builtin/core/defaults/config/v1.23.yaml b/builtin/core/defaults/config/v1.23.yaml index c5f17f7e..744de1c0 100644 --- a/builtin/core/defaults/config/v1.23.yaml +++ b/builtin/core/defaults/config/v1.23.yaml @@ -1,70 +1,80 @@ apiVersion: kubekey.kubesphere.io/v1 kind: Config spec: - # zone for kk. 
how to download files - # kkzone: cn - # work_dir is the directory where the artifact is extracted. - # work_dir: /var/lib/kubekey/ - # the version of kubernetes to be installed. - # should be greater than or equal to kube_version_min_required. - kube_version: {{ .kube_version }} - # helm binary - helm_version: v3.8.2 - # etcd binary - etcd_version: v3.5.4 - # ========== image registry ========== - # keepalived image tag. Used for load balancing when there are multiple image registry nodes. - # keepalived_version: 2.0.20 - # ========== image registry: harbor ========== - # harbor image tag - # harbor_version: v2.6.3 - # docker-compose binary - # dockercompose_version: v2.12.2 - # ========== image registry: docker-registry ========== - # docker-registry image tag - # docker_registry_version: 2.8.3 - # ========== cri ========== - # crictl binary - crictl_version: v1.23.0 - # ========== cri: docker ========== - # docker binary - docker_version: 20.10.18 - # cridockerd. Required when kube_version is greater than 1.24 - # cridockerd_version: v0.3.10 - # ========== cri: containerd ========== - # containerd binary - # containerd_version: v1.6.8 - # runc binary - # runc_version: v1.1.4 - # ========== cni ========== - # cni_plugins binary - # cni_plugins_version: v1.1.1 - # ========== cni: calico ========== - # calicoctl binary - calico_version: v3.24.5 - # ========== cni: cilium ========== - # cilium helm - # cilium_version: 1.12.6 - # ========== cni: kubeovn ========== - # kubeovn helm - # kubeovn_version: 1.10.0 - # ========== cni: hybridnet ========== - # hybridnet helm - # hybridnet_version: 0.6.8 - # ========== storageclass ========== - # ========== storageclass: nfs ========== - # nfs provisioner helm version - # nfs_provisioner_version: 4.0.18 - kubernetes: - controller_manager: - extra_args: - cluster-signing-duration: 87600h + download: + # if set as "cn", so that online downloads will try to use available domestic sources whenever possible. 
+ zone: "" + kubernetes: + kube_version: {{ .kube_version }} + # helm binary + helm_version: v3.8.2 + etcd: + # etcd binary + etcd_version: v3.5.4 + image_registry: + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.6.3 + # docker-compose binary + dockercompose_version: v2.12.2 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 cri: - sandbox_image: - tag: "3.6" # support: containerd,docker container_manager: docker - + sandbox_image: + tag: "3.6" + # ========== cri ========== + # crictl binary + crictl_version: v1.23.0 + # ========== cri: docker ========== + # docker binary + docker_version: 20.10.18 + # cridockerd. Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.10 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.6.8 + # runc binary + runc_version: v1.1.4 + cni: + multus: + image: + tag: v3.9.3 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.1.1 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.24.5 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.12.6 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.10.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.3.0 + linux_utils_image: + tag: 3.3.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.2 + dns: + dns_image: + tag: v1.8.6 + dns_cache_image: + tag: 1.21.1 # image_manifests: # - docker.io/calico/apiserver:v3.24.5 # - docker.io/calico/cni:v3.24.5 diff --git 
a/builtin/core/defaults/config/v1.24.yaml b/builtin/core/defaults/config/v1.24.yaml index 61795376..b17cf732 100644 --- a/builtin/core/defaults/config/v1.24.yaml +++ b/builtin/core/defaults/config/v1.24.yaml @@ -1,70 +1,81 @@ apiVersion: kubekey.kubesphere.io/v1 kind: Config spec: - # zone for kk. how to download files - # kkzone: cn - # work_dir is the directory where the artifact is extracted. - # work_dir: /var/lib/kubekey/ - # the version of kubernetes to be installed. - # should be greater than or equal to kube_version_min_required. - kube_version: {{ .kube_version }} - # helm binary - helm_version: v3.10.3 - # etcd binary - etcd_version: v3.5.6 - # ========== image registry ========== - # keepalived image tag. Used for load balancing when there are multiple image registry nodes. - # keepalived_version: v2.0.20 - # ========== image registry: harbor ========== - # harbor image tag - # harbor_version: v2.7.1 - # docker-compose binary - # dockercompose_version: v2.14.0 - # ========== image registry: docker-registry ========== - # docker-registry image tag - # docker_registry_version: 2.8.3 - # ========== cri ========== - # crictl binary - crictl_version: v1.24.0 - # ========== cri: docker ========== - # docker binary - # docker_version: 20.10.24 - # cridockerd. 
Required when kube_version is greater than 1.24 - # cridockerd_version: v0.3.1 - # ========== cri: containerd ========== - # containerd binary - containerd_version: v1.6.16 - # runc binary - runc_version: v1.1.4 - # ========== cni ========== - # cni_plugins binary - # cni_plugins_version: v1.1.1 - # ========== cni: calico ========== - # calicoctl binary - calico_version: v3.25.1 - # ========== cni: cilium ========== - # cilium helm - # cilium_version: 1.13.5 - # ========== cni: kubeovn ========== - # kubeovn helm - # kubeovn_version: 1.10.0 - # ========== cni: hybridnet ========== - # hybridnet helm - # hybridnet_version: 0.6.8 - # ========== storageclass ========== - # ========== storageclass: nfs ========== - # nfs provisioner helm version - # nfs_provisioner_version: 4.0.18 - kubernetes: - controller_manager: - extra_args: - cluster-signing-duration: 87600h + download: + # if set as "cn", so that online downloads will try to use available domestic sources whenever possible. + zone: "" + kubernetes: + kube_version: {{ .kube_version }} + # helm binary + helm_version: v3.10.3 + etcd: + # etcd binary + etcd_version: v3.5.6 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: v2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.7.1 + # docker-compose binary + dockercompose_version: v2.14.0 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 cri: - sandbox_image: - tag: "3.6" # support: containerd,docker container_manager: containerd - + sandbox_image: + tag: "3.6" + # ========== cri ========== + # crictl binary + crictl_version: v1.24.0 + # ========== cri: docker ========== + # docker binary + docker_version: 20.10.24 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.6.16 + # runc binary + runc_version: v1.1.4 + cni: + multus: + image: + tag: v3.10.1 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.1.1 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.25.1 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.13.5 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.10.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.4.0 + linux_utils_image: + tag: 3.4.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.2 + dns: + dns_image: + tag: v1.8.6 + dns_cache_image: + tag: 1.22.20 # image_manifests: # - docker.io/calico/apiserver:v3.25.1 # - docker.io/calico/cni:v3.25.1 diff --git a/builtin/core/defaults/config/v1.25.yaml b/builtin/core/defaults/config/v1.25.yaml index 0e2ef2ec..bd01b5c5 100644 --- a/builtin/core/defaults/config/v1.25.yaml +++ b/builtin/core/defaults/config/v1.25.yaml @@ -1,70 +1,81 @@ apiVersion: kubekey.kubesphere.io/v1 kind: Config spec: - # zone for kk. how to download files - # kkzone: cn - # work_dir is the directory where the artifact is extracted. - # work_dir: /var/lib/kubekey/ - # the version of kubernetes to be installed. - # should be greater than or equal to kube_version_min_required. - kube_version: {{ .kube_version }} - # helm binary - helm_version: v3.10.3 - # etcd binary - etcd_version: v3.5.7 - # ========== image registry ========== - # keepalived image tag. Used for load balancing when there are multiple image registry nodes. 
- # keepalived_version: 2.0.20 - # ========== image registry: harbor ========== - # harbor image tag - # harbor_version: v2.8.1 - # docker-compose binary - # dockercompose_version: v2.15.1 - # ========== image registry: docker-registry ========== - # docker-registry image tag - # docker_registry_version: 2.8.3 - # ========== cri ========== - # crictl binary - crictl_version: v1.25.0 - # ========== cri: docker ========== - # docker binary - # docker_version: 20.10.24 - # cridockerd. Required when kube_version is greater than 1.24 - # cridockerd_version: v0.3.1 - # ========== cri: containerd ========== - # containerd binary - containerd_version: v1.6.19 - # runc binary - runc_version: v1.1.4 - # ========== cni ========== - # cni_plugins binary - # cni_plugins_version: v1.1.1 - # ========== cni: calico ========== - # calicoctl binary - calico_version: v3.25.1 - # ========== cni: cilium ========== - # cilium helm - # cilium_version: 1.13.5 - # ========== cni: kubeovn ========== - # kubeovn helm - # kubeovn_version: 1.10.0 - # ========== cni: hybridnet ========== - # hybridnet helm - # hybridnet_version: 0.6.8 - # ========== storageclass ========== - # ========== storageclass: nfs ========== - # nfs provisioner helm version - # nfs_provisioner_version: 4.0.18 - kubernetes: - controller_manager: - extra_args: - cluster-signing-duration: 87600h + download: + # if set as "cn", so that online downloads will try to use available domestic sources whenever possible. + zone: "" + kubernetes: + kube_version: {{ .kube_version }} + # helm binary + helm_version: v3.10.3 + etcd: + # etcd binary + etcd_version: v3.5.7 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. 
+ keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.8.1 + # docker-compose binary + dockercompose_version: v2.15.1 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 cri: - sandbox_image: - tag: "3.6" # support: containerd,docker container_manager: containerd - + sandbox_image: + tag: "3.6" + # ========== cri ========== + # crictl binary + crictl_version: v1.25.0 + # ========== cri: docker ========== + # docker binary + docker_version: 20.10.24 + # cridockerd. Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.6.19 + # runc binary + runc_version: v1.1.4 + cni: + multus: + image: + tag: v3.11.3 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.1.1 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.25.1 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.13.5 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.10.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.5.0 + linux_utils_image: + tag: 3.5.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.8 + dns: + dns_image: + tag: v1.9.3 + dns_cache_image: + tag: 1.22.20 # image_manifests: # - docker.io/calico/apiserver:v3.25.1 # - docker.io/calico/cni:v3.25.1 diff --git a/builtin/core/defaults/config/v1.26.yaml b/builtin/core/defaults/config/v1.26.yaml index ab1a3696..1159149b 100644 --- a/builtin/core/defaults/config/v1.26.yaml +++ b/builtin/core/defaults/config/v1.26.yaml @@ -1,70 +1,81 @@ apiVersion: 
kubekey.kubesphere.io/v1 kind: Config spec: - # zone for kk. how to download files - # kkzone: cn - # work_dir is the directory where the artifact is extracted. - # work_dir: /var/lib/kubekey/ - # the version of kubernetes to be installed. - # should be greater than or equal to kube_version_min_required. - kube_version: {{ .kube_version }} - # helm binary - helm_version: v3.11.2 - # etcd binary - etcd_version: v3.5.8 - # ========== image registry ========== - # keepalived image tag. Used for load balancing when there are multiple image registry nodes. - # keepalived_version: 2.0.20 - # ========== image registry: harbor ========== - # harbor image tag - # harbor_version: v2.9.1 - # docker-compose binary - # dockercompose_version: v2.16.0 - # ========== image registry: docker-registry ========== - # docker-registry image tag - # docker_registry_version: 2.8.3 - # ========== cri ========== - # crictl binary - crictl_version: v1.26.0 - # ========== cri: docker ========== - # docker binary - # docker_version: 23.0.6 - # cridockerd. 
Required when kube_version is greater than 1.24 - # cridockerd_version: v0.3.1 - # ========== cri: containerd ========== - # containerd binary - containerd_version: v1.6.21 - # runc binary - runc_version: v1.1.5 - # ========== cni ========== - # cni_plugins binary - # cni_plugins_version: v1.2.0 - # ========== cni: calico ========== - # calicoctl binary - calico_version: v3.26.1 - # ========== cni: cilium ========== - # cilium helm - # cilium_version: 1.13.5 - # ========== cni: kubeovn ========== - # kubeovn helm - # kubeovn_version: 1.10.0 - # ========== cni: hybridnet ========== - # hybridnet helm - # hybridnet_version: 0.6.8 - # ========== storageclass ========== - # ========== storageclass: nfs ========== - # nfs provisioner helm version - # nfs_provisioner_version: 4.0.18 - kubernetes: - controller_manager: - extra_args: - cluster-signing-duration: 87600h + download: + # if set as "cn", so that online downloads will try to use available domestic sources whenever possible. + zone: "" + kubernetes: + kube_version: {{ .kube_version }} + # helm binary + helm_version: v3.11.2 + etcd: + # etcd binary + etcd_version: v3.5.8 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.9.1 + # docker-compose binary + dockercompose_version: v2.16.0 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 cri: - sandbox_image: - tag: "3.7" # support: containerd,docker container_manager: containerd - + sandbox_image: + tag: "3.7" + # ========== cri ========== + # crictl binary + crictl_version: v1.26.0 + # ========== cri: docker ========== + # docker binary + docker_version: 23.0.6 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.6.21 + # runc binary + runc_version: v1.1.5 + cni: + multus: + image: + tag: v4.0.2 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.26.1 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.13.5 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.10.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.6.0 + linux_utils_image: + tag: 3.6.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.8 + dns: + dns_image: + tag: v1.9.3 + dns_cache_image: + tag: 1.22.20 # image_manifests: # - docker.io/calico/apiserver:v3.26.1 # - docker.io/calico/cni:v3.26.1 diff --git a/builtin/core/defaults/config/v1.27.yaml b/builtin/core/defaults/config/v1.27.yaml index fd79794b..2124ec58 100644 --- a/builtin/core/defaults/config/v1.27.yaml +++ b/builtin/core/defaults/config/v1.27.yaml @@ -1,70 +1,81 @@ apiVersion: kubekey.kubesphere.io/v1 kind: Config spec: - # zone for kk. how to download files - # kkzone: cn - # work_dir is the directory where the artifact is extracted. - # work_dir: /var/lib/kubekey/ - # the version of kubernetes to be installed. - # should be greater than or equal to kube_version_min_required. - kube_version: {{ .kube_version }} - # helm binary - helm_version: v3.12.1 - # etcd binary - etcd_version: v3.5.9 - # ========== image registry ========== - # keepalived image tag. Used for load balancing when there are multiple image registry nodes. 
- # keepalived_version: 2.0.20 - # ========== image registry: harbor ========== - # harbor image tag - # harbor_version: v2.10.1 - # docker-compose binary - # dockercompose_version: v2.20.3 - # ========== image registry: docker-registry ========== - # docker-registry image tag - # docker_registry_version: 2.8.3 - # ========== cri ========== - # crictl binary - crictl_version: v1.27.0 - # ========== cri: docker ========== - # docker binary - # docker_version: 23.0.6 - # cridockerd. Required when kube_version is greater than 1.24 - # cridockerd_version: v0.3.1 - # ========== cri: containerd ========== - # containerd binary - containerd_version: v1.7.2 - # runc binary - runc_version: v1.1.7 - # ========== cni ========== - # cni_plugins binary - # cni_plugins_version: v1.2.0 - # ========== cni: calico ========== - # calicoctl binary - calico_version: v3.26.1 - # ========== cni: cilium ========== - # cilium helm - # cilium_version: 1.14.2 - # ========== cni: kubeovn ========== - # kubeovn helm - # kubeovn_version: 1.11.0 - # ========== cni: hybridnet ========== - # hybridnet helm - # hybridnet_version: 0.6.8 - # ========== storageclass ========== - # ========== storageclass: nfs ========== - # nfs provisioner helm version - # nfs_provisioner_version: 4.0.20 - kubernetes: - controller_manager: - extra_args: - cluster-signing-duration: 87600h + download: + # if set as "cn", so that online downloads will try to use available domestic sources whenever possible. + zone: "" + kubernetes: + kube_version: {{ .kube_version }} + # helm binary + helm_version: v3.12.1 + etcd: + # etcd binary + etcd_version: v3.5.9 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. 
+ keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 cri: - sandbox_image: - tag: "3.7" # support: containerd,docker container_manager: containerd - + sandbox_image: + tag: "3.7" + # ========== cri ========== + # crictl binary + crictl_version: v1.27.0 + # ========== cri: docker ========== + # docker binary + docker_version: 23.0.6 + # cridockerd. Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.2 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.0.2 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.26.1 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.14.2 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.11.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.7.0 + linux_utils_image: + tag: 3.7.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.10 + dns: + dns_image: + tag: v1.10.1 + dns_cache_image: + tag: 1.22.20 # image_manifests: # - docker.io/calico/apiserver:v3.26.1 # - docker.io/calico/cni:v3.26.1 diff --git a/builtin/core/defaults/config/v1.28.yaml b/builtin/core/defaults/config/v1.28.yaml index 3f420875..5eb626d0 100644 --- a/builtin/core/defaults/config/v1.28.yaml +++ b/builtin/core/defaults/config/v1.28.yaml @@ -1,70 +1,81 @@ apiVersion: 
kubekey.kubesphere.io/v1 kind: Config spec: - # zone for kk. how to download files - # kkzone: cn - # work_dir is the directory where the artifact is extracted. - # work_dir: /var/lib/kubekey/ - # the version of kubernetes to be installed. - # should be greater than or equal to kube_version_min_required. - kube_version: {{ .kube_version }} - # helm binary - helm_version: v3.12.1 - # etcd binary - etcd_version: v3.5.9 - # ========== image registry ========== - # keepalived image tag. Used for load balancing when there are multiple image registry nodes. - # keepalived_version: 2.0.20 - # ========== image registry: harbor ========== - # harbor image tag - # harbor_version: v2.10.1 - # docker-compose binary - # dockercompose_version: v2.20.3 - # ========== image registry: docker-registry ========== - # docker-registry image tag - # docker_registry_version: 2.8.3 - # ========== cri ========== - # crictl binary - crictl_version: v1.28.0 - # ========== cri: docker ========== - # docker binary - # docker_version: 24.0.6 - # cridockerd. 
Required when kube_version is greater than 1.24 - # cridockerd_version: v0.3.1 - # ========== cri: containerd ========== - # containerd binary - containerd_version: v1.7.3 - # runc binary - runc_version: v1.1.7 - # ========== cni ========== - # cni_plugins binary - # cni_plugins_version: v1.2.0 - # ========== cni: calico ========== - # calicoctl binary - calico_version: v3.28.2 - # ========== cni: cilium ========== - # cilium helm - # cilium_version: 1.15.0 - # ========== cni: kubeovn ========== - # kubeovn helm - # kubeovn_version: 1.12.0 - # ========== cni: hybridnet ========== - # hybridnet helm - # hybridnet_version: 0.6.8 - # ========== storageclass ========== - # ========== storageclass: nfs ========== - # nfs provisioner helm version - # nfs_provisioner_version: 4.0.20 - kubernetes: - controller_manager: - extra_args: - cluster-signing-duration: 87600h + download: + # if set as "cn", so that online downloads will try to use available domestic sources whenever possible. + zone: "" + kubernetes: + kube_version: {{ .kube_version }} + # helm binary + helm_version: v3.12.1 + etcd: + # etcd binary + etcd_version: v3.5.9 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 cri: - sandbox_image: - tag: "3.8" # support: containerd,docker container_manager: containerd - + sandbox_image: + tag: "3.8" + # ========== cri ========== + # crictl binary + crictl_version: v1.28.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.6 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.3 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.1.0 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.0 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.12.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.8.0 + linux_utils_image: + tag: 3.8.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.10 + dns: + dns_image: + tag: v1.10.1 + dns_cache_image: + tag: 1.22.20 # image_manifests: # - docker.io/calico/apiserver:v3.28.2 # - docker.io/calico/cni:v3.28.2 diff --git a/builtin/core/defaults/config/v1.29.yaml b/builtin/core/defaults/config/v1.29.yaml index a4640707..14980550 100644 --- a/builtin/core/defaults/config/v1.29.yaml +++ b/builtin/core/defaults/config/v1.29.yaml @@ -1,70 +1,81 @@ apiVersion: kubekey.kubesphere.io/v1 kind: Config spec: - # zone for kk. how to download files - # kkzone: cn - # work_dir is the directory where the artifact is extracted. - # work_dir: /var/lib/kubekey/ - # the version of kubernetes to be installed. - # should be greater than or equal to kube_version_min_required. - kube_version: {{ .kube_version }} - # helm binary - helm_version: v3.13.3 - # etcd binary - etcd_version: v3.5.10 - # ========== image registry ========== - # keepalived image tag. Used for load balancing when there are multiple image registry nodes. 
- # keepalived_version: 2.0.20 - # ========== image registry: harbor ========== - # harbor image tag - # harbor_version: v2.10.1 - # docker-compose binary - # dockercompose_version: v2.20.3 - # ========== image registry: docker-registry ========== - # docker-registry image tag - # docker_registry_version: 2.8.3 - # ========== cri ========== - # crictl binary - crictl_version: v1.29.0 - # ========== cri: docker ========== - # docker binary - # docker_version: 24.0.7 - # cridockerd. Required when kube_version is greater than 1.24 - # cridockerd_version: v0.3.1 - # ========== cri: containerd ========== - # containerd binary - containerd_version: v1.7.6 - # runc binary - runc_version: v1.1.7 - # ========== cni ========== - # cni_plugins binary - # cni_plugins_version: v1.2.0 - # ========== cni: calico ========== - # calicoctl binary - calico_version: v3.28.2 - # ========== cni: cilium ========== - # cilium helm - # cilium_version: 1.15.4 - # ========== cni: kubeovn ========== - # kubeovn helm - # kubeovn_version: 1.13.0 - # ========== cni: hybridnet ========== - # hybridnet helm - # hybridnet_version: 0.6.8 - # ========== storageclass ========== - # ========== storageclass: nfs ========== - # nfs provisioner helm version - # nfs_provisioner_version: 4.0.20 - kubernetes: - controller_manager: - extra_args: - cluster-signing-duration: 87600h + download: + # if set as "cn", so that online downloads will try to use available domestic sources whenever possible. + zone: "" + kubernetes: + kube_version: {{ .kube_version }} + # helm binary + helm_version: v3.13.3 + etcd: + # etcd binary + etcd_version: v3.5.10 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. 
+ keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 cri: - sandbox_image: - tag: "3.8" # support: containerd,docker container_manager: containerd - + sandbox_image: + tag: "3.8" + # ========== cri ========== + # crictl binary + crictl_version: v1.29.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.7 + # cridockerd. Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.6 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.1.1 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.4 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.13.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.9.0 + linux_utils_image: + tag: 3.9.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.1.0 + dns: + dns_image: + tag: v1.11.1 + dns_cache_image: + tag: 1.23.1 # image_manifests: # - docker.io/calico/apiserver:v3.28.2 # - docker.io/calico/cni:v3.28.2 diff --git a/builtin/core/defaults/config/v1.30.yaml b/builtin/core/defaults/config/v1.30.yaml index ee3254e4..77c870e2 100644 --- a/builtin/core/defaults/config/v1.30.yaml +++ b/builtin/core/defaults/config/v1.30.yaml @@ -1,70 +1,81 @@ apiVersion: 
kubekey.kubesphere.io/v1 kind: Config spec: - # zone for kk. how to download files - # kkzone: cn - # work_dir is the directory where the artifact is extracted. - # work_dir: /var/lib/kubekey/ - # the version of kubernetes to be installed. - # should be greater than or equal to kube_version_min_required. - kube_version: {{ .kube_version }} - # helm binary - helm_version: v3.13.3 - # etcd binary - etcd_version: v3.5.10 - # ========== image registry ========== - # keepalived image tag. Used for load balancing when there are multiple image registry nodes. - # keepalived_version: 2.0.20 - # ========== image registry: harbor ========== - # harbor image tag - # harbor_version: v2.10.1 - # docker-compose binary - # dockercompose_version: v2.20.3 - # ========== image registry: docker-registry ========== - # docker-registry image tag - # docker_registry_version: 2.8.3 - # ========== cri ========== - # crictl binary - crictl_version: v1.30.0 - # ========== cri: docker ========== - # docker binary - # docker_version: 24.0.7 - # cridockerd. 
Required when kube_version is greater than 1.24 - # cridockerd_version: v0.3.1 - # ========== cri: containerd ========== - # containerd binary - containerd_version: v1.7.6 - # runc binary - runc_version: v1.1.7 - # ========== cni ========== - # cni_plugins binary - # cni_plugins_version: v1.2.0 - # ========== cni: calico ========== - # calicoctl binary - calico_version: v3.28.2 - # ========== cni: cilium ========== - # cilium helm - # cilium_version: 1.15.4 - # ========== cni: kubeovn ========== - # kubeovn helm - # kubeovn_version: 1.13.0 - # ========== cni: hybridnet ========== - # hybridnet helm - # hybridnet_version: 0.6.8 - # ========== storageclass ========== - # ========== storageclass: nfs ========== - # nfs provisioner helm version - # nfs_provisioner_version: 4.0.20 - kubernetes: - controller_manager: - extra_args: - cluster-signing-duration: 87600h + download: + # if set as "cn", so that online downloads will try to use available domestic sources whenever possible. + zone: "" + kubernetes: + kube_version: {{ .kube_version }} + # helm binary + helm_version: v3.13.3 + etcd: + # etcd binary + etcd_version: v3.5.10 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 cri: - sandbox_image: - tag: "3.8" # support: containerd,docker container_manager: containerd - + sandbox_image: + tag: "3.8" + # ========== cri ========== + # crictl binary + crictl_version: v1.30.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.7 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.6 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.2.1 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.4 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.13.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 4.0.0 + linux_utils_image: + tag: 4.0.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.1.0 + dns: + dns_image: + tag: v1.11.1 + dns_cache_image: + tag: 1.23.1 # image_manifests: # - docker.io/calico/apiserver:v3.28.2 # - docker.io/calico/cni:v3.28.2 diff --git a/builtin/core/defaults/config/v1.31.yaml b/builtin/core/defaults/config/v1.31.yaml index a94c9960..a572e158 100644 --- a/builtin/core/defaults/config/v1.31.yaml +++ b/builtin/core/defaults/config/v1.31.yaml @@ -1,70 +1,81 @@ apiVersion: kubekey.kubesphere.io/v1 kind: Config spec: - # zone for kk. how to download files - # kkzone: cn - # work_dir is the directory where the artifact is extracted. - # work_dir: /var/lib/kubekey/ - # the version of kubernetes to be installed. - # should be greater than or equal to kube_version_min_required. - kube_version: {{ .kube_version }} - # helm binary - helm_version: v3.13.3 - # etcd binary - etcd_version: v3.5.11 - # ========== image registry ========== - # keepalived image tag. Used for load balancing when there are multiple image registry nodes. 
- # keepalived_version: 2.0.20 - # ========== image registry: harbor ========== - # harbor image tag - # harbor_version: v2.10.1 - # docker-compose binary - # dockercompose_version: v2.20.3 - # ========== image registry: docker-registry ========== - # docker-registry image tag - # docker_registry_version: 2.8.3 - # ========== cri ========== - # crictl binary - crictl_version: v1.31.0 - # ========== cri: docker ========== - # docker binary - # docker_version: 24.0.7 - # cridockerd. Required when kube_version is greater than 1.24 - # cridockerd_version: v0.3.1 - # ========== cri: containerd ========== - # containerd binary - containerd_version: v1.7.6 - # runc binary - runc_version: v1.1.7 - # ========== cni ========== - # cni_plugins binary - # cni_plugins_version: v1.2.0 - # ========== cni: calico ========== - # calicoctl binary - calico_version: v3.28.2 - # ========== cni: cilium ========== - # cilium helm - # cilium_version: 1.15.4 - # ========== cni: kubeovn ========== - # kubeovn helm - # kubeovn_version: 1.13.0 - # ========== cni: hybridnet ========== - # hybridnet helm - # hybridnet_version: 0.6.8 - # ========== storageclass ========== - # ========== storageclass: nfs ========== - # nfs provisioner helm version - # nfs_provisioner_version: 4.0.20 - kubernetes: - controller_manager: - extra_args: - cluster-signing-duration: 87600h + download: + # if set as "cn", so that online downloads will try to use available domestic sources whenever possible. + zone: "" + kubernetes: + kube_version: {{ .kube_version }} + # helm binary + helm_version: v3.13.3 + etcd: + # etcd binary + etcd_version: v3.5.11 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. 
+ keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 cri: - sandbox_image: - tag: "3.8" # support: containerd,docker container_manager: containerd - + sandbox_image: + tag: "3.8" + # ========== cri ========== + # crictl binary + crictl_version: v1.31.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.7 + # cridockerd. Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.6 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.2.1 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.4 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.13.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 4.1.0 + linux_utils_image: + tag: 4.1.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.2.0 + dns: + dns_image: + tag: v1.12.0 + dns_cache_image: + tag: 1.23.1 # image_manifests: # - docker.io/calico/apiserver:v3.28.2 # - docker.io/calico/cni:v3.28.2 diff --git a/builtin/core/defaults/config/v1.32.yaml b/builtin/core/defaults/config/v1.32.yaml index 1cd86209..7810a887 100644 --- a/builtin/core/defaults/config/v1.32.yaml +++ b/builtin/core/defaults/config/v1.32.yaml @@ -1,70 +1,81 @@ apiVersion: 
kubekey.kubesphere.io/v1 kind: Config spec: - # zone for kk. how to download files - # kkzone: cn - # work_dir is the directory where the artifact is extracted. - # work_dir: /var/lib/kubekey/ - # the version of kubernetes to be installed. - # should be greater than or equal to kube_version_min_required. - kube_version: {{ .kube_version }} - # helm binary - helm_version: v3.14.3 - # etcd binary - etcd_version: v3.5.11 - # ========== image registry ========== - # keepalived image tag. Used for load balancing when there are multiple image registry nodes. - # keepalived_version: 2.0.20 - # ========== image registry: harbor ========== - # harbor image tag - # harbor_version: v2.10.1 - # docker-compose binary - # dockercompose_version: v2.20.3 - # ========== image registry: docker-registry ========== - # docker-registry image tag - # docker_registry_version: 2.8.3 - # ========== cri ========== - # crictl binary - crictl_version: v1.32.0 - # ========== cri: docker ========== - # docker binary - # docker_version: 24.0.7 - # cridockerd. 
Required when kube_version is greater than 1.24 - # cridockerd_version: v0.3.1 - # ========== cri: containerd ========== - # containerd binary - containerd_version: v1.7.6 - # runc binary - runc_version: v1.1.7 - # ========== cni ========== - # cni_plugins binary - # cni_plugins_version: v1.2.0 - # ========== cni: calico ========== - # calicoctl binary - calico_version: v3.28.2 - # ========== cni: cilium ========== - # cilium helm - # cilium_version: 1.15.4 - # ========== cni: kubeovn ========== - # kubeovn helm - # kubeovn_version: 1.13.0 - # ========== cni: hybridnet ========== - # hybridnet helm - # hybridnet_version: 0.6.8 - # ========== storageclass ========== - # ========== storageclass: nfs ========== - # nfs provisioner helm version - # nfs_provisioner_version: 4.0.20 - kubernetes: - controller_manager: - extra_args: - cluster-signing-duration: 87600h + download: + # if set as "cn", so that online downloads will try to use available domestic sources whenever possible. + zone: "" + kubernetes: + kube_version: {{ .kube_version }} + # helm binary + helm_version: v3.14.3 + etcd: + # etcd binary + etcd_version: v3.5.11 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 cri: - sandbox_image: - tag: "3.8" # support: containerd,docker container_manager: containerd - + sandbox_image: + tag: "3.8" + # ========== cri ========== + # crictl binary + crictl_version: v1.32.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.7 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.6 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.3.0 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.4 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.13.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 4.2.0 + linux_utils_image: + tag: 4.2.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.2.0 + dns: + dns_image: + tag: v1.12.0 + dns_cache_image: + tag: 1.24.0 # image_manifests: # - docker.io/calico/apiserver:v3.28.2 # - docker.io/calico/cni:v3.28.2 diff --git a/builtin/core/defaults/config/v1.33.yaml b/builtin/core/defaults/config/v1.33.yaml index 72420b2c..87086538 100644 --- a/builtin/core/defaults/config/v1.33.yaml +++ b/builtin/core/defaults/config/v1.33.yaml @@ -1,70 +1,81 @@ apiVersion: kubekey.kubesphere.io/v1 kind: Config spec: - # zone for kk. how to download files - # kkzone: cn - # work_dir is the directory where the artifact is extracted. - # work_dir: /var/lib/kubekey/ - # the version of kubernetes to be installed. - # should be greater than or equal to kube_version_min_required. - kube_version: {{ .kube_version }} - # helm binary - helm_version: v3.18.5 - # etcd binary - etcd_version: v3.5.11 - # ========== image registry ========== - # keepalived image tag. Used for load balancing when there are multiple image registry nodes. 
- # keepalived_version: 2.0.20 - # ========== image registry: harbor ========== - # harbor image tag - # harbor_version: v2.10.1 - # docker-compose binary - # dockercompose_version: v2.20.3 - # ========== image registry: docker-registry ========== - # docker-registry image tag - # docker_registry_version: 2.8.3 - # ========== cri ========== - # crictl binary - crictl_version: v1.33.0 - # ========== cri: docker ========== - # docker binary - # docker_version: 24.0.7 - # cridockerd. Required when kube_version is greater than 1.24 - # cridockerd_version: v0.3.1 - # ========== cri: containerd ========== - # containerd binary - containerd_version: v1.7.6 - # runc binary - runc_version: v1.1.7 - # ========== cni ========== - # cni_plugins binary - # cni_plugins_version: v1.2.0 - # ========== cni: calico ========== - # calicoctl binary - calico_version: v3.28.2 - # ========== cni: cilium ========== - # cilium helm - # cilium_version: 1.15.4 - # ========== cni: kubeovn ========== - # kubeovn helm - # kubeovn_version: 1.13.0 - # ========== cni: hybridnet ========== - # hybridnet helm - # hybridnet_version: 0.6.8 - # ========== storageclass ========== - # ========== storageclass: nfs ========== - # nfs provisioner helm version - # nfs_provisioner_version: 4.0.20 - kubernetes: - controller_manager: - extra_args: - cluster-signing-duration: 87600h + download: + # if set as "cn", so that online downloads will try to use available domestic sources whenever possible. + zone: "" + kubernetes: + kube_version: {{ .kube_version }} + # helm binary + helm_version: v3.18.5 + etcd: + # etcd binary + etcd_version: v3.5.11 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. 
+ keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 cri: - sandbox_image: - tag: "3.9" # support: containerd,docker container_manager: containerd - + sandbox_image: + tag: "3.9" + # ========== cri ========== + # crictl binary + crictl_version: v1.33.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.7 + # cridockerd. Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.6 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.3.0 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.4 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.13.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 4.3.0 + linux_utils_image: + tag: 4.3.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.3.0 + dns: + dns_image: + tag: v1.13.0 + dns_cache_image: + tag: 1.24.0 # image_manifests: # - docker.io/calico/apiserver:v3.28.2 # - docker.io/calico/cni:v3.28.2 diff --git a/builtin/core/playbooks/add_nodes.yaml b/builtin/core/playbooks/add_nodes.yaml index ccbb98e4..4175625c 100644 --- a/builtin/core/playbooks/add_nodes.yaml +++ b/builtin/core/playbooks/add_nodes.yaml @@ -1,97 +1,47 @@ --- -- import_playbook: hook/default.yaml 
+- import_playbook: hook/pre_install.yaml -# load defaults vars +# Load default variables and perform prechecks on all hosts - hosts: - all - vars_files: - - vars/common.yaml - - vars/kubernetes.yaml + gather_facts: true + roles: + - defaults + - precheck -# precheck +# Download all required software and generate certificates on the localhost - hosts: - localhost + gather_facts: true roles: - - role: precheck/artifact_check - when: .artifact.artifact_file | empty | not + - certs/init + - download + +# Initialize all nodes and install necessary software packages - hosts: - - k8s_cluster - etcd + - k8s_cluster - image_registry - nfs - gather_facts: true roles: - - precheck/env_check + - native -- hosts: - - localhost - gather_facts: true - roles: - - init/init-artifact - -# init os +# Install the etcd cluster - hosts: - etcd - - k8s_cluster - - registry - - nfs + gather_facts: true roles: - - init/init-os - -- hosts: - - kube_control_plane - tasks: - - name: select init node - run_once: true - add_hostvars: - hosts: k8s_cluster - vars: - init_kubernetes_node: >- - {{- $initNodes := list -}} - {{- range .groups.kube_control_plane -}} - {{- if index $.hostvars . "kubernetes_install_LoadState" "stdout" | eq "loaded" -}} - {{- $initNodes = append $initNodes . -}} - {{- end -}} - {{- end -}} - {{- if $initNodes | len | eq 1 -}} - {{ $initNodes | first }} - {{- else if $initNodes | len | lt 1 -}} - {{ index $initNodes (randInt 0 ((sub ($initNodes | len) 1) | int)) }} - {{- end -}} - - name: init node - when: eq .inventory_hostname .init_kubernetes_node - block: - - name: Generate certificate key by kubeadm - command: | - if [ ! 
-f /etc/kubernetes/kubeadm-config.yaml ]; then - kubectl get cm kubeadm-config -n kube-system -o=jsonpath='{.data.ClusterConfiguration}' > /etc/kubernetes/kubeadm-config.yaml - fi - /usr/local/bin/kubeadm init phase upload-certs --upload-certs --config /etc/kubernetes/kubeadm-config.yaml 2>&1 \ - | awk '/Using certificate key:/{getline; print}' - register: kubeadm_cert_result - - name: add certificate key to all hosts - add_hostvars: - hosts: k8s_cluster - vars: - kubeadm_cert: >- - {{ .kubeadm_cert_result.stdout }} - - name: Generate token by kubeadm - command: /usr/local/bin/kubeadm token create - register: kubeadm_token_result - - name: add token to all hosts - add_hostvars: - hosts: k8s_cluster - vars: - kubeadm_token: >- - {{ .kubeadm_token_result.stdout }} + - etcd - hosts: - k8s_cluster roles: - - role: install/cri + - role: cri + when: or (.add_nodes | default list | empty) (.add_nodes | default list | has .inventory_hostname) + - role: kubernetes/init-kubernetes when: or (.add_nodes | default list | empty) (.add_nodes | default list | has .inventory_hostname) - role: kubernetes/pre-kubernetes - when: or (.add_nodes | default list | empty) (.add_nodes | default list | has .inventory_hostname) + when: or (.add_nodes | default list | empty) (.add_nodes | default list | has .inventory_hostname) - role: kubernetes/join-kubernetes when: or (.add_nodes | default list | empty) (.add_nodes | default list | has .inventory_hostname) - role: kubernetes/certs diff --git a/builtin/core/playbooks/artifact_export.yaml b/builtin/core/playbooks/artifact_export.yaml index 6c14c64b..9dc308f2 100644 --- a/builtin/core/playbooks/artifact_export.yaml +++ b/builtin/core/playbooks/artifact_export.yaml @@ -1,11 +1,16 @@ --- -- import_playbook: hook/default.yaml +# Load default variables and perform prechecks on all hosts +- hosts: + - all + gather_facts: true + roles: + - defaults +# Download all required software and generate certificates on the localhost - hosts: - localhost 
roles: - - init/init-artifact - - init/init-cert + - download tasks: - name: Export artifact command: | diff --git a/builtin/core/playbooks/artifact_images.yaml b/builtin/core/playbooks/artifact_images.yaml index f79c64e1..ae6cb23d 100644 --- a/builtin/core/playbooks/artifact_images.yaml +++ b/builtin/core/playbooks/artifact_images.yaml @@ -1,12 +1,17 @@ --- -- import_playbook: hook/default.yaml +# Load default variables and perform prechecks on all hosts +- hosts: + - all + tags: ["always"] + gather_facts: true + roles: + - defaults - hosts: - localhost - tags: ["always"] - pre_tasks: - - name: Image | Download container images - tags: ["always"] + tasks: + - name: PullImage | Download container images + tags: ["pull"] image: pull: images_dir: >- @@ -14,6 +19,46 @@ manifests: "{{ .image_manifests | toJson }}" when: - .image_manifests | default list | empty | not - roles: - - role: install/image-registry - tags: ["always"] + - name: PushImage | Push images to registry + tags: ["push"] + block: + - name: PushImage | Ensure Harbor project exists for each image + when: .image_registry.type | eq "harbor" + command: | + # Traverse first-level subdirectories in images_dir, skipping 'blobs' + for registry_dir in {{ .binary_dir }}/images/*; do + if [ ! -d "$registry_dir" ] || [ "$(basename "$registry_dir")" = "blobs" ]; then + continue + fi + + # Traverse second-level subdirectories in each registry_dir + for project_dir in "$registry_dir"/*; do + if [ ! 
-d "$project_dir" ]; then + continue + fi + + project=$(basename "$project_dir") + + # Check if the Harbor project exists; create it if it does not + resp=$(curl -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" -k -X GET "https://{{ .image_registry.auth.registry }}/api/v2.0/projects/${project}") + if echo "$resp" | grep -q '"code":"NOT_FOUND"'; then + curl -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" -k -X POST \ + -H "Content-Type: application/json" \ + "https://{{ .image_registry.auth.registry }}/api/v2.0/projects" \ + -d "{ \"project_name\": \"${project}\", \"public\": true}" + fi + done + done + - name: PushImage | Push images package to image registry + image: + push: + images_dir: >- + {{ .binary_dir }}/images/ + dest: >- + {{ .image_registry.auth.registry }}/{{ .module.image.src.reference.repository }}:{{ .module.image.src.reference.reference }} + username: >- + {{ .image_registry.auth.username }} + password: >- + {{ .image_registry.auth.password }} + skip_tls_verify: true + diff --git a/builtin/core/playbooks/certs_renew.yaml b/builtin/core/playbooks/certs_renew.yaml index 20c30b51..f9ed9a25 100644 --- a/builtin/core/playbooks/certs_renew.yaml +++ b/builtin/core/playbooks/certs_renew.yaml @@ -1,20 +1,17 @@ --- -# load defaults vars -- import_playbook: hook/default.yaml - +# Load default variables and perform prechecks on all hosts - hosts: - all - vars_files: - - vars/certs_renew.yaml + gather_facts: true + roles: + - defaults - hosts: - localhost - tags: ["certs"] roles: - - init/init-cert + - cert/init - hosts: - all - tags: ["certs"] roles: - role: certs/renew diff --git a/builtin/core/playbooks/create_cluster.yaml b/builtin/core/playbooks/create_cluster.yaml index af411d60..03a90a8c 100644 --- a/builtin/core/playbooks/create_cluster.yaml +++ b/builtin/core/playbooks/create_cluster.yaml @@ -1,90 +1,71 @@ --- -- import_playbook: hook/default.yaml - import_playbook: hook/pre_install.yaml -# load 
defaults vars +# Load default variables and perform prechecks on all hosts - hosts: - all - vars_files: - - vars/common.yaml - - vars/kubernetes.yaml + gather_facts: true + roles: + - defaults + - precheck -# precheck +# Download all required software and generate certificates on the localhost - hosts: - localhost roles: - - role: precheck/artifact_check - when: .artifact.artifact_file | empty | not -- hosts: - - k8s_cluster - - etcd - - image_registry - - nfs - gather_facts: true - roles: - - precheck/env_check + - certs/init + - download -- hosts: - - localhost - gather_facts: true - roles: - - init/init-artifact - - init/init-cert - -# init os +# Initialize all nodes and install necessary software packages - hosts: - etcd - k8s_cluster - image_registry - nfs roles: - - init/init-os - -# install -- hosts: - - nfs - gather_facts: true - roles: - - install/nfs + - native +# Install the etcd cluster - hosts: - etcd - gather_facts: true roles: - - install/etcd + - etcd +# Install the private image registry - hosts: - image_registry - gather_facts: true roles: - - install/image-registry + - image-registry +# Install the Kubernetes cluster - hosts: - k8s_cluster gather_facts: true roles: - - install/cri + - cri - kubernetes/pre-kubernetes - kubernetes/init-kubernetes - - kubernetes/join-kubernetes + - role: kubernetes/join-kubernetes + when: + - .init_kubernetes_node | ne .inventory_hostname + - .kubernetes_install_LoadState.stdout | eq "not-found" - role: kubernetes/certs when: - .kubernetes.certs.renew - .groups.kube_control_plane | default list | has .inventory_hostname post_tasks: - - name: Add custom label to cluster + - name: Add custom labels to the cluster nodes command: | {{- range $k, $v := .kubernetes.custom_labels }} /usr/local/bin/kubectl label --overwrite node {{ $.hostname }} {{ $k }}={{ $v }} {{- end }} when: .kubernetes.custom_label | empty | not +# Install Kubernetes cluster software components (CNI and storage class) on a random control plane node - 
hosts: - kube_control_plane|random roles: - - install/cni - - install/storageclass - - role: install/security - when: .security_enhancement + - cni + - storage-class - import_playbook: hook/post_install.yaml \ No newline at end of file diff --git a/builtin/core/playbooks/delete_cluster.yaml b/builtin/core/playbooks/delete_cluster.yaml index d17fc08f..5df218d2 100644 --- a/builtin/core/playbooks/delete_cluster.yaml +++ b/builtin/core/playbooks/delete_cluster.yaml @@ -1,12 +1,12 @@ --- -- import_playbook: hook/default.yaml +- import_playbook: hook/pre_install.yaml -# load defaults vars +# Load default variables and perform prechecks on all hosts - hosts: - all - vars_files: - - vars/common.yaml - - vars/kubernetes.yaml + gather_facts: true + roles: + - defaults - hosts: - k8s_cluster @@ -19,11 +19,12 @@ post_tasks: - name: delete localDNS file ignore_errors: true + loop: "{{ .native.localDNS | toJson }}" command: | sed -i ':a;$!{N;ba};s@# kubekey hosts BEGIN.*# kubekey hosts END@@' {{ .item }} - sed -i ':a;$!{N;ba};s@# kubekey control_plane_endpoint BEGIN.*# kubekey control_plane_endpoint END@@' {{ .item }} + sed -i ':a;$!{N;ba};s@# kubekey kubernetes control_plane_endpoint BEGIN.*# kubekey kubernetes control_plane_endpoint END@@' {{ .item }} + sed -i ':a;$!{N;ba};s@# kubekey image_registry control_plane_endpoint BEGIN.*# kubekey image_registry control_plane_endpoint END@@' {{ .item }} when: .deleteDNS - loop: "{{ .localDNS | toJson }}" - hosts: - etcd diff --git a/builtin/core/playbooks/delete_nodes.yaml b/builtin/core/playbooks/delete_nodes.yaml index 7e7d0211..db579902 100644 --- a/builtin/core/playbooks/delete_nodes.yaml +++ b/builtin/core/playbooks/delete_nodes.yaml @@ -1,32 +1,17 @@ --- -- import_playbook: hook/default.yaml - -# load defaults vars +# Load default variables and perform prechecks on all hosts - hosts: - all - vars_files: - - vars/common.yaml - - vars/kubernetes.yaml + gather_facts: true + roles: + - defaults + - precheck - hosts: - 
kube_control_plane gather_facts: true tasks: - - name: Get kubelet.service LoadState and save to variable - command: systemctl show kubelet.service -p LoadState --value - register: kubernetes_install_LoadState - - name: Get kubelet.service ActiveState and save to variable - command: systemctl show kubelet.service -p ActiveState --value - register: kubernetes_install_ActiveState - - name: Check kubernetes service and version - when: .kubernetes_install_LoadState.stdout | eq "loaded" - block: - - name: Kubernetes should be active - assert: - that: .kubernetes_install_ActiveState.stdout | eq "active" - fail_msg: >- - kubernetes should be active when it's loaded - - name: Keep at least one control_plane node. + - name: DeleteNode | Ensure at least one control plane node remains in the cluster run_once: true command: | {{- $cpNodes := list -}} @@ -35,17 +20,15 @@ {{- $cpNodes = append $cpNodes . -}} {{- end -}} {{- end -}} - {{- if (subtractList $cpNodes .delete_nodes) | empty | not }} - exit 0 - {{- else }} - echo "should keep at least one control_plane" + {{- if (subtractList $cpNodes .delete_nodes) | empty }} + echo "At least one control plane node must be retained in the cluster." 
>&2 exit 1 {{- end }} - hosts: - k8s_cluster pre_tasks: - - name: delete node from cluster + - name: DeleteNode | Remove node from Kubernetes cluster when: .delete_nodes | default list | has .inventory_hostname command: | if kubectl get node {{ .hostname }} > /dev/null 2>&1; then @@ -55,7 +38,7 @@ else kubectl drain {{ .hostname }} --ignore-daemonsets --delete-emptydir-data --force fi - {{- if .kubernetes.kube_network_plugin | eq "calico" }} + {{- if .cni.type | eq "calico" }} calicoctl delete node {{ .hostname }} {{- end }} kubectl delete node {{ .hostname }} @@ -69,15 +52,16 @@ - .groups.image_registry | default list | has .inventory_hostname | not - .delete_nodes | default list | has .inventory_hostname post_tasks: - - name: delete localDNS file + - name: DeleteNode | Clean up local DNS configuration files ignore_errors: true + loop: "{{ .native.localDNS | toJson }}" command: | sed -i ':a;$!{N;ba};s@# kubekey hosts BEGIN.*# kubekey hosts END@@' {{ .item }} - sed -i ':a;$!{N;ba};s@# kubekey control_plane_endpoint BEGIN.*# kubekey control_plane_endpoint END@@' {{ .item }} + sed -i ':a;$!{N;ba};s@# kubekey kubernetes control_plane_endpoint BEGIN.*# kubekey kubernetes control_plane_endpoint END@@' {{ .item }} + sed -i ':a;$!{N;ba};s@# kubekey image_registry control_plane_endpoint BEGIN.*# kubekey image_registry control_plane_endpoint END@@' {{ .item }} when: - .deleteDNS - .delete_nodes | default list | has .inventory_hostname - loop: "{{ .localDNS | toJson }}" - hosts: - etcd diff --git a/builtin/core/playbooks/delete_registry.yaml b/builtin/core/playbooks/delete_registry.yaml index aa5d412e..1a00e737 100644 --- a/builtin/core/playbooks/delete_registry.yaml +++ b/builtin/core/playbooks/delete_registry.yaml @@ -1,12 +1,10 @@ --- -- import_playbook: hook/default.yaml - -# load defaults vars +# Load default variables and perform prechecks on all hosts - hosts: - all - vars_files: - - vars/common.yaml - - vars/kubernetes.yaml + gather_facts: true + roles: + - defaults 
- hosts: - image_registry diff --git a/builtin/core/playbooks/hook/default.yaml b/builtin/core/playbooks/hook/default.yaml deleted file mode 100644 index 7a64deef..00000000 --- a/builtin/core/playbooks/hook/default.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- hosts: - - all - gather_facts: true - tags: ["always"] - vars: - architectures: - amd64: - - amd64 - - x86_64 - arm64: - - arm64 - - aarch64 - tasks: - - name: Get os arch for each node - tags: ["always"] - set_fact: - binary_type: >- - {{- if .architectures.amd64 | has .os.architecture -}} - amd64 - {{- else if .architectures.arm64 | has .os.architecture -}} - arm64 - {{- end -}} - -- hosts: - - all - tags: ["always"] - vars: - # work_dir: default is /kubekey - binary_dir: >- - {{ .work_dir }}/kubekey - scripts_dir: >- - {{ .binary_dir }}/scripts - tmp_dir: /tmp/kubekey \ No newline at end of file diff --git a/builtin/core/playbooks/hook/post_install.yaml b/builtin/core/playbooks/hook/post_install.yaml index a48f7428..2eed64bd 100644 --- a/builtin/core/playbooks/hook/post_install.yaml +++ b/builtin/core/playbooks/hook/post_install.yaml @@ -1,9 +1,16 @@ --- -- name: Execute post install scripts +- name: Post | Apply Security Enhancements + hosts: + - all + roles: + - role: security + when: .security_enhancement + +- name: Post | Run Post-Installation Scripts hosts: - all tasks: - - name: Copy post install scripts to remote + - name: Post | Copy post-installation scripts to remote hosts ignore_errors: true copy: src: >- @@ -11,15 +18,15 @@ dest: >- /etc/kubekey/scripts/post_install_{{ .inventory_hostname }}.sh mode: 0755 - register: execute_result + register: post_install_copy_result - - name: Execute post install scripts - when: .execute_result.error | empty + - name: Post | Execute post-installation scripts on remote hosts + when: .post_install_copy_result.error | empty command: | for file in /etc/kubekey/scripts/post_install_*.sh; do - if [ -f $file ]; then - # execute file - chmod +x $file - $file + if [ -f 
"$file" ]; then + # Make the script executable and run it + chmod +x "$file" + "$file" fi - done + done \ No newline at end of file diff --git a/builtin/core/playbooks/hook/pre_install.yaml b/builtin/core/playbooks/hook/pre_install.yaml index b32a24b5..25167316 100644 --- a/builtin/core/playbooks/hook/pre_install.yaml +++ b/builtin/core/playbooks/hook/pre_install.yaml @@ -1,9 +1,9 @@ --- -- name: Execute pre install scripts +- name: Pre | Run Pre-Installation Scripts hosts: - all tasks: - - name: Copy pre install scripts to remote + - name: Pre | Copy pre-installation scripts to remote hosts ignore_errors: true copy: src: >- @@ -11,15 +11,15 @@ dest: >- /etc/kubekey/scripts/pre_install_{{ .inventory_hostname }}.sh mode: 0755 - register: execute_result + register: pre_install_copy_result - - name: Execute pre install scripts - when: .execute_result.error | empty + - name: Pre | Execute pre-installation scripts on remote hosts + when: .pre_install_copy_result.error | empty command: | for file in /etc/kubekey/scripts/pre_install_*.sh; do - if [ -f $file ]; then - # execute file - chmod +x $file - $file + if [ -f "$file" ]; then + # Make the script executable and run it + chmod +x "$file" + "$file" fi done diff --git a/builtin/core/playbooks/init_os.yaml b/builtin/core/playbooks/init_os.yaml index 134ea733..f9f6edc9 100644 --- a/builtin/core/playbooks/init_os.yaml +++ b/builtin/core/playbooks/init_os.yaml @@ -1,16 +1,25 @@ --- -- import_playbook: hook/default.yaml +# Load default variables and perform prechecks on all hosts +- hosts: + - all + gather_facts: true + roles: + - defaults +# Download all required software and generate certificates on the localhost - hosts: - localhost + gather_facts: true roles: - - init/init-artifact - - init/init-cert + - certs/init + - download +# Initialize all nodes and install necessary software packages - hosts: - etcd - k8s_cluster - - registry + - image_registry - nfs roles: - - init/init-os + - native + diff --git 
a/builtin/core/playbooks/init_registry.yaml b/builtin/core/playbooks/init_registry.yaml index b2d21090..5c2b4f8a 100644 --- a/builtin/core/playbooks/init_registry.yaml +++ b/builtin/core/playbooks/init_registry.yaml @@ -1,15 +1,25 @@ --- -- import_playbook: hook/default.yaml - +# Load default variables and perform prechecks on all hosts - hosts: - - localhost - roles: - - init/init-artifact - - init/init-cert - -- hosts: - - image_registry + - all + tags: ["always"] gather_facts: true roles: - - init/init-os - - install/image-registry + - role: defaults + + +# Download all required software and generate certificates on the localhost +- hosts: + - localhost + gather_facts: true + roles: + - role: certs/init + - role: download + +# Initialize all nodes and install necessary software packages +- hosts: + - image_registry + tags: ["always"] + roles: + - role: native + - role: image-registry diff --git a/builtin/core/playbooks/precheck.yaml b/builtin/core/playbooks/precheck.yaml index d85ee5b9..ccb6b08c 100644 --- a/builtin/core/playbooks/precheck.yaml +++ b/builtin/core/playbooks/precheck.yaml @@ -1,19 +1,8 @@ --- -- import_playbook: hook/default.yaml - +# Load default variables and perform prechecks on all hosts - hosts: - - localhost - roles: - - role: precheck/artifact_check - when: .artifact.artifact_file | empty | not - -- hosts: - - k8s_cluster - - etcd - - image_registry - - nfs + - all gather_facts: true - tags: ["always"] roles: - - role: precheck/env_check - tags: ["always"] + - defaults + - precheck diff --git a/builtin/core/playbooks/vars/certs_renew.yaml b/builtin/core/playbooks/vars/certs_renew.yaml deleted file mode 100644 index 2609b7f9..00000000 --- a/builtin/core/playbooks/vars/certs_renew.yaml +++ /dev/null @@ -1,8 +0,0 @@ -kubernetes: - etcd: - deployment_type: external -cri: - # support: containerd,docker - container_manager: docker -image_registry: - type: harbor diff --git a/builtin/core/playbooks/vars/common.yaml 
b/builtin/core/playbooks/vars/common.yaml deleted file mode 100644 index ad7d26ca..00000000 --- a/builtin/core/playbooks/vars/common.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# The global registry used for all images. Leave empty to use default registries. -global_registry: "" - -# The registry to use for docker.io images. -dockerio_registry: >- - {{- if .global_registry | empty | not -}} - {{ .global_registry }} - {{- else -}} - docker.io - {{- end -}} - -# The registry to use for quay.io images. -quayio_registry: >- - {{- if .global_registry | empty | not -}} - {{ .global_registry }} - {{- else -}} - quay.io - {{- end -}} - -# The registry to use for ghcr.io images. -ghcrio_registry: >- - {{- if .global_registry | empty | not -}} - {{ .global_registry }} - {{- else -}} - ghcr.io - {{- end -}} - -# Enable or disable security enhancement features. -security_enhancement: false - -# Set to true to remove the container runtime interface (CRI) such as containerd or Docker from target nodes. -deleteCRI: false - -# Set to true to uninstall etcd from target nodes. -deleteETCD: false - -# Set to true to remove local DNS entries managed by Kubekey from the specified files. -deleteDNS: false - -# Set to true to uninstall the image registry from target nodes. -deleteImageRegistry: false - -# List of local DNS files to clean up if deleteDNS is enabled. -localDNS: - - /etc/hosts \ No newline at end of file diff --git a/builtin/core/playbooks/vars/kubernetes.yaml b/builtin/core/playbooks/vars/kubernetes.yaml deleted file mode 100644 index dcc48735..00000000 --- a/builtin/core/playbooks/vars/kubernetes.yaml +++ /dev/null @@ -1,193 +0,0 @@ -kubernetes: - cluster_name: kubekey - # Supported network plugins: flannel, calico - kube_network_plugin: calico - # The image repository for Kubernetes components. - image_repository: >- - {{ .dockerio_registry }}/kubesphere - # Minimum memory (in MB) required for each kube_worker node. - # This value must be at least minimal_node_memory_mb. 
- minimal_node_memory_mb: 10 - # Maximum number of pods allowed per node. - max_pods: 110 - audit: false - networking: - # The complete pod CIDR for the cluster. Supports: ipv4, ipv6, or dual-stack (ipv4,ipv6). - pod_cidr: 10.233.64.0/18 - # Subnet mask size for IPv4 pod CIDR on each node. - ipv4_mask_size: 24 - # Subnet mask size for IPv6 pod CIDR on each node. - ipv6_mask_size: 64 - # The complete service CIDR for the cluster. Supports: ipv4, ipv6, or dual-stack (ipv4,ipv6). - service_cidr: 10.233.0.0/18 - dns_domain: cluster.local - dns_image: - registry: >- - {{ .dockerio_registry }} - repository: >- - coredns - tag: 1.8.6 - dns_cache_image: - registry: >- - {{ .dockerio_registry }} - repository: kubesphere/k8s-dns-node-cache - tag: 1.22.20 - dns_service_ip: >- - {{ index (.kubernetes.networking.service_cidr | ipInCIDR) 2 }} - # The IP address for nodelocaldns to bind. - clusterDNS: 169.254.25.10 - apiserver: - port: 6443 - certSANs: [] - extra_args: - # Example: feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true - controller_manager: - extra_args: - # Example: feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true - scheduler: - extra_args: - # Example: feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true - kube_proxy: - enabled: true - # Supported proxy modes: ipvs, iptables - mode: "ipvs" - config: - iptables: - masqueradeAll: false - masqueradeBit: 14 - minSyncPeriod: 0s - syncPeriod: 30s - kubelet: - max_pod: 110 - pod_pids_limit: 10000 -# feature_gates: - container_log_max_size: 5Mi - container_log_max_files: 3 -# extra_args: - coredns: - dns_etc_hosts: [] - # DNS zone configuration - zone_configs: - # Each entry defines DNS zones to match. Default port is 53. - # ".": matches all DNS zones. - # "example.com": matches *.example.com using DNS server on port 53. 
- # "example.com:54": matches *.example.com using DNS server on port 54. - - zones: [".:53"] - additional_configs: - - errors - - ready - - prometheus :9153 - - loop - - reload - - loadbalance - cache: 30 - kubernetes: - zones: - - "{{ .kubernetes.networking.dns_domain }}" - # Internal DNS message rewriting can be configured here. -# rewrite: -# - rule: continue -# field: name -# type: exact -# value: "example.com example2.com" -# options: "" - forward: - # Forwarding rules for DNS queries. - - from: "." - # Destination endpoints for forwarding. The TO syntax allows protocol specification. - to: ["/etc/resolv.conf"] - # List of domains to exclude from forwarding. - except: [] - # Use TCP for forwarding even if the request was over UDP. - force_tcp: false - # Prefer UDP for forwarding, retry with TCP if response is truncated. - prefer_udp: false - # Number of consecutive failed health checks before marking an upstream as down. -# max_fails: 2 - # Time after which cached connections expire. -# expire: 10s - # TLS properties for secure connections can be set here. -# tls: -# cert_file: "" -# key_file: "" -# ca_file: "" -# tls_servername: "" - # Policy for selecting upstream servers: random (default), round_robin, sequential. -# policy: "random" - # Health check configuration for upstream servers. -# health_check: "" - # Maximum number of concurrent DNS queries. - max_concurrent: 1000 - # Specify a stable IP address or DNS name for the control plane endpoint. - # For high availability, it is recommended to use a DNS domain name for control_plane_endpoint. - # Options: - # 1. If a DNS domain name is available: - # - Set control_plane_endpoint to the DNS name and configure it to resolve to all control plane node IPs. - # 2. If a DNS domain name is not available: - # - Set control_plane_endpoint to a DNS name that can be added later. 
- # - Add the DNS name resolution to the localDNS file on each node in the format: - # {{ vip }} {{ control_plane_endpoint }} - # - If a VIP is available: - # Deploy kube-vip on control plane nodes to map the VIP to the actual node IPs. - # - If a VIP is not available: - # Deploy HAProxy on worker nodes. Map a fixed IP (e.g., 127.0.0.2) as the VIP and route it to all control plane node IPs. - # - # Non-HA scenario: (No installation provided; parameters are for manual configuration.) - # In this case, set the VIP to one of the control plane nodes. - control_plane_endpoint: - host: lb.kubesphere.local - port: "{{ .kubernetes.apiserver.port }}" - # Supported types: local, kube_vip, haproxy - # If type is local, the following applies: - # - On control-plane nodes: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }} - # - On worker nodes: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }} - type: local - kube_vip: - # The IP address of the node's network interface (e.g., "eth0"). - # address: - # Supported modes: ARP, BGP - mode: ARP - image: - registry: >- - {{ .dockerio_registry }} - repository: plndr/kube-vip - tag: v0.7.2 - haproxy: - # The IP address on the node's "lo" (loopback) interface. - address: 127.0.0.1 - health_port: 8081 - image: - registry: >- - {{ .dockerio_registry }} - repository: library/haproxy - tag: 2.9.6-alpine - etcd: - # etcd can be deployed in three ways: - # - external: Use an external etcd cluster. - # - internal: Deploy etcd as a static pod. - deployment_type: external - image: - registry: >- - {{ .dockerio_registry }} - repository: kubesphere/etcd - tag: 3.5.0 - # custom_labels: {} - # Enable or disable automatic renewal of Kubernetes certificates. - certs: - # Kubernetes Certificate Authority (CA) files can be provided in three ways: - # 1. kubeadm: Leave ca_cert and ca_key empty to let kubeadm generate them automatically. - # These certificates are valid for 10 years and remain unchanged. - # 2. 
kubekey: Set ca_cert to {{ .binary_dir }}/pki/ca.cert and ca_key to {{ .binary_dir }}/pki/ca.key. - # These are generated by kubekey, valid for 10 years, and can be updated using `cert.ca_date`. - # 3. custom: Provide your own CA files by specifying the absolute paths for ca_cert and ca_key. - # - # To use custom CA files, specify their absolute paths below. - # If left empty, the default behavior (kubeadm or kubekey) will be used. - ca_cert: "" - ca_key: "" - # The following fields are for the Kubernetes front-proxy CA certificate and key. - # To use custom front-proxy CA files, specify their absolute paths below. - # If left empty, the default behavior will be used. - front_proxy_cert: "" - front_proxy_key: "" - renew: true diff --git a/builtin/core/roles/init/init-cert/tasks/main.yaml b/builtin/core/roles/certs/init/tasks/main.yaml similarity index 97% rename from builtin/core/roles/init/init-cert/tasks/main.yaml rename to builtin/core/roles/certs/init/tasks/main.yaml index c3016589..ab7b02a1 100644 --- a/builtin/core/roles/init/init-cert/tasks/main.yaml +++ b/builtin/core/roles/certs/init/tasks/main.yaml @@ -1,5 +1,6 @@ --- - name: Cert | Generate the root CA certificate file + tags: ["always"] gen_cert: cn: root date: "{{ .certs.ca.date }}" @@ -10,6 +11,7 @@ {{ .binary_dir }}/pki/root.crt - name: Cert | Generate Kubernetes CA certificates + tags: ["kubernetes"] block: - name: Cert | Generate the Kubernetes CA certificate file gen_cert: @@ -41,6 +43,7 @@ {{ .binary_dir }}/pki/front-proxy.crt - name: Cert | Generate the etcd certificate file + tags: ["etcd"] gen_cert: root_key: >- {{ .binary_dir }}/pki/root.key @@ -69,6 +72,7 @@ when: .groups.etcd | default list | empty | not - name: Cert | Generate the image registry certificate file + tags: ["image_registry"] gen_cert: root_key: >- {{ .binary_dir }}/pki/root.key @@ -100,6 +104,7 @@ when: .groups.image_registry | default list | empty | not - name: Cert | Set ownership of the PKI directory to the sudo user + 
tags: ["kubernetes"] block: - name: Cert | Change ownership of the PKI directory to the sudo user ignore_errors: true diff --git a/builtin/core/roles/certs/renew/etcd/tasks/main.yaml b/builtin/core/roles/certs/renew/etcd/tasks/main.yaml index 8bd995c3..838e12b5 100644 --- a/builtin/core/roles/certs/renew/etcd/tasks/main.yaml +++ b/builtin/core/roles/certs/renew/etcd/tasks/main.yaml @@ -2,19 +2,19 @@ - name: ETCD | Copy CA certificate to remote host copy: src: >- - {{ .binary_dir }}/pki/root.crt + {{ ..etcd.ca_file }} dest: /etc/ssl/etcd/ssl/ca.crt - name: ETCD | Copy server certificate to remote host copy: src: >- - {{ .binary_dir }}/pki/etcd.crt + {{ .etcd.cert_file }} dest: /etc/ssl/etcd/ssl/server.crt - name: ETCD | Copy server private key to remote host copy: src: >- - {{ .binary_dir }}/pki/etcd.key + {{ .etcd.key_file }} dest: /etc/ssl/etcd/ssl/server.key - name: ETCD | Restart etcd service to apply new certificates diff --git a/builtin/core/roles/certs/renew/image-registry/tasks/harbor.yaml b/builtin/core/roles/certs/renew/image-registry/tasks/harbor.yaml index 8f886fd2..daed5f7f 100644 --- a/builtin/core/roles/certs/renew/image-registry/tasks/harbor.yaml +++ b/builtin/core/roles/certs/renew/image-registry/tasks/harbor.yaml @@ -4,14 +4,14 @@ src: >- {{ .binary_dir }}/pki/image_registry.crt dest: >- - /opt/harbor/{{ .harbor_version }}/ssl/server.crt + /opt/harbor/{{ .image_registry.harbor_version }}/ssl/server.crt - name: Harbor | Copy image registry private key to remote host copy: src: >- {{ .binary_dir }}/pki/image_registry.key dest: >- - /opt/harbor/{{ .harbor_version }}/ssl/server.key + /opt/harbor/{{ .image_registry.harbor_version }}/ssl/server.key - name: Harbor | Restart Harbor service to apply new certificates command: systemctl restart harbor.service diff --git a/builtin/core/roles/certs/renew/image-registry/tasks/registry.yaml b/builtin/core/roles/certs/renew/image-registry/tasks/registry.yaml index f664f455..356c0980 100644 --- 
a/builtin/core/roles/certs/renew/image-registry/tasks/registry.yaml +++ b/builtin/core/roles/certs/renew/image-registry/tasks/registry.yaml @@ -4,14 +4,14 @@ src: >- {{ .binary_dir }}/pki/image_registry.crt dest: >- - /opt/docker-registry/{{ .docker_registry_version }}/ssl/server.crt + /opt/docker-registry/{{ .image_registry.docker_registry_version }}/ssl/server.crt - name: Docker Registry | Copy image registry private key to remote host copy: src: >- {{ .binary_dir }}/pki/image_registry.key dest: >- - /opt/docker-registry/{{ .docker_registry_version }}/ssl/server.key + /opt/docker-registry/{{ .image_registry.docker_registry_version }}/ssl/server.key - name: Docker Registry | Restart registry service to apply new certificates - command: systemctl restart registry.service + command: systemctl restart docker-registry.service diff --git a/builtin/core/roles/certs/renew/kubernetes/tasks/etcd.yaml b/builtin/core/roles/certs/renew/kubernetes/tasks/etcd.yaml index ad83998a..dfd71d81 100644 --- a/builtin/core/roles/certs/renew/kubernetes/tasks/etcd.yaml +++ b/builtin/core/roles/certs/renew/kubernetes/tasks/etcd.yaml @@ -2,20 +2,20 @@ - name: ETCD | Copy CA certificate to remote host copy: src: >- - {{ .binary_dir }}/pki/root.crt + {{ .etcd.ca_file }} dest: /etc/kubernetes/pki/etcd/ca.crt mode: 0755 - name: ETCD | Copy client certificate to remote host copy: src: >- - {{ .binary_dir }}/pki/etcd.crt + {{ .etcd.cert_file }} dest: /etc/kubernetes/pki/etcd/client.crt mode: 0755 - name: ETCD | Copy client key to remote host copy: src: >- - {{ .binary_dir }}/pki/etcd.key + {{ .etcd.key_file }} dest: /etc/kubernetes/pki/etcd/client.key mode: 0755 diff --git a/builtin/core/roles/certs/renew/kubernetes/tasks/kube.yaml b/builtin/core/roles/certs/renew/kubernetes/tasks/kube.yaml index 14b6b3c4..5e3ca321 100644 --- a/builtin/core/roles/certs/renew/kubernetes/tasks/kube.yaml +++ b/builtin/core/roles/certs/renew/kubernetes/tasks/kube.yaml @@ -15,7 +15,7 @@ /usr/local/bin/kubeadm alpha 
certs renew admin.conf /usr/local/bin/kubeadm alpha certs renew controller-manager.conf /usr/local/bin/kubeadm alpha certs renew scheduler.conf - {{- if .kubernetes.etcd.deployment_type | eq "internal" }} + {{- if .etcd.deployment_type | eq "internal" }} /usr/local/bin/kubeadm alpha certs renew etcd-healthcheck-client /usr/local/bin/kubeadm alpha certs renew etcd-peer /usr/local/bin/kubeadm alpha certs renew etcd-server @@ -28,7 +28,7 @@ /usr/local/bin/kubeadm certs renew admin.conf /usr/local/bin/kubeadm certs renew controller-manager.conf /usr/local/bin/kubeadm certs renew scheduler.conf - {{- if .kubernetes.etcd.deployment_type | eq "internal" }} + {{- if .etcd.deployment_type | eq "internal" }} /usr/local/bin/kubeadm certs renew etcd-healthcheck-client /usr/local/bin/kubeadm certs renew etcd-peer /usr/local/bin/kubeadm certs renew etcd-server diff --git a/builtin/core/roles/certs/renew/kubernetes/tasks/main.yaml b/builtin/core/roles/certs/renew/kubernetes/tasks/main.yaml index a084f200..f65d382d 100644 --- a/builtin/core/roles/certs/renew/kubernetes/tasks/main.yaml +++ b/builtin/core/roles/certs/renew/kubernetes/tasks/main.yaml @@ -3,7 +3,7 @@ - include_tasks: etcd.yaml when: - - .kubernetes.etcd.deployment_type | eq "external" + - .etcd.deployment_type | eq "external" - .groups.etcd | default list | empty | not - name: Kubernetes | Restart Kubernetes control plane pods @@ -13,7 +13,7 @@ docker ps -af name=k8s_PODS_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f docker ps -af name=k8s_PODS_kube-controller-manager* -q | xargs --no-run-if-empty docker rm -f docker ps -af name=k8s_PODS_kube-scheduler* -q | xargs --no-run-if-empty docker rm -f - {{- if .kubernetes.etcd.deployment_type | eq "docker" }} + {{- if .etcd.deployment_type | eq "docker" }} # Restarting etcd pods managed by Docker docker ps -af name=k8s_PODS_etcd* -q | xargs --no-run-if-empty docker rm -f {{- end }} @@ -22,7 +22,7 @@ crictl pods --name kube-apiserver-* -q | xargs -I% 
--no-run-if-empty bash -c 'crictl stopp % && crictl rmp %' crictl pods --name kube-controller-manager-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %' crictl pods --name kube-scheduler-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %' - {{- if .kubernetes.etcd.deployment_type | eq "internal" }} + {{- if .etcd.deployment_type | eq "internal" }} # Restarting etcd pods managed by the container runtime crictl pods --name etcd-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %' {{- end }} diff --git a/builtin/core/roles/cni/calico/defaults/main.yaml b/builtin/core/roles/cni/calico/defaults/main.yaml new file mode 100644 index 00000000..69dbecae --- /dev/null +++ b/builtin/core/roles/cni/calico/defaults/main.yaml @@ -0,0 +1,12 @@ +cni: + calico: + values: | + # calico helm values + tigeraOperator: + registry: {{ .image_registry.quayio_registry }} + calicoctl: + image: {{ .image_registry.dockerio_registry }}/calico/ctl + installation: + registry: {{ .image_registry.dockerio_registry }} + calicoNetwork: + bgp: Enabled \ No newline at end of file diff --git a/builtin/core/roles/cni/calico/tasks/main.yaml b/builtin/core/roles/cni/calico/tasks/main.yaml new file mode 100644 index 00000000..28644013 --- /dev/null +++ b/builtin/core/roles/cni/calico/tasks/main.yaml @@ -0,0 +1,33 @@ +--- +- name: Calico | Check if calicoctl is installed + ignore_errors: true + command: calicoctl version + register: calicoctl_install_version + register_type: yaml + +- name: Calico | Install calicoctl if it is not present + when: .calicoctl_install_version.error | empty | not + block: + - name: Calico | Copy calicoctl binary to remote node + copy: + src: >- + {{ .binary_dir }}/cni/calico/{{ .cni.calico_version }}/{{ .binary_type }}/calicoctl + dest: /usr/local/bin/calicoctl + mode: 0755 + +- name: Calico | Copy Calico Helm package to remote node + copy: + src: >- + {{ .binary_dir }}/cni/calico/tigera-operator-{{ 
.cni.calico_version }}.tgz + dest: >- + /etc/kubernetes/cni/tigera-operator-{{ .cni.calico_version }}.tgz + +- name: Calico | Generate custom values file for Calico + copy: + content: | + {{ .cni.calico.values }} + dest: /etc/kubernetes/cni/calico-values.yaml + +- name: Calico | Deploy Calico using Helm + command: | + helm upgrade --install --create-namespace --namespace tigera-operator calico /etc/kubernetes/cni/tigera-operator-{{ .cni.calico_version }}.tgz -f /etc/kubernetes/cni/calico-values.yaml diff --git a/builtin/core/roles/install/cni/cilium/defaults/main.yaml b/builtin/core/roles/cni/cilium/defaults/main.yaml similarity index 54% rename from builtin/core/roles/install/cni/cilium/defaults/main.yaml rename to builtin/core/roles/cni/cilium/defaults/main.yaml index e18f39a6..0c9ad8c4 100644 --- a/builtin/core/roles/install/cni/cilium/defaults/main.yaml +++ b/builtin/core/roles/cni/cilium/defaults/main.yaml @@ -3,47 +3,47 @@ cni: values: | # cilium helm values image: - repository: {{ .quayio_registry }}/cilium/cilium-cli + repository: {{ .image_registry.quayio_registry }}/cilium/cilium-cli certgen: image: - repository: {{ .quayio_registry }}/cilium/certgen + repository: {{ .image_registry.quayio_registry }}/cilium/certgen hubble: relay: image: - repository: {{ .quayio_registry }}/cilium/hubble-relay-ci + repository: {{ .image_registry.quayio_registry }}/cilium/hubble-relay-ci ui: backend: image: - repository: {{ .quayio_registry }}/cilium/hubble-ui-backend + repository: {{ .image_registry.quayio_registry }}/cilium/hubble-ui-backend frontend: image: - repository: {{ .quayio_registry }}/cilium/hubble-ui + repository: {{ .image_registry.quayio_registry }}/cilium/hubble-ui envoy: image: - repository: {{ .quayio_registry }}/cilium/cilium-envoy + repository: {{ .image_registry.quayio_registry }}/cilium/cilium-envoy operator: replicas: 2 image: - repository: {{ .quayio_registry }}/cilium/operator + repository: {{ .image_registry.quayio_registry }}/cilium/operator 
nodeinit: image: - repository: {{ .quayio_registry }}/cilium/startup-script + repository: {{ .image_registry.quayio_registry }}/cilium/startup-script preflight: image: - repository: {{ .quayio_registry }}/cilium/cilium-ci + repository: {{ .image_registry.quayio_registry }}/cilium/cilium-ci clustermesh: apiserver: image: - repository: {{ .quayio_registry }}/cilium/clustermesh-apiserver-ci + repository: {{ .image_registry.quayio_registry }}/cilium/clustermesh-apiserver-ci authentication: mutual: spire: install: initImage: - repository: {{ .dockerio_registry }}/library/busybox + repository: {{ .image_registry.dockerio_registry }}/library/busybox agent: image: - repository: {{ .ghcrio_registry }}/spiffe/spire-agent + repository: {{ .image_registry.ghcrio_registry }}/spiffe/spire-agent server: image: repository: {{ .ghcrio_registry }}/spiffe/spire-server @@ -56,12 +56,12 @@ cni: {{- if .cni.ipv4_support }} clusterPoolIPv4PodCIDRList: - {{ .cni.ipv4_pods_cidr }} - clusterPoolIPv4MaskSize: {{ .cni.ipv4_block_size }} + clusterPoolIPv4MaskSize: {{ .cni.ipv4_mask_size }} {{- end }} {{- if .cni.ipv6_support }} clusterPoolIPv6PodCIDRList: - {{ .cni.ipv6_pods_cidr }} - clusterPoolIPv6MaskSize: {{ .cni.ipv6_block_size }} + clusterPoolIPv6MaskSize: {{ .cni.ipv6_mask_size }} {{- end }} {{- if not (.kubernetes.kube_proxy.enabled | default true) }} kubeProxyReplacement: "true" diff --git a/builtin/core/roles/cni/cilium/tasks/main.yaml b/builtin/core/roles/cni/cilium/tasks/main.yaml new file mode 100644 index 00000000..3b1aca89 --- /dev/null +++ b/builtin/core/roles/cni/cilium/tasks/main.yaml @@ -0,0 +1,18 @@ +--- +- name: Cilium | Ensure the cilium Helm chart archive is available + copy: + src: >- + {{ .binary_dir }}/cni/cilium/cilium-{{ .cni.cilium_version }}.tgz + dest: >- + /etc/kubernetes/cni/cilium-{{ .cni.cilium_version }}.tgz + +- name: Cilium | Create the cilium Helm custom values file + copy: + content: | + {{ .cni.cilium.values }} + dest: 
/etc/kubernetes/cni/cilium-values.yaml + +# See: https://docs.cilium.io/en/stable/installation/k8s-install-helm/ +- name: Cilium | Deploy cilium with Helm + command: | + helm upgrade --install --namespace kube-system cilium /etc/kubernetes/cni/cilium-{{ .cni.cilium_version }}.tgz -f /etc/kubernetes/cni/cilium-values.yaml diff --git a/builtin/core/roles/cni/defaults/main.yaml b/builtin/core/roles/cni/defaults/main.yaml new file mode 100644 index 00000000..8feaec93 --- /dev/null +++ b/builtin/core/roles/cni/defaults/main.yaml @@ -0,0 +1,17 @@ +cni: + # In Kubernetes, Pod CIDR supports IPv4, IPv6, and dual-stack. Specify as: + # "Single-stack IPv4": pod_cidr in "ipv4" format + # "Single-stack IPv6": pod_cidr in "ipv6" format + # "Dual-stack": pod_cidr in "ipv4,ipv6" format + ipv4_support: >- + {{ eq (.cni.pod_cidr | splitList "," | first | ipFamily) "IPv4" }} + ipv4_pods_cidr: >- + {{- if eq (.cni.pod_cidr | splitList "," | first | ipFamily) "IPv4" -}} + {{ .cni.pod_cidr | splitList "," | first }} + {{- end -}} + ipv6_support: >- + {{- eq (.cni.pod_cidr | default "10.233.64.0/18" | splitList "," | last | ipFamily) "IPv6" }} + ipv6_pods_cidr: >- + {{- if eq (.cni.pod_cidr | default "10.233.64.0/18" | splitList "," | last | ipFamily) "IPv6" -}} + {{ .cni.pod_cidr | default "10.233.64.0/18" | splitList "," | last }} + {{- end -}} diff --git a/builtin/core/roles/install/cni/flannel/defaults/main.yaml b/builtin/core/roles/cni/flannel/defaults/main.yaml similarity index 62% rename from builtin/core/roles/install/cni/flannel/defaults/main.yaml rename to builtin/core/roles/cni/flannel/defaults/main.yaml index 12272e10..ef3207f6 100644 --- a/builtin/core/roles/install/cni/flannel/defaults/main.yaml +++ b/builtin/core/roles/cni/flannel/defaults/main.yaml @@ -7,8 +7,8 @@ cni: podCidrv6: {{ .cni.ipv6_pod_cidr }} flannel: image: - repository: {{ .dockerio_registry }}/flannel/flannel + repository: {{ .image_registry.dockerio_registry }}/flannel/flannel image_cni: - repository: {{ 
.dockerio_registry }}/flannel/flannel-cni-plugin + repository: {{ .image_registry.dockerio_registry }}/flannel/flannel-cni-plugin # support "vxlan" and "host-gw" backend: vxlan \ No newline at end of file diff --git a/builtin/core/roles/install/cni/flannel/tasks/main.yaml b/builtin/core/roles/cni/flannel/tasks/main.yaml similarity index 100% rename from builtin/core/roles/install/cni/flannel/tasks/main.yaml rename to builtin/core/roles/cni/flannel/tasks/main.yaml diff --git a/builtin/core/roles/install/cni/hybridnet/defaults/main.yaml b/builtin/core/roles/cni/hybridnet/defaults/main.yaml similarity index 55% rename from builtin/core/roles/install/cni/hybridnet/defaults/main.yaml rename to builtin/core/roles/cni/hybridnet/defaults/main.yaml index d46b6055..8befc909 100644 --- a/builtin/core/roles/install/cni/hybridnet/defaults/main.yaml +++ b/builtin/core/roles/cni/hybridnet/defaults/main.yaml @@ -3,4 +3,4 @@ cni: values: | # hybridnet helm values images: - registryURL: {{ .dockerio_registry }} \ No newline at end of file + registryURL: {{ .image_registry.dockerio_registry }} \ No newline at end of file diff --git a/builtin/core/roles/install/cni/hybridnet/tasks/main.yaml b/builtin/core/roles/cni/hybridnet/tasks/main.yaml similarity index 66% rename from builtin/core/roles/install/cni/hybridnet/tasks/main.yaml rename to builtin/core/roles/cni/hybridnet/tasks/main.yaml index 8d006e71..d6fca980 100644 --- a/builtin/core/roles/install/cni/hybridnet/tasks/main.yaml +++ b/builtin/core/roles/cni/hybridnet/tasks/main.yaml @@ -2,9 +2,9 @@ - name: Hybridnet | Synchronize Hybridnet Helm chart package to remote node copy: src: >- - {{ .binary_dir }}/cni/hybridnet-{{ .hybridnet_version }}.tgz + {{ .binary_dir }}/cni/hybridnet-{{ .cni.hybridnet_version }}.tgz dest: >- - /etc/kubernetes/cni/hybridnet-{{ .hybridnet_version }}.tgz + /etc/kubernetes/cni/hybridnet-{{ .cni.hybridnet_version }}.tgz - name: Hybridnet | Generate Hybridnet custom values file copy: @@ -15,4 +15,4 @@ # 
Reference: https://artifacthub.io/packages/helm/hybridnet/hybridnet - name: Hybridnet | Install Hybridnet using Helm command: | - helm upgrade --install --namespace kube-system hybridnet /etc/kubernetes/cni/hybridnet-{{ .hybridnet_version }}.tgz -f /etc/kubernetes/cni/hybridnet-values.yaml + helm upgrade --install --namespace kube-system hybridnet /etc/kubernetes/cni/hybridnet-{{ .cni.hybridnet_version }}.tgz -f /etc/kubernetes/cni/hybridnet-values.yaml diff --git a/builtin/core/roles/install/cni/kubeovn/defaults/main.yaml b/builtin/core/roles/cni/kubeovn/defaults/main.yaml similarity index 85% rename from builtin/core/roles/install/cni/kubeovn/defaults/main.yaml rename to builtin/core/roles/cni/kubeovn/defaults/main.yaml index 5d967a90..03db06a3 100644 --- a/builtin/core/roles/install/cni/kubeovn/defaults/main.yaml +++ b/builtin/core/roles/cni/kubeovn/defaults/main.yaml @@ -4,7 +4,7 @@ cni: # kube-ovn helm values global: registry: - address: {{ .dockerio_registry }}/kubeovn + address: {{ .image_registry.dockerio_registry }}/kubeovn {{- $ips := list }} {{- range .groups.kube_control_plane | default list }} {{- $internalIPv4 := index $.hostvars . 
"internal_ipv4" | default "" }} @@ -21,13 +21,13 @@ cni: {{- if and .cni.ipv4_support (not .cni.ipv6_support) }} ipv4: POD_CIDR: {{ .cni.ipv4_pods_cidr }} - SVC_CIDR: {{ .cni.kube_svc_cidr }} + SVC_CIDR: {{ .cni.service_cidr }} {{ else if and .cni.ipv6_support (not .cni.ipv4_support) }} ipv6: POD_CIDR: {{ .cni.ipv6_pods_cidr }} - SVC_CIDR: {{ .cni.kube_svc_cidr }} + SVC_CIDR: {{ .cni.service_cidr }} {{ else if and .cni.ipv4_support .cni.ipv6_support }} dual_stack: POD_CIDR: {{ .cni.ipv4_pods_cidr }},{{ .cni.ipv6_pods_cidr }} - SVC_CIDR: {{ .cni.kube_svc_cidr }} + SVC_CIDR: {{ .cni.service_cidr }} {{- end }} \ No newline at end of file diff --git a/builtin/core/roles/install/cni/kubeovn/tasks/main.yaml b/builtin/core/roles/cni/kubeovn/tasks/main.yaml similarity index 74% rename from builtin/core/roles/install/cni/kubeovn/tasks/main.yaml rename to builtin/core/roles/cni/kubeovn/tasks/main.yaml index c6867c55..d2fdab47 100644 --- a/builtin/core/roles/install/cni/kubeovn/tasks/main.yaml +++ b/builtin/core/roles/cni/kubeovn/tasks/main.yaml @@ -2,9 +2,9 @@ - name: Kubeovn | Synchronize Kube-OVN Helm chart package to remote node copy: src: >- - {{ .binary_dir }}/cni/kubeovn/kubeovn-{{ .kubeovn_version }}.tgz + {{ .binary_dir }}/cni/kubeovn/kubeovn-{{ .cni.kubeovn_version }}.tgz dest: >- - /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz + /etc/kubernetes/cni/kubeovn-{{ .cni.kubeovn_version }}.tgz - name: Kubeovn | Generate Kube-OVN custom values file copy: @@ -19,9 +19,9 @@ - name: Kubeovn | Install Kube-OVN using Helm with custom values command: | - helm upgrade --install --namespace kubeovn-system kubeovn /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz -f /etc/kubernetes/cni/kubeovn-values.yaml + helm upgrade --install --namespace kubeovn-system kubeovn /etc/kubernetes/cni/kubeovn-{{ .cni.kubeovn_version }}.tgz -f /etc/kubernetes/cni/kubeovn-values.yaml # Reference: https://kubeovn.github.io/docs/stable/start/one-step-install/#helm-chart - name: Kubeovn | 
Install Kube-OVN using Helm command: | - helm upgrade --install --namespace kubeovn-system kubeovn /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz + helm upgrade --install --namespace kubeovn-system kubeovn /etc/kubernetes/cni/kubeovn-{{ .cni.kubeovn_version }}.tgz diff --git a/builtin/core/roles/cni/meta/main.yaml b/builtin/core/roles/cni/meta/main.yaml new file mode 100644 index 00000000..494a6619 --- /dev/null +++ b/builtin/core/roles/cni/meta/main.yaml @@ -0,0 +1,19 @@ +--- +dependencies: + - role: cni/multus + when: .cni.multus.enabled + + - role: cni/calico + when: .cni.type | eq "calico" + + - role: cni/cilium + when: .cni.type | eq "cilium" + + - role: cni/flannel + when: .cni.type | eq "flannel" + + - role: cni/kubeovn + when: .cni.type | eq "kubeovn" + + - role: cni/hybridnet + when: .cni.type | eq "hybridnet" diff --git a/builtin/core/roles/install/cni/multus/tasks/main.yaml b/builtin/core/roles/cni/multus/tasks/main.yaml similarity index 100% rename from builtin/core/roles/install/cni/multus/tasks/main.yaml rename to builtin/core/roles/cni/multus/tasks/main.yaml diff --git a/builtin/core/roles/install/cni/multus/templates/multus.yaml b/builtin/core/roles/cni/multus/templates/multus.yaml similarity index 100% rename from builtin/core/roles/install/cni/multus/templates/multus.yaml rename to builtin/core/roles/cni/multus/templates/multus.yaml diff --git a/builtin/core/roles/install/cri/containerd/defaults/main.yaml b/builtin/core/roles/cri/containerd/defaults/main.yaml similarity index 100% rename from builtin/core/roles/install/cri/containerd/defaults/main.yaml rename to builtin/core/roles/cri/containerd/defaults/main.yaml diff --git a/builtin/core/roles/install/cri/containerd/files/containerd.service b/builtin/core/roles/cri/containerd/files/containerd.service similarity index 100% rename from builtin/core/roles/install/cri/containerd/files/containerd.service rename to builtin/core/roles/cri/containerd/files/containerd.service diff --git 
a/builtin/core/roles/install/cri/containerd/tasks/main.yaml b/builtin/core/roles/cri/containerd/tasks/main.yaml similarity index 65% rename from builtin/core/roles/install/cri/containerd/tasks/main.yaml rename to builtin/core/roles/cri/containerd/tasks/main.yaml index be0eaa5b..aa02a482 100644 --- a/builtin/core/roles/install/cri/containerd/tasks/main.yaml +++ b/builtin/core/roles/cri/containerd/tasks/main.yaml @@ -5,10 +5,10 @@ register: runc_install_version - name: Containerd | Ensure the runc binary is present on the remote node - when: or (.runc_install_version.error | empty | not) (.runc_install_version.stdout | contains (printf "runc version %s\n" (.runc_version | default "" | trimPrefix "v" )) | not) + when: or (.runc_install_version.error | empty | not) (.runc_install_version.stdout | contains (printf "runc version %s\n" (.cri.runc_version | default "" | trimPrefix "v" )) | not) copy: src: >- - {{ .binary_dir }}/runc/{{ .runc_version }}/{{ .binary_type }}/runc.{{ .binary_type }} + {{ .binary_dir }}/runc/{{ .cri.runc_version }}/{{ .binary_type }}/runc.{{ .binary_type }} dest: /usr/local/bin/runc mode: 0755 @@ -18,17 +18,17 @@ register: containerd_install_version - name: Containerd | Install and configure containerd if not present or version mismatch - when: or (.containerd_install_version.error | empty | not) (.containerd_install_version.stdout | contains (printf " %s " .containerd_version) | not) + when: or (.containerd_install_version.error | empty | not) (.containerd_install_version.stdout | contains (printf " %s " .cri.containerd_version) | not) block: - name: Containerd | Copy containerd binary archive to the remote node copy: src: >- - {{ .binary_dir }}/containerd/{{ .containerd_version }}/{{ .binary_type }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type }}.tar.gz + {{ .binary_dir }}/containerd/{{ .cri.containerd_version }}/{{ .binary_type }}/containerd-{{ .cri.containerd_version | default "" | trimPrefix "v" 
}}-linux-{{ .binary_type }}.tar.gz dest: >- - {{ .tmp_dir }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type }}.tar.gz + {{ .tmp_dir }}/containerd-{{ .cri.containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type }}.tar.gz - name: Containerd | Extract containerd binaries to /usr/local/bin command: | - tar -xvf {{ .tmp_dir }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type }}.tar.gz --strip-components=1 -C /usr/local/bin/ + tar -xvf {{ .tmp_dir }}/containerd-{{ .cri.containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type }}.tar.gz --strip-components=1 -C /usr/local/bin/ - name: Containerd | Generate the containerd configuration file template: src: config.toml @@ -42,23 +42,25 @@ systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service - name: Containerd | Synchronize image registry TLS certificates to the remote node - when: .groups.image_registry | default list | empty | not block: - name: Containerd | Copy image registry CA certificate to the remote node + when: .image_registry.auth.ca_file | empty | not copy: src: >- - {{ .binary_dir }}/pki/root.crt + {{ .image_registry.auth.ca_file }} dest: >- /etc/containerd/certs.d/{{ .image_registry.auth.registry }}/ca.crt - name: Containerd | Copy image registry server certificate to the remote node + when: .image_registry.auth.cert_file | empty | not copy: src: >- - {{ .binary_dir }}/pki/image_registry.crt + {{ .image_registry.auth.cert_file }} dest: >- /etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.crt - name: Containerd | Copy image registry server key to the remote node + when: .image_registry.auth.key_file | empty | not copy: src: >- - {{ .binary_dir }}/pki/image_registry.key + {{ .image_registry.auth.key_file }} dest: >- /etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.key diff --git 
a/builtin/core/roles/install/cri/containerd/templates/config.toml b/builtin/core/roles/cri/containerd/templates/config.toml similarity index 89% rename from builtin/core/roles/install/cri/containerd/templates/config.toml rename to builtin/core/roles/cri/containerd/templates/config.toml index f9ffa4c9..3d3afe98 100644 --- a/builtin/core/roles/install/cri/containerd/templates/config.toml +++ b/builtin/core/roles/cri/containerd/templates/config.toml @@ -63,19 +63,23 @@ state = "/run/containerd" username = "{{ .image_registry.auth.username }}" password = "{{ .image_registry.auth.password }}" [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .image_registry.auth.registry }}".tls] -{{- if .groups.image_registry | default list | empty | not }}ecure_skip_verify = true +{{- if .image_registry.auth.ca_file | empty | not }} ca_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/ca.crt" +{{- end }} +{{- if .image_registry.auth.cert_file | empty | not }} cert_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.crt" +{{- end }} +{{- if .image_registry.auth.key_file | empty | not }} key_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.key" {{- end }} - insecure_skip_verify = {{ .image_registry.auth.skip_ssl | default true }} + insecure_skip_verify = {{ .image_registry.auth.insecure | default true }} {{- if .cri.registry.auths | empty | not }} {{- range .cri.registry.auths }} [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .repo }}".auth] username = "{{ .username }}" password = "{{ .password }}" [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .repo }}".tls] - {{- if.ca_file }} + {{- if .ca_file }} ca_file = {{ .ca_file }} {{- end }} {{- if .crt_file }} @@ -84,6 +88,6 @@ state = "/run/containerd" {{- if .key_file }} key_file = {{ .key_file }} {{- end }} - insecure_skip_verify = {{ .skip_ssl | default true }} + insecure_skip_verify = {{ .insecure | default true }} {{- end }} {{- end }} diff 
--git a/builtin/core/roles/install/cri/crictl/tasks/main.yaml b/builtin/core/roles/cri/crictl/tasks/main.yaml similarity index 59% rename from builtin/core/roles/install/cri/crictl/tasks/main.yaml rename to builtin/core/roles/cri/crictl/tasks/main.yaml index 64f955e0..bbcafc35 100644 --- a/builtin/core/roles/install/cri/crictl/tasks/main.yaml +++ b/builtin/core/roles/cri/crictl/tasks/main.yaml @@ -5,17 +5,17 @@ register: crictl_install_version - name: Crictl | Install and configure crictl if not present or version mismatch - when: or (.crictl_install_version.error | empty | not) (.crictl_install_version.stdout | ne (printf "crictl version %s" .crictl_version)) + when: or (.crictl_install_version.error | empty | not) (.crictl_install_version.stdout | ne (printf "crictl version %s" .cri.crictl_version)) block: - name: Crictl | Copy crictl binary archive to the remote node copy: src: >- - {{ .binary_dir }}/crictl/{{ .crictl_version }}/{{ .binary_type }}/crictl-{{ .crictl_version }}-linux-{{ .binary_type }}.tar.gz + {{ .binary_dir }}/crictl/{{ .cri.crictl_version }}/{{ .binary_type }}/crictl-{{ .cri.crictl_version }}-linux-{{ .binary_type }}.tar.gz dest: >- - {{ .tmp_dir }}/crictl-{{ .crictl_version }}-linux-{{ .binary_type }}.tar.gz + {{ .tmp_dir }}/crictl-{{ .cri.crictl_version }}-linux-{{ .binary_type }}.tar.gz - name: Crictl | Extract crictl binary to /usr/local/bin command: | - tar -xvf {{ .tmp_dir }}/crictl-{{ .crictl_version }}-linux-{{ .binary_type }}.tar.gz -C /usr/local/bin/ + tar -xvf {{ .tmp_dir }}/crictl-{{ .cri.crictl_version }}-linux-{{ .binary_type }}.tar.gz -C /usr/local/bin/ - name: Crictl | Generate crictl configuration file template: src: crictl.yaml diff --git a/builtin/core/roles/install/cri/crictl/templates/crictl.yaml b/builtin/core/roles/cri/crictl/templates/crictl.yaml similarity index 100% rename from builtin/core/roles/install/cri/crictl/templates/crictl.yaml rename to builtin/core/roles/cri/crictl/templates/crictl.yaml diff --git 
a/builtin/core/roles/install/cri/docker/defaults/main.yaml b/builtin/core/roles/cri/docker/defaults/main.yaml similarity index 100% rename from builtin/core/roles/install/cri/docker/defaults/main.yaml rename to builtin/core/roles/cri/docker/defaults/main.yaml diff --git a/builtin/core/roles/install/cri/docker/files/containerd.service b/builtin/core/roles/cri/docker/files/containerd.service similarity index 100% rename from builtin/core/roles/install/cri/docker/files/containerd.service rename to builtin/core/roles/cri/docker/files/containerd.service diff --git a/builtin/core/roles/install/cri/docker/files/docker.service b/builtin/core/roles/cri/docker/files/docker.service similarity index 100% rename from builtin/core/roles/install/cri/docker/files/docker.service rename to builtin/core/roles/cri/docker/files/docker.service diff --git a/builtin/core/roles/install/cri/docker/tasks/cridockerd.yaml b/builtin/core/roles/cri/docker/tasks/cridockerd.yaml similarity index 65% rename from builtin/core/roles/install/cri/docker/tasks/cridockerd.yaml rename to builtin/core/roles/cri/docker/tasks/cridockerd.yaml index d570a0f6..cfbe4f82 100644 --- a/builtin/core/roles/install/cri/docker/tasks/cridockerd.yaml +++ b/builtin/core/roles/cri/docker/tasks/cridockerd.yaml @@ -5,17 +5,17 @@ register: cridockerd_install_version - name: Cridockerd | Install and configure cri-dockerd if not present or version mismatch - when: or (.cridockerd_install_version.error | empty | not) (.cridockerd_install_version.stdout | hasPrefix (printf "cri-dockerd %s " .cridockerd_version) | not) + when: or (.cridockerd_install_version.error | empty | not) (.cridockerd_install_version.stdout | hasPrefix (printf "cri-dockerd %s " .cri.cridockerd_version) | not) block: - name: Cridockerd | Copy cri-dockerd binary archive to the remote node copy: src: >- - {{ .binary_dir }}/cri-dockerd/{{ .cridockerd_version }}/{{ .binary_type }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.{{ 
.binary_type }}.tgz + {{ .binary_dir }}/cri-dockerd/{{ .cri.cridockerd_version }}/{{ .binary_type }}/cri-dockerd-{{ .cri.cridockerd_version | default "" | trimPrefix "v" }}.{{ .binary_type }}.tgz dest: >- - {{ .tmp_dir }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.{{ .binary_type }}.tgz + {{ .tmp_dir }}/cri-dockerd-{{ .cri.cridockerd_version | default "" | trimPrefix "v" }}.{{ .binary_type }}.tgz - name: Cridockerd | Extract cri-dockerd binary to /usr/local/bin command: | - tar -xvf {{ .tmp_dir }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.{{ .binary_type }}.tgz --strip-components=1 -C /usr/local/bin/ + tar -xvf {{ .tmp_dir }}/cri-dockerd-{{ .cri.cridockerd_version | default "" | trimPrefix "v" }}.{{ .binary_type }}.tgz --strip-components=1 -C /usr/local/bin/ - name: Cridockerd | Generate cri-dockerd systemd service file template: src: cri-dockerd.service diff --git a/builtin/core/roles/install/cri/docker/tasks/main.yaml b/builtin/core/roles/cri/docker/tasks/main.yaml similarity index 72% rename from builtin/core/roles/install/cri/docker/tasks/main.yaml rename to builtin/core/roles/cri/docker/tasks/main.yaml index 868531c1..2c3c086f 100644 --- a/builtin/core/roles/install/cri/docker/tasks/main.yaml +++ b/builtin/core/roles/cri/docker/tasks/main.yaml @@ -2,7 +2,7 @@ # Docker | Install cri-dockerd if required for Kubernetes >= v1.24.0 - include_tasks: cridockerd.yaml when: - - .kube_version | semverCompare ">=v1.24.0" + - .kubernetes.kube_version | semverCompare ">=v1.24.0" - name: Docker | Check if Docker is installed on the system ignore_errors: true @@ -10,17 +10,17 @@ register: docker_install_version - name: Docker | Install and configure Docker if not present or version mismatch - when: or (.docker_install_version.error | empty | not) (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not) + when: or (.docker_install_version.error | empty | not) 
(.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .cri.docker_version) | not) block: - name: Docker | Copy Docker binary archive to the remote node copy: src: >- - {{ .binary_dir }}/docker/{{ .docker_version }}/{{ .binary_type }}/docker-{{ .docker_version }}.tgz + {{ .binary_dir }}/docker/{{ .cri.docker_version }}/{{ .binary_type }}/docker-{{ .cri.docker_version }}.tgz dest: >- - {{ .tmp_dir }}/docker-{{ .docker_version }}.tgz + {{ .tmp_dir }}/docker-{{ .cri.docker_version }}.tgz - name: Docker | Extract Docker binaries to /usr/local/bin command: | - tar -C /usr/local/bin/ --strip-components=1 -xvf {{ .tmp_dir }}/docker-{{ .docker_version }}.tgz --wildcards docker/* + tar -C /usr/local/bin/ --strip-components=1 -xvf {{ .tmp_dir }}/docker-{{ .cri.docker_version }}.tgz --wildcards 'docker/*' - name: Docker | Generate Docker configuration file template: src: daemon.json @@ -39,23 +39,25 @@ systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service - name: Docker | Synchronize image registry TLS certificates to the remote node - when: .groups.image_registry | default list | empty | not block: - name: Docker | Copy image registry CA certificate to the remote node + when: .image_registry.auth.ca_file | empty | not copy: src: >- - {{ .binary_dir }}/pki/root.crt + {{ .image_registry.auth.ca_file }} dest: >- /etc/docker/certs.d/{{ .image_registry.auth.registry }}/ca.crt - - name: Docker | Copy image registry client certificate to the remote node + - name: Docker | Copy image registry server certificate to the remote node + when: .image_registry.auth.cert_file | empty | not copy: src: >- - {{ .binary_dir }}/pki/image_registry.crt + {{ .image_registry.auth.cert_file }} dest: >- /etc/docker/certs.d/{{ .image_registry.auth.registry }}/client.cert - - name: Docker | Copy image registry client key to the remote node + - name: Docker | Copy image registry server key to the remote node + when: .image_registry.auth.key_file | 
empty | not copy: src: >- - {{ .binary_dir }}/pki/image_registry.key + {{ .image_registry.auth.key_file }} dest: >- /etc/docker/certs.d/{{ .image_registry.auth.registry }}/client.key diff --git a/builtin/core/roles/install/cri/docker/templates/cri-dockerd.service b/builtin/core/roles/cri/docker/templates/cri-dockerd.service similarity index 100% rename from builtin/core/roles/install/cri/docker/templates/cri-dockerd.service rename to builtin/core/roles/cri/docker/templates/cri-dockerd.service diff --git a/builtin/core/roles/install/cri/docker/templates/daemon.json b/builtin/core/roles/cri/docker/templates/daemon.json similarity index 94% rename from builtin/core/roles/install/cri/docker/templates/daemon.json rename to builtin/core/roles/cri/docker/templates/daemon.json index d774759c..1e914e17 100644 --- a/builtin/core/roles/install/cri/docker/templates/daemon.json +++ b/builtin/core/roles/cri/docker/templates/daemon.json @@ -10,7 +10,7 @@ "registry-mirrors": {{ .cri.registry.mirrors | toJson }}, {{- end }} {{- $insecure_registries := .cri.registry.insecure_registries | default list -}} -{{- if .image_registry.auth.skip_ssl -}} +{{- if .image_registry.auth.insecure -}} {{- $insecure_registries = append $insecure_registries .image_registry.auth.registry -}} {{- end -}} "insecure-registries": {{ $insecure_registries | toJson }}, diff --git a/builtin/core/roles/cri/meta/main.yaml b/builtin/core/roles/cri/meta/main.yaml new file mode 100644 index 00000000..34b40899 --- /dev/null +++ b/builtin/core/roles/cri/meta/main.yaml @@ -0,0 +1,8 @@ +dependencies: + - role: cri/crictl + + - role: cri/docker + when: .cri.container_manager | eq "docker" + + - role: cri/containerd + when: .cri.container_manager | eq "containerd" \ No newline at end of file diff --git a/builtin/core/roles/defaults/defaults/main/01-cluster_require.yaml b/builtin/core/roles/defaults/defaults/main/01-cluster_require.yaml new file mode 100644 index 00000000..6e27c0c2 --- /dev/null +++ 
b/builtin/core/roles/defaults/defaults/main/01-cluster_require.yaml @@ -0,0 +1,36 @@ +# Cluster parameter boundaries +cluster_require: + # Maximum etcd WAL fsync duration for 99th percentile (in nanoseconds) + etcd_disk_wal_fysnc_duration_seconds: 10000000 + # Allow installation on unsupported Linux distributions + allow_unsupported_distribution_setup: false + # Supported operating system distributions + supported_os_distributions: + - ubuntu + - '"ubuntu"' + - centos + - '"centos"' + # Required network plugins + require_network_plugin: ['calico', 'flannel', 'cilium', 'hybridnet', 'kube-ovn'] + # Minimum supported Kubernetes version + kube_version_min_required: v1.23.0 + # Minimum memory (in MB) required for each control plane node + # Must be greater than or equal to minimal_master_memory_mb + minimal_master_memory_mb: 10 + # Minimum memory (in MB) required for each worker node + # Must be greater than or equal to minimal_node_memory_mb + minimal_node_memory_mb: 10 + # Supported etcd deployment types + require_etcd_deployment_type: ['internal', 'external'] + # Supported container runtimes + require_container_manager: ['docker', 'containerd'] + # Minimum required version of containerd + containerd_min_version_required: v1.6.0 + # Supported CPU architectures + supported_architectures: + - amd64 + - x86_64 + - arm64 + - aarch64 + # Minimum required Linux kernel version + min_kernel_version: 4.9.17 \ No newline at end of file diff --git a/builtin/core/roles/defaults/defaults/main/01-main.yaml b/builtin/core/roles/defaults/defaults/main/01-main.yaml new file mode 100644 index 00000000..d921df13 --- /dev/null +++ b/builtin/core/roles/defaults/defaults/main/01-main.yaml @@ -0,0 +1,39 @@ +work_dir: /root/kubekey +binary_dir: >- + {{ .work_dir }}/kubekey +scripts_dir: >- + {{ .binary_dir }}/scripts +tmp_dir: /tmp/kubekey + +# Mapping of common machine architecture names to their standard forms +transform_architectures: + amd64: + - amd64 + - x86_64 + arm64: + - arm64 + - 
aarch64 + + +# Enable enhanced security features for stricter cluster security requirements. +security_enhancement: false + +# Enable Kubernetes audit logging. +# Audit logs record and track critical operations within the cluster, helping administrators monitor security events, troubleshoot issues, and meet compliance requirements (e.g., SOC2, ISO 27001). +audit: false + +# When removing a node, also uninstall the node's container runtime (CRI), such as Docker or containerd. +deleteCRI: true + +# When removing a node, also uninstall etcd from the node. +deleteETCD: true + +# When removing a node, restore the node's DNS configuration. +deleteDNS: true + +# When removing a node, also uninstall any private image registry (such as Harbor or registry) installed on the node. +# This is typically used in conjunction with nodes defined in inventory.groups.image_registry. +deleteImageRegistry: false + +# image_manifests: List of container images to be synchronized to the private registry +image_manifests: [] \ No newline at end of file diff --git a/builtin/core/roles/defaults/defaults/main/02-certs.yaml b/builtin/core/roles/defaults/defaults/main/02-certs.yaml new file mode 100644 index 00000000..a7fca7f4 --- /dev/null +++ b/builtin/core/roles/defaults/defaults/main/02-certs.yaml @@ -0,0 +1,49 @@ +# Certificate generation configuration +# The following certificates will be generated: +# - etcd certificates +# - Kubernetes cluster certificates (replacing the CA certificate generated by kubeadm, which is limited to a 10-year validity) +# - Image registry certificates (for Harbor and similar registries) + +# Certificate chain structure: +# CA (self-signed or provided) +# |- etcd.cert +# |- etcd.key +# | +# |- image_registry.cert +# |- image_registry.key +# | +# |- kubernetes.cert +# |- kubernetes.key +# | |- kubeadm uses this to generate server certificates (kube-apiserver certificate) +# |- front-proxy.cert +# |- front-proxy.key +# | +# |- image-registry.cert +# |- 
image-registry.key + +certs: + # CA certificate settings + ca: + # CA certificate expiration time + date: 87600h + # Certificate generation policy: + # IfNotPresent: Validate the certificate if it exists; generate a self-signed certificate only if it does not exist + gen_cert_policy: IfNotPresent + kubernetes_ca: + date: 87600h + # How to generate the certificate file. Supported values: IfNotPresent, Always + gen_cert_policy: IfNotPresent + front_proxy_ca: + date: 87600h + # How to generate the certificate file. Supported values: IfNotPresent, Always + gen_cert_policy: IfNotPresent + # etcd certificate + etcd: + date: 87600h + # How to generate the certificate file. Supported values: IfNotPresent, Always + gen_cert_policy: IfNotPresent + # image_registry certificate + image_registry: + date: 87600h + # How to generate the certificate file. Supported values: IfNotPresent, Always + gen_cert_policy: IfNotPresent \ No newline at end of file diff --git a/builtin/core/roles/defaults/defaults/main/02-image_registry.yaml b/builtin/core/roles/defaults/defaults/main/02-image_registry.yaml new file mode 100644 index 00000000..636b8381 --- /dev/null +++ b/builtin/core/roles/defaults/defaults/main/02-image_registry.yaml @@ -0,0 +1,79 @@ +# In an online environment (when image_registry.auth.registry is empty), images are pulled directly from their original registries to the cluster. +# In an offline environment (when image_registry.auth.registry is set), images are first pulled from the source registry, cached locally, pushed to a private registry (such as Harbor), and then used by the cluster. + +image_registry: + # Specify which image registry to install. Supported values: harbor, docker-registry + # If left empty, no image registry will be installed (assumes an existing registry is already available). + type: "" + ha_vip: "" + # Directory where images to be pushed to the registry are stored. 
+ # Path for storing offline images + images_dir: >- + {{ .tmp_dir }}/images/ + # Image registry authentication settings + auth: + registry: >- + {{- if .image_registry.type | empty | not }} + {{- if .image_registry.ha_vip | empty | not -}} + {{ .image_registry.ha_vip }} + {{- else if .groups.image_registry | default list | empty | not -}} + {{- $internalIPv4 := index .hostvars (.groups.image_registry | default list | first) "internal_ipv4" | default "" -}} + {{- $internalIPv6 := index .hostvars (.groups.image_registry | default list | first) "internal_ipv6" | default "" -}} + {{- if $internalIPv4 | empty | not -}} + {{ $internalIPv4 }} + {{- else if $internalIPv6 | empty | not -}} + {{ $internalIPv6 }} + {{- end -}} + {{- end -}} + {{- end -}} + username: admin + password: Harbor12345 + insecure: >- + {{- if .image_registry.type | empty -}} + true + {{- end -}} + ca_file: >- + {{- if .image_registry.type | empty | not -}} + {{ .binary_dir }}/pki/root.crt + {{- end -}} + cert_file: >- + {{- if .image_registry.type | empty | not -}} + {{ .binary_dir }}/pki/image_registry.crt + {{- end -}} + key_file: >- + {{- if .image_registry.type | empty | not -}} + {{ .binary_dir }}/pki/image_registry.key + {{- end -}} + # Registry endpoint for images from docker.io + dockerio_registry: >- + {{- if .image_registry.auth.registry | empty | not -}} + {{ .image_registry.auth.registry }} + {{- else -}} + docker.io + {{- end -}} + + # Registry endpoint for images from quay.io + quayio_registry: >- + {{- if .image_registry.auth.registry | empty | not -}} + {{ .image_registry.auth.registry }} + {{- else -}} + quay.io + {{- end -}} + + # Registry endpoint for images from ghcr.io + ghcrio_registry: >- + {{- if .image_registry.auth.registry | empty | not -}} + {{ .image_registry.auth.registry }} + {{- else -}} + ghcr.io + {{- end -}} + + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. 
+ keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 diff --git a/builtin/core/roles/defaults/defaults/main/02-native.yaml b/builtin/core/roles/defaults/defaults/main/02-native.yaml new file mode 100644 index 00000000..2d80e0d2 --- /dev/null +++ b/builtin/core/roles/defaults/defaults/main/02-native.yaml @@ -0,0 +1,25 @@ +# Essential operating system configuration settings +native: + ntp: + # List of NTP servers used for system time synchronization + servers: + - "cn.pool.ntp.org" + # Toggle to enable or disable the NTP service + enabled: true + # System timezone configuration + timezone: Asia/Shanghai + + # NFS service configuration for nodes assigned the 'nfs' role in the inventory + nfs: + # Directories to be shared via NFS + share_dir: + - /share/ + # Whether to set the node's hostname to the value defined in inventory.hosts. + set_hostname: true + # List of DNS configuration files to update on each node. + # This ensures that, during cluster installation, critical hostnames can be resolved locally even if no DNS service is available. 
+ # For example: + # [control_plane_endpoint of master node] -> master node IP + # [hostname of the node being installed] -> corresponding node IP + localDNS: + - /etc/hosts \ No newline at end of file diff --git a/builtin/core/roles/defaults/defaults/main/03-kubernetes.yaml b/builtin/core/roles/defaults/defaults/main/03-kubernetes.yaml new file mode 100644 index 00000000..e23f8797 --- /dev/null +++ b/builtin/core/roles/defaults/defaults/main/03-kubernetes.yaml @@ -0,0 +1,116 @@ +kubernetes: + # Name of the cluster to be installed + cluster_name: kubekey + + # Kubernetes version to deploy + kube_version: v1.33.1 + # helm binary + helm_version: v3.18.5 + + # Image repository for built-in Kubernetes images + image_repository: >- + {{ .image_registry.dockerio_registry }}/kubesphere + + # Kubernetes network configuration + # kube-apiserver pod parameters + apiserver: + port: 6443 + certSANs: [] + extra_args: + # Example: feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true + + # kube-controller-manager pod parameters + controller_manager: + extra_args: + cluster-signing-duration: 87600h + # Example: feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true + + # kube-scheduler pod parameters + scheduler: + extra_args: + # Example: feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true + + # kube-proxy pod parameters + kube_proxy: + enabled: true + # Supported proxy modes: ipvs, iptables + mode: "ipvs" + config: + iptables: + masqueradeAll: false + masqueradeBit: 14 + minSyncPeriod: 0s + syncPeriod: 30s + + # kubelet service parameters + kubelet: + max_pod: 110 + pod_pids_limit: 10000 +# feature_gates: + container_log_max_size: 5Mi + container_log_max_files: 3 +# extra_args: + + # Specify a stable IP address or DNS name for the control plane endpoint. + # For high availability, it is recommended to set control_plane_endpoint to a DNS name. 
+ # Configuration guidance: + # 1. If a DNS name is available: + # - Set control_plane_endpoint to that DNS name and ensure it resolves to all control plane node IPs. + # 2. If no DNS name is available: + # - You can set a DNS name now and add the resolution later. + # - Add the resolution to each node's local DNS file, for example: + # {{ vip }} {{ control_plane_endpoint }} + # - If you have a VIP (Virtual IP): + # Deploy kube-vip on control plane nodes to map the VIP to the actual node IPs. + # - If you do not have a VIP: + # Deploy HAProxy on worker nodes, use a fixed IP (such as 127.0.0.2) as the VIP, and forward to all control plane node IPs. + # + # For non-HA scenarios (manual configuration only, not automatically installed): + # You can set the VIP to the IP of a single control plane node. + control_plane_endpoint: + host: lb.kubesphere.local + port: "{{ .kubernetes.apiserver.port }}" + # Supported types: local, kube_vip, haproxy + # When type is local, configure as follows: + # - On control-plane nodes: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }} + # - On worker nodes: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }} + type: local + kube_vip: + # The IP address of the node's network interface (e.g., "eth0"). + address: "" + # Supported modes: ARP, BGP + mode: ARP + image: + registry: >- + {{ .dockerio_registry }} + repository: plndr/kube-vip + tag: v0.7.2 + haproxy: + # The IP address on the node's "lo" (loopback) interface. + address: 127.0.0.1 + health_port: 8081 + image: + registry: >- + {{ .dockerio_registry }} + repository: library/haproxy + tag: 2.9.6-alpine + + # Whether to automatically renew Kubernetes certificates + certs: + # There are three ways to provide the Kubernetes CA (Certificate Authority) files: + # 1. kubeadm: Leave ca_cert and ca_key empty, and kubeadm will generate them automatically. These certificates are valid for 10 years and will not change. + # 2. 
kubekey: Set ca_cert to {{ .binary_dir }}/pki/ca.cert and ca_key to {{ .binary_dir }}/pki/ca.key. + # These certificates are generated by kubekey, valid for 10 years, and can be updated via `cert.ca_date`. + # 3. Custom: Manually specify the absolute paths for ca_cert and ca_key to use your own CA files. + # + # To use custom CA files, fill in the absolute paths below. + # If left empty, the default behavior (kubeadm or kubekey) will be used. + ca_cert: "" + ca_key: "" + # The following fields are for the Kubernetes front-proxy CA certificate and key. + # To use custom front-proxy CA files, fill in the absolute paths below. + # If left empty, the default behavior will be used. + front_proxy_cert: "" + front_proxy_key: "" + # Automatically renew service certificates (Note: CA certificates cannot be renewed automatically) + renew: false diff --git a/builtin/core/roles/defaults/defaults/main/04-cni.yaml b/builtin/core/roles/defaults/defaults/main/04-cni.yaml new file mode 100644 index 00000000..6e963a56 --- /dev/null +++ b/builtin/core/roles/defaults/defaults/main/04-cni.yaml @@ -0,0 +1,41 @@ +cni: + # CNI plugin to use (equivalent to kubernetes.kube_network_plugin) + # Specify the network plugin to install for the cluster. Supported: calico, cilium, flannel, hybridnet, kubeovn, other + # kube_network_plugin: calico + type: calico + # Maximum number of pods supported per node + max_pods: 110 + # The complete Pod IP pool for the cluster. Supports IPv4, IPv6, and dual-stack. + pod_cidr: 10.233.64.0/18 + # IPv4 subnet mask length for pod allocation per node. Determines the size of each node's pod IP pool. + ipv4_mask_size: 24 + # IPv6 subnet mask length for pod allocation per node. + ipv6_mask_size: 64 + # The complete Service IP pool for the cluster. Supports IPv4, IPv6, and dual-stack. 
+ service_cidr: 10.233.0.0/18 + + # Network enhancement plugin for multiple pod network interfaces (Multus) + multus: + # Enable or disable the network enhancement plugin + enabled: false + image: + registry: >- + {{ .image_registry.ghcrio_registry }} + repository: k8snetworkplumbingwg/multus-cni + tag: v4.3.0 + + # ========== cni ========== + # cni_plugins binary + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.4 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.13.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 \ No newline at end of file diff --git a/builtin/core/roles/defaults/defaults/main/04-cri.yaml b/builtin/core/roles/defaults/defaults/main/04-cri.yaml new file mode 100644 index 00000000..7a96c18d --- /dev/null +++ b/builtin/core/roles/defaults/defaults/main/04-cri.yaml @@ -0,0 +1,40 @@ +cri: + # Container runtime to use. Supported: containerd, docker + container_manager: containerd + # Cgroup driver for the container runtime. 
Supported: systemd, cgroupfs + cgroup_driver: systemd + # Pause/sandbox image configuration + sandbox_image: + registry: >- + {{ .image_registry.dockerio_registry }} + repository: kubesphere/pause + tag: "3.9" + # CRI socket endpoint for the selected container runtime + cri_socket: >- + {{- if .cri.container_manager | eq "containerd" -}} + unix:///var/run/containerd/containerd.sock + {{- else if and (.cri.container_manager | eq "docker") (.kubernetes.kube_version | semverCompare ">=v1.24.0") -}} + unix:///var/run/cri-dockerd.sock + {{- end -}} + + # Registry configuration for CRI, including mirrors, insecure registries, and authentication + registry: + mirrors: ["https://registry-1.docker.io"] + insecure_registries: [] + auths: [] + + # ========== cri ========== + # crictl binary + crictl_version: v1.33.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.7 + # docker-compose binary + dockercompose_version: v2.20.3 + # cridockerd. Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.6 + # runc binary + runc_version: v1.1.7 \ No newline at end of file diff --git a/builtin/core/roles/defaults/defaults/main/04-etcd.yaml b/builtin/core/roles/defaults/defaults/main/04-etcd.yaml new file mode 100644 index 00000000..b7a19356 --- /dev/null +++ b/builtin/core/roles/defaults/defaults/main/04-etcd.yaml @@ -0,0 +1,44 @@ +# etcd service configuration +etcd: + # etcd supports two deployment types: + # - external: Use an external etcd cluster. + # - internal: Deploy etcd as static Pods within the cluster. 
+ deployment_type: external + etcd_version: v3.5.11 + image: + registry: >- + {{ .image_registry.dockerio_registry }} + repository: kubesphere/etcd + tag: "{{ .etcd.etcd_version }}" + # endpoints: ["https://127.1.1.1:2379"] + # Environment variables for etcd service + env: + election_timeout: 5000 + heartbeat_interval: 250 + compaction_retention: 8 + snapshot_count: 10000 + data_dir: /var/lib/etcd + token: k8s_etcd + # metrics: basic + # quota_backend_bytes: 100 + # max_request_bytes: 100 + # max_snapshots: 100 + # max_wals: 5 + # log_level: info + # unsupported_arch: arm64 + # etcd backup configuration + backup: + backup_dir: /var/lib/etcd-backup + keep_backup_number: 5 + etcd_backup_script: "backup.sh" + on_calendar: "*-*-* *:00/30:00" + # Enable etcd performance tuning (set to true to enable) + performance: false + # Enable etcd traffic prioritization (set to true to enable) + traffic_priority: false + ca_file: >- + {{ .binary_dir }}/pki/root.crt + cert_file: >- + {{ .binary_dir }}/pki/etcd.crt + key_file: >- + {{ .binary_dir }}/pki/etcd.key \ No newline at end of file diff --git a/builtin/core/roles/defaults/defaults/main/05-dns.yaml b/builtin/core/roles/defaults/defaults/main/05-dns.yaml new file mode 100644 index 00000000..684082c7 --- /dev/null +++ b/builtin/core/roles/defaults/defaults/main/05-dns.yaml @@ -0,0 +1,78 @@ +dns: + # ====== In-Cluster DNS Service Configuration ====== + # The DNS domain suffix used for all services and pods within the cluster. 
+ dns_domain: cluster.local + # CoreDNS image settings + dns_image: + registry: >- + {{ .image_registry.dockerio_registry }} + repository: >- + kubesphere + tag: v1.11.1 + # NodeLocalDNS image settings + dns_cache_image: + registry: >- + {{ .image_registry.dockerio_registry }} + repository: kubesphere/k8s-dns-node-cache + tag: 1.24.0 + # The IP address assigned to the cluster DNS service + dns_service_ip: >- + {{ index (.cni.service_cidr | ipInCIDR) 2 }} + # The IP address NodeLocalDNS will bind to on each node + dns_cache_ip: 169.254.25.10 + + # CoreDNS pod configuration + coredns: + dns_etc_hosts: [] + # DNS zone matching configuration + zone_configs: + # Each entry defines which DNS zones to match. The default port is 53. + # ".": matches all DNS zones. + # "example.com": matches *.example.com using DNS server on port 53. + # "example.com:54": matches *.example.com using DNS server on port 54. + - zones: [".:53"] + additional_configs: + - errors + - ready + - prometheus :9153 + - loop + - reload + - loadbalance + cache: 30 + kubernetes: + zones: + - "{{ .dns.dns_domain }}" + # You can configure internal DNS message rewriting here if needed. +# rewrite: +# - rule: continue +# field: name +# type: exact +# value: "example.com example2.com" +# options: "" + forward: + # DNS query forwarding rules. + - from: "." + # Destination endpoints for forwarding. The 'to' syntax allows protocol specification. + to: ["/etc/resolv.conf"] + # Domains to exclude from forwarding. + except: [] + # Use TCP for forwarding, even if the original request was UDP. + force_tcp: false + # Prefer UDP for forwarding; fallback to TCP if the response is truncated. + prefer_udp: false + # Number of consecutive failed health checks before marking an upstream as down. +# max_fails: 2 + # Time after which cached connections expire. +# expire: 10s + # TLS properties for secure connections can be set here. 
+# tls: +# cert_file: "" +# key_file: "" +# ca_file: "" +# tls_servername: "" + # Policy for selecting upstream servers: random (default), round_robin, sequential. +# policy: "random" + # Health check configuration for upstream servers. +# health_check: "" + # Maximum number of concurrent DNS queries allowed. + max_concurrent: 1000 \ No newline at end of file diff --git a/builtin/core/roles/defaults/defaults/main/05-storage_class.yaml b/builtin/core/roles/defaults/defaults/main/05-storage_class.yaml new file mode 100644 index 00000000..7d0726d1 --- /dev/null +++ b/builtin/core/roles/defaults/defaults/main/05-storage_class.yaml @@ -0,0 +1,32 @@ +# Storage class configuration for Kubernetes persistent storage integration +storage_class: + # Local storage class configuration + local: + enabled: true # Enable local storage class + default: true # Set as the default storage class + provisioner_image: + registry: >- + {{ .image_registry.dockerio_registry }} + repository: openebs/provisioner-localpv + tag: 4.3.0 + linux_utils_image: + registry: >- + {{ .image_registry.dockerio_registry }} + repository: openebs/linux-utils + tag: 4.3.0 + path: /var/openebs/local # Host path for local storage volumes + + # NFS storage class configuration + nfs: + # Ensure nfs-utils is installed on every node in the k8s_cluster group + enabled: false # Enable NFS storage class + default: false # Set as the default storage class + # NFS server address + server: >- + {{ .groups.nfs | default list | first }} + path: /share/kubernetes # NFS export path for persistent volumes + + # ========== storageclass ========== + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.20 \ No newline at end of file diff --git a/builtin/core/roles/defaults/tasks/main.yaml b/builtin/core/roles/defaults/tasks/main.yaml new file mode 100644 index 00000000..22233d69 --- /dev/null +++ b/builtin/core/roles/defaults/tasks/main.yaml @@ -0,0 +1,78 @@ +- name: Defaults 
| Load defaults based on Kubernetes version + block: + - name: Defaults | Load version-specific settings for Kubernetes + when: .kubernetes.kube_version | empty | not + include_vars: >- + {{ slice (.kubernetes.kube_version | splitList ".") 0 2 | join "." }}.yaml + - name: Defaults | Load architecture-specific download URLs for each artifact version + include_vars: 10-download.yaml + +- name: Defaults | Reset temporary directory + command: | + if [ -d {{ .tmp_dir }} ]; then + rm -rf {{ .tmp_dir }} + fi + mkdir -m 777 -p {{ .tmp_dir }} + +- name: Defaults | Determine operating system architecture for each node + set_fact: + binary_type: >- + {{- if .transform_architectures.amd64 | has .os.architecture -}} + amd64 + {{- else if .transform_architectures.arm64 | has .os.architecture -}} + arm64 + {{- end -}} + +- name: Defaults | Gather Kubernetes service status + when: .groups.k8s_cluster | has .inventory_hostname + block: + - name: Defaults | Get kubelet.service LoadState + command: systemctl show kubelet.service -p LoadState --value + register: kubernetes_install_LoadState + - name: Defaults | Get kubelet.service ActiveState + command: systemctl show kubelet.service -p ActiveState --value + register: kubernetes_install_ActiveState + - name: Defaults | Get installed Kubernetes version + ignore_errors: true + command: kubelet --version + register: kubernetes_install_version + +- name: Defaults | Gather ETCD service status + when: .groups.etcd | has .inventory_hostname + block: + - name: Defaults | Get etcd.service LoadState and save to variable + command: systemctl show etcd.service -p LoadState --value + register: etcd_install_LoadState + - name: Defaults | Get etcd.service ActiveState and save to variable + command: systemctl show etcd.service -p ActiveState --value + register: etcd_install_ActiveState + - name: Defaults | Get installed etcd version + ignore_errors: true + command: etcd --version + register: etcd_install_version + register_type: yaml + +- name: 
Defaults | Select the initialization node for the cluster + run_once: true + add_hostvars: + hosts: k8s_cluster + vars: + init_kubernetes_node: >- + {{- $initNodes := list -}} + {{- $notInitNodes := list -}} + {{- range .groups.kube_control_plane -}} + {{- if index $.hostvars . "kubernetes_install_LoadState" "stdout" | eq "loaded" -}} + {{- $initNodes = append $initNodes . -}} + {{- else if index $.hostvars . "kubernetes_install_LoadState" "stdout" | eq "not-found" -}} + {{- $notInitNodes = append $notInitNodes . -}} + {{- end -}} + {{- end -}} + {{- if $initNodes | len | eq 1 -}} + {{ $initNodes | first }} + {{- else if $initNodes | len | lt 1 -}} + {{ index $initNodes (randInt 0 (($initNodes | len) | int)) }} + {{- else if $notInitNodes | len | eq 1 -}} + {{ $notInitNodes | first }} + {{- else if $notInitNodes | len | lt 1 -}} + {{ index $notInitNodes (randInt 0 (($notInitNodes | len) | int)) }} + {{- end -}} \ No newline at end of file diff --git a/builtin/core/roles/defaults/vars/10-download.yaml b/builtin/core/roles/defaults/vars/10-download.yaml new file mode 100644 index 00000000..05b48fcd --- /dev/null +++ b/builtin/core/roles/defaults/vars/10-download.yaml @@ -0,0 +1,224 @@ +download: + # if set as "cn", so that online downloads will try to use available domestic sources whenever possible. + zone: "" + arch: [ "amd64" ] + # offline artifact package for kk. + artifact_file: "" + # the md5_file of artifact_file. 
+ artifact_md5: "" + artifact_url: + etcd: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .etcd.etcd_version }}/etcd-{{ .etcd.etcd_version }}-linux-amd64.tar.gz + {{- else -}} + https://github.com/etcd-io/etcd/releases/download/{{ .etcd.etcd_version }}/etcd-{{ .etcd.etcd_version }}-linux-amd64.tar.gz + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .etcd.etcd_version }}/etcd-{{ .etcd.etcd_version }}-linux-arm64.tar.gz + {{- else -}} + https://github.com/etcd-io/etcd/releases/download/{{ .etcd.etcd_version }}/etcd-{{ .etcd.etcd_version }}-linux-arm64.tar.gz + {{- end -}} + kubeadm: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/release/{{ .kubernetes.kube_version }}/bin/linux/amd64/kubeadm + {{- else -}} + https://dl.k8s.io/release/{{ .kubernetes.kube_version }}/bin/linux/amd64/kubeadm + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/release/{{ .kubernetes.kube_version }}/bin/linux/arm64/kubeadm + {{- else -}} + https://dl.k8s.io/release/{{ .kubernetes.kube_version }}/bin/linux/arm64/kubeadm + {{- end -}} + kubelet: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/release/{{ .kubernetes.kube_version }}/bin/linux/amd64/kubelet + {{- else -}} + https://dl.k8s.io/release/{{ .kubernetes.kube_version }}/bin/linux/amd64/kubelet + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/release/{{ .kubernetes.kube_version }}/bin/linux/arm64/kubelet + {{- else -}} + https://dl.k8s.io/release/{{ .kubernetes.kube_version }}/bin/linux/arm64/kubelet + {{- end -}} + kubectl: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/release/{{ .kubernetes.kube_version 
}}/bin/linux/amd64/kubectl + {{- else -}} + https://dl.k8s.io/release/{{ .kubernetes.kube_version }}/bin/linux/amd64/kubectl + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/release/{{ .kubernetes.kube_version }}/bin/linux/arm64/kubectl + {{- else -}} + https://dl.k8s.io/release/{{ .kubernetes.kube_version }}/bin/linux/arm64/kubectl + {{- end -}} + cni_plugins: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ .cni.cni_plugins_version }}/cni-plugins-linux-amd64-{{ .cni.cni_plugins_version }}.tgz + {{- else -}} + https://github.com/containernetworking/plugins/releases/download/{{ .cni.cni_plugins_version }}/cni-plugins-linux-amd64-{{ .cni.cni_plugins_version }}.tgz + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ .cni.cni_plugins_version }}/cni-plugins-linux-arm64-{{ .cni.cni_plugins_version }}.tgz + {{- else -}} + https://github.com/containernetworking/plugins/releases/download/{{ .cni.cni_plugins_version }}/cni-plugins-linux-arm64-{{ .cni.cni_plugins_version }}.tgz + {{- end -}} + helm: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-helm.pek3b.qingstor.com/helm-{{ .kubernetes.helm_version }}-linux-amd64.tar.gz + {{- else -}} + https://get.helm.sh/helm-{{ .kubernetes.helm_version }}-linux-amd64.tar.gz + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-helm.pek3b.qingstor.com/helm-{{ .kubernetes.helm_version }}-linux-arm64.tar.gz + {{- else -}} + https://get.helm.sh/helm-{{ .kubernetes.helm_version }}-linux-arm64.tar.gz + {{- end -}} + crictl: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ .cri.crictl_version }}/crictl-{{ .cri.crictl_version }}-linux-amd64.tar.gz + {{- else -}} + 
https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ .cri.crictl_version }}/crictl-{{ .cri.crictl_version }}-linux-amd64.tar.gz + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ .cri.crictl_version }}/crictl-{{ .cri.crictl_version }}-linux-arm64.tar.gz + {{- else -}} + https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ .cri.crictl_version }}/crictl-{{ .cri.crictl_version }}-linux-arm64.tar.gz + {{- end -}} + docker: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://mirrors.aliyun.com/docker-ce/linux/static/stable/x86_64/docker-{{ .cri.docker_version }}.tgz + {{- else -}} + https://download.docker.com/linux/static/stable/x86_64/docker-{{ .cri.docker_version }}.tgz + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + https://mirrors.aliyun.com/docker-ce/linux/static/stable/aarch64/docker-{{ .cri.docker_version }}.tgz + {{- else -}} + https://download.docker.com/linux/static/stable/aarch64/docker-{{ .cri.docker_version }}.tgz + {{- end -}} + cridockerd: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ .cri.cridockerd_version }}/cri-dockerd-{{ .cri.cridockerd_version | default "" | trimPrefix "v" }}.amd64.tgz + {{- else -}} + https://github.com/Mirantis/cri-dockerd/releases/download/{{ .cri.cridockerd_version }}/cri-dockerd-{{ .cri.cridockerd_version | default "" | trimPrefix "v" }}.amd64.tgz + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ .cri.cridockerd_version }}/cri-dockerd-{{ .cri.cridockerd_version | default "" | trimPrefix "v" }}.arm64.tgz + {{- else -}} + https://github.com/Mirantis/cri-dockerd/releases/download/{{ .cri.cridockerd_version }}/cri-dockerd-{{ .cri.cridockerd_version | default "" | trimPrefix "v" }}.arm64.tgz + {{- end -}} + containerd: + amd64: >- + {{- 
if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ .cri.containerd_version }}/containerd-{{ .cri.containerd_version | default "" | trimPrefix "v" }}-linux-amd64.tar.gz + {{- else -}} + https://github.com/containerd/containerd/releases/download/{{ .cri.containerd_version }}/containerd-{{ .cri.containerd_version | default "" | trimPrefix "v" }}-linux-amd64.tar.gz + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ .cri.containerd_version }}/containerd-{{ .cri.containerd_version | default "" | trimPrefix "v" }}-linux-arm64.tar.gz + {{- else -}} + https://github.com/containerd/containerd/releases/download/{{ .cri.containerd_version }}/containerd-{{ .cri.containerd_version | default "" | trimPrefix "v" }}-linux-arm64.tar.gz + {{- end -}} + runc: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ .cri.runc_version }}/runc.amd64 + {{- else -}} + https://github.com/opencontainers/runc/releases/download/{{ .cri.runc_version }}/runc.amd64 + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ .cri.runc_version }}/runc.arm64 + {{- else -}} + https://github.com/opencontainers/runc/releases/download/{{ .cri.runc_version }}/runc.arm64 + {{- end -}} + dockercompose: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ .cri.dockercompose_version }}/docker-compose-linux-x86_64 + {{- else -}} + https://github.com/docker/compose/releases/download/{{ .cri.dockercompose_version }}/docker-compose-linux-x86_64 + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + 
https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ .cri.dockercompose_version }}/docker-compose-linux-aarch64 + {{- else -}} + https://github.com/docker/compose/releases/download/{{ .cri.dockercompose_version }}/docker-compose-linux-aarch64 + {{- end -}} +# docker_registry: +# amd64: >- +# {{- if .download.zone | eq "cn" -}} +# https://kubernetes-release.pek3b.qingstor.com/registry/{{ .image_registry.docker_registry_version }}/docker-registry-{{ .image_registry.docker_registry_version }}-linux-amd64.tgz +# {{- else -}} +# https://github.com/kubesphere/kubekey/releases/download/{{ .image_registry.docker_registry_version }}/docker-registry-{{ .image_registry.docker_registry_version }}-linux-amd64.tgz +# {{- end -}} +# arm64: >- +# {{- if .download.zone | eq "cn" -}} +# https://kubernetes-release.pek3b.qingstor.com/registry/{{ .image_registry.docker_registry_version }}/docker-registry-{{ .image_registry.docker_registry_version }}-linux-arm64.tgz +# {{- else -}} +# https://github.com/kubesphere/kubekey/releases/download/{{ .image_registry.docker_registry_version }}/docker-registry-{{ .image_registry.docker_registry_version }}-linux-arm64.tgz +# {{- end -}} + harbor: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://github.com/goharbor/harbor/releases/download/{{ .image_registry.harbor_version }}/harbor-offline-installer-{{ .image_registry.harbor_version }}.tgz + {{- else -}} + https://github.com/goharbor/harbor/releases/download/{{ .image_registry.harbor_version }}/harbor-offline-installer-{{ .image_registry.harbor_version }}.tgz + {{- end -}} +# arm64: >- +# {{- if .download.zone | eq "cn" -}} +# https://github.com/goharbor/harbor/releases/download/{{ .image_registry.harbor_version }}/harbor-{{ .image_registry.harbor_version }}-linux-arm64.tgz +# {{- else -}} +# https://github.com/goharbor/harbor/releases/download/{{ .image_registry.harbor_version }}/harbor-{{ .image_registry.harbor_version }}-linux-arm64.tgz +# {{- end -}} +# 
keepalived: +# amd64: >- +# {{- if .download.zone | eq "cn" -}} +# https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ .image_registry.keepalived_version }}/keepalived-{{ .image_registry.keepalived_version }}-linux-amd64.tgz +# {{- else -}} +# https://github.com/osixia/keepalived/releases/download/{{ .image_registry.keepalived_version }}/keepalived-{{ .image_registry.keepalived_version }}-linux-amd64.tgz +# {{- end -}} +# arm64: >- +# {{- if .download.zone | eq "cn" -}} +# https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ .image_registry.keepalived_version }}/keepalived-{{ .image_registry.keepalived_version }}-linux-arm64.tgz +# {{- else -}} +# https://github.com/osixia/keepalived/releases/download/{{ .image_registry.keepalived_version }}/keepalived-{{ .image_registry.keepalived_version }}-linux-arm64.tgz +# {{- end -}} + # Notice: In the early calico helm chart, appVersion is not same as version(eg. v3.17.4) + calico: https://github.com/projectcalico/calico/releases/download/{{ .cni.calico_version }}/tigera-operator-{{ .cni.calico_version }}.tgz + calicoctl: + amd64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ .cni.calico_version }}/calicoctl-linux-amd64 + {{- else -}} + https://github.com/projectcalico/calico/releases/download/{{ .cni.calico_version }}/calicoctl-linux-amd64 + {{- end -}} + arm64: >- + {{- if .download.zone | eq "cn" -}} + https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ .cni.calico_version }}/calicoctl-linux-arm64 + {{- else -}} + https://github.com/projectcalico/calico/releases/download/{{ .cni.calico_version }}/calicoctl-linux-arm64 + {{- end -}} + cilium: https://helm.cilium.io/cilium-{{ .cni.cilium_version }}.tgz + kubeovn: https://kubeovn.github.io/kube-ovn/kube-ovn-{{ .cni.kubeovn_version }}.tgz + hybridnet: 
https://github.com/alibaba/hybridnet/releases/download/helm-chart-{{ .cni.hybridnet_version }}/hybridnet-{{ .cni.hybridnet_version }}.tgz + nfs_provisioner: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/releases/download/nfs-subdir-external-provisioner-{{ .storage_class.nfs_provisioner_version }}/nfs-subdir-external-provisioner-{{ .storage_class.nfs_provisioner_version }}.tgz + download_image: true diff --git a/builtin/core/roles/defaults/vars/v1.23.yaml b/builtin/core/roles/defaults/vars/v1.23.yaml new file mode 100644 index 00000000..0f535a07 --- /dev/null +++ b/builtin/core/roles/defaults/vars/v1.23.yaml @@ -0,0 +1,99 @@ +apiVersion: kubekey.kubesphere.io/v1 +kind: Config +spec: + kubernetes: + # helm binary + helm_version: v3.8.2 + etcd: + # etcd binary + etcd_version: v3.5.4 + image_registry: + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.6.3 + # docker-compose binary + dockercompose_version: v2.12.2 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 + cri: + # support: containerd,docker + container_manager: docker + sandbox_image: + tag: "3.6" + # ========== cri ========== + # crictl binary + crictl_version: v1.23.0 + # ========== cri: docker ========== + # docker binary + docker_version: 20.10.18 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.10 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.6.8 + # runc binary + runc_version: v1.1.4 + cni: + multus: + image: + tag: v3.9.3 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.1.1 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.24.5 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.12.6 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.10.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.3.0 + linux_utils_image: + tag: 3.3.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.2 + dns: + dns_image: + tag: v1.8.6 + dns_cache_image: + tag: 1.21.1 + # image_manifests: + # - docker.io/calico/apiserver:v3.24.5 + # - docker.io/calico/cni:v3.24.5 + # - docker.io/calico/csi:v3.24.5 + # - docker.io/calico/kube-controllers:v3.24.5 + # - docker.io/calico/node-driver-registrar:v3.24.5 + # - docker.io/calico/node:v3.24.5 + # - docker.io/calico/pod2daemon-flexvol:v3.24.5 + # - docker.io/kubesphere/k8s-dns-node-cache:1.22.20 + # - docker.io/openebs/provisioner-localpv:3.3.0 + # - docker.io/coredns/coredns:1.8.6 + # - docker.io/kubesphere/kube-apiserver:{{ .kube_version }} + # - docker.io/kubesphere/kube-controller-manager:{{ .kube_version }} + # - docker.io/kubesphere/kube-proxy:{{ .kube_version }} + # - docker.io/kubesphere/kube-scheduler:{{ .kube_version }} + # - docker.io/kubesphere/pause:3.6 + # - quay.io/tigera/operator:v1.28.5 + # - docker.io/calico/ctl:v3.24.5 + # - docker.io/calico/typha:v3.24.5 + # - docker.io/calico/apiserver:v3.24.5 + # - docker.io/calico/kube-controllers:v3.24.5 + # - 
docker.io/calico/node:v3.24.5 + # - docker.io/calico/pod2daemon-flexvol:v3.24.5 + # - docker.io/calico/cni:v3.24.5 + # - docker.io/calico/node-driver-registrar:v3.24.5 + # - docker.io/calico/csi:v3.24.5 diff --git a/builtin/core/roles/defaults/vars/v1.24.yaml b/builtin/core/roles/defaults/vars/v1.24.yaml new file mode 100644 index 00000000..05249065 --- /dev/null +++ b/builtin/core/roles/defaults/vars/v1.24.yaml @@ -0,0 +1,100 @@ +apiVersion: kubekey.kubesphere.io/v1 +kind: Config +spec: + kubernetes: + # helm binary + helm_version: v3.10.3 + etcd: + # etcd binary + etcd_version: v3.5.6 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: v2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.7.1 + # docker-compose binary + dockercompose_version: v2.14.0 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 + cri: + # support: containerd,docker + container_manager: containerd + sandbox_image: + tag: "3.6" + # ========== cri ========== + # crictl binary + crictl_version: v1.24.0 + # ========== cri: docker ========== + # docker binary + docker_version: 20.10.24 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.6.16 + # runc binary + runc_version: v1.1.4 + cni: + multus: + image: + tag: v3.10.1 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.1.1 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.25.1 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.13.5 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.10.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.4.0 + linux_utils_image: + tag: 3.4.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.2 + dns: + dns_image: + tag: v1.8.6 + dns_cache_image: + tag: 1.22.20 + # image_manifests: + # - docker.io/calico/apiserver:v3.25.1 + # - docker.io/calico/cni:v3.25.1 + # - docker.io/calico/csi:v3.25.1 + # - docker.io/calico/kube-controllers:v3.25.1 + # - docker.io/calico/node-driver-registrar:v3.25.1 + # - docker.io/calico/node:v3.25.1 + # - docker.io/calico/pod2daemon-flexvol:v3.25.1 + # - docker.io/kubesphere/k8s-dns-node-cache:1.22.20 + # - docker.io/openebs/provisioner-localpv:3.3.0 + # - docker.io/coredns/coredns:1.8.6 + # - docker.io/kubesphere/kube-apiserver:{{ .kube_version }} + # - docker.io/kubesphere/kube-controller-manager:{{ .kube_version }} + # - docker.io/kubesphere/kube-proxy:{{ .kube_version }} + # - docker.io/kubesphere/kube-scheduler:{{ .kube_version }} + # - docker.io/kubesphere/pause:3.6 + # - quay.io/tigera/operator:v1.29.3 + # - docker.io/calico/ctl:v3.25.1 + # - docker.io/calico/typha:v3.25.1 + # - docker.io/calico/apiserver:v3.25.1 + # - docker.io/calico/kube-controllers:v3.25.1 + # - 
docker.io/calico/node:v3.25.1 + # - docker.io/calico/pod2daemon-flexvol:v3.25.1 + # - docker.io/calico/cni:v3.25.1 + # - docker.io/calico/node-driver-registrar:v3.25.1 + # - docker.io/calico/csi:v3.25.1 diff --git a/builtin/core/roles/defaults/vars/v1.25.yaml b/builtin/core/roles/defaults/vars/v1.25.yaml new file mode 100644 index 00000000..79efaba5 --- /dev/null +++ b/builtin/core/roles/defaults/vars/v1.25.yaml @@ -0,0 +1,100 @@ +apiVersion: kubekey.kubesphere.io/v1 +kind: Config +spec: + kubernetes: + # helm binary + helm_version: v3.10.3 + etcd: + # etcd binary + etcd_version: v3.5.7 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.8.1 + # docker-compose binary + dockercompose_version: v2.15.1 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 + cri: + # support: containerd,docker + container_manager: containerd + sandbox_image: + tag: "3.6" + # ========== cri ========== + # crictl binary + crictl_version: v1.25.0 + # ========== cri: docker ========== + # docker binary + docker_version: 20.10.24 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.6.19 + # runc binary + runc_version: v1.1.4 + cni: + multus: + image: + tag: v3.11.3 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.1.1 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.25.1 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.13.5 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.10.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.5.0 + linux_utils_image: + tag: 3.5.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.8 + dns: + dns_image: + tag: v1.9.3 + dns_cache_image: + tag: 1.22.20 + # image_manifests: + # - docker.io/calico/apiserver:v3.25.1 + # - docker.io/calico/cni:v3.25.1 + # - docker.io/calico/csi:v3.25.1 + # - docker.io/calico/kube-controllers:v3.25.1 + # - docker.io/calico/node-driver-registrar:v3.25.1 + # - docker.io/calico/node:v3.25.1 + # - docker.io/calico/pod2daemon-flexvol:v3.25.1 + # - docker.io/kubesphere/k8s-dns-node-cache:1.22.20 + # - docker.io/openebs/provisioner-localpv:3.3.0 + # - docker.io/coredns/coredns:1.8.6 + # - docker.io/kubesphere/kube-apiserver:{{ .kube_version }} + # - docker.io/kubesphere/kube-controller-manager:{{ .kube_version }} + # - docker.io/kubesphere/kube-proxy:{{ .kube_version }} + # - docker.io/kubesphere/kube-scheduler:{{ .kube_version }} + # - docker.io/kubesphere/pause:3.6 + # - quay.io/tigera/operator:v1.29.3 + # - docker.io/calico/ctl:v3.25.1 + # - docker.io/calico/typha:v3.25.1 + # - docker.io/calico/apiserver:v3.25.1 + # - docker.io/calico/kube-controllers:v3.25.1 + # - 
docker.io/calico/node:v3.25.1 + # - docker.io/calico/pod2daemon-flexvol:v3.25.1 + # - docker.io/calico/cni:v3.25.1 + # - docker.io/calico/node-driver-registrar:v3.25.1 + # - docker.io/calico/csi:v3.25.1 diff --git a/builtin/core/roles/defaults/vars/v1.26.yaml b/builtin/core/roles/defaults/vars/v1.26.yaml new file mode 100644 index 00000000..4012912a --- /dev/null +++ b/builtin/core/roles/defaults/vars/v1.26.yaml @@ -0,0 +1,100 @@ +apiVersion: kubekey.kubesphere.io/v1 +kind: Config +spec: + kubernetes: + # helm binary + helm_version: v3.11.2 + etcd: + # etcd binary + etcd_version: v3.5.8 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.9.1 + # docker-compose binary + dockercompose_version: v2.16.0 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 + cri: + # support: containerd,docker + container_manager: containerd + sandbox_image: + tag: "3.7" + # ========== cri ========== + # crictl binary + crictl_version: v1.26.0 + # ========== cri: docker ========== + # docker binary + docker_version: 23.0.6 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.6.21 + # runc binary + runc_version: v1.1.5 + cni: + multus: + image: + tag: v4.0.2 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.26.1 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.13.5 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.10.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.6.0 + linux_utils_image: + tag: 3.6.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.8 + dns: + dns_image: + tag: v1.9.3 + dns_cache_image: + tag: 1.22.20 + # image_manifests: + # - docker.io/calico/apiserver:v3.26.1 + # - docker.io/calico/cni:v3.26.1 + # - docker.io/calico/csi:v3.26.1 + # - docker.io/calico/kube-controllers:v3.26.1 + # - docker.io/calico/node-driver-registrar:v3.26.1 + # - docker.io/calico/node:v3.26.1 + # - docker.io/calico/pod2daemon-flexvol:v3.26.1 + # - docker.io/kubesphere/k8s-dns-node-cache:1.22.20 + # - docker.io/openebs/provisioner-localpv:3.3.0 + # - docker.io/coredns/coredns:1.8.6 + # - docker.io/kubesphere/kube-apiserver:{{ .kube_version }} + # - docker.io/kubesphere/kube-controller-manager:{{ .kube_version }} + # - docker.io/kubesphere/kube-proxy:{{ .kube_version }} + # - docker.io/kubesphere/kube-scheduler:{{ .kube_version }} + # - docker.io/kubesphere/pause:3.7 + # - quay.io/tigera/operator:v1.30.4 + # - docker.io/calico/ctl:v3.26.1 + # - docker.io/calico/typha:v3.26.1 + # - docker.io/calico/apiserver:v3.26.1 + # - docker.io/calico/kube-controllers:v3.26.1 + # - 
docker.io/calico/node:v3.26.1 + # - docker.io/calico/pod2daemon-flexvol:v3.26.1 + # - docker.io/calico/cni:v3.26.1 + # - docker.io/calico/node-driver-registrar:v3.26.1 + # - docker.io/calico/csi:v3.26.1 diff --git a/builtin/core/roles/defaults/vars/v1.27.yaml b/builtin/core/roles/defaults/vars/v1.27.yaml new file mode 100644 index 00000000..871157f8 --- /dev/null +++ b/builtin/core/roles/defaults/vars/v1.27.yaml @@ -0,0 +1,100 @@ +apiVersion: kubekey.kubesphere.io/v1 +kind: Config +spec: + kubernetes: + # helm binary + helm_version: v3.12.1 + etcd: + # etcd binary + etcd_version: v3.5.9 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 + cri: + # support: containerd,docker + container_manager: containerd + sandbox_image: + tag: "3.7" + # ========== cri ========== + # crictl binary + crictl_version: v1.27.0 + # ========== cri: docker ========== + # docker binary + docker_version: 23.0.6 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.2 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.0.2 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.26.1 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.14.2 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.11.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.7.0 + linux_utils_image: + tag: 3.7.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.10 + dns: + dns_image: + tag: v1.10.1 + dns_cache_image: + tag: 1.22.20 + # image_manifests: + # - docker.io/calico/apiserver:v3.26.1 + # - docker.io/calico/cni:v3.26.1 + # - docker.io/calico/csi:v3.26.1 + # - docker.io/calico/kube-controllers:v3.26.1 + # - docker.io/calico/node-driver-registrar:v3.26.1 + # - docker.io/calico/node:v3.26.1 + # - docker.io/calico/pod2daemon-flexvol:v3.26.1 + # - docker.io/kubesphere/k8s-dns-node-cache:1.22.20 + # - docker.io/openebs/provisioner-localpv:3.3.0 + # - docker.io/coredns/coredns:1.8.6 + # - docker.io/kubesphere/kube-apiserver:{{ .kube_version }} + # - docker.io/kubesphere/kube-controller-manager:{{ .kube_version }} + # - docker.io/kubesphere/kube-proxy:{{ .kube_version }} + # - docker.io/kubesphere/kube-scheduler:{{ .kube_version }} + # - docker.io/kubesphere/pause:3.7 + # - quay.io/tigera/operator:v1.30.4 + # - docker.io/calico/ctl:v3.26.1 + # - docker.io/calico/typha:v3.26.1 + # - docker.io/calico/apiserver:v3.26.1 + # - docker.io/calico/kube-controllers:v3.26.1 + # - 
docker.io/calico/node:v3.26.1 + # - docker.io/calico/pod2daemon-flexvol:v3.26.1 + # - docker.io/calico/cni:v3.26.1 + # - docker.io/calico/node-driver-registrar:v3.26.1 + # - docker.io/calico/csi:v3.26.1 diff --git a/builtin/core/roles/defaults/vars/v1.28.yaml b/builtin/core/roles/defaults/vars/v1.28.yaml new file mode 100644 index 00000000..347120ab --- /dev/null +++ b/builtin/core/roles/defaults/vars/v1.28.yaml @@ -0,0 +1,100 @@ +apiVersion: kubekey.kubesphere.io/v1 +kind: Config +spec: + kubernetes: + # helm binary + helm_version: v3.12.1 + etcd: + # etcd binary + etcd_version: v3.5.9 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 + cri: + # support: containerd,docker + container_manager: containerd + sandbox_image: + tag: "3.8" + # ========== cri ========== + # crictl binary + crictl_version: v1.28.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.6 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.3 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.1.0 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.0 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.12.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.8.0 + linux_utils_image: + tag: 3.8.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.0.10 + dns: + dns_image: + tag: v1.10.1 + dns_cache_image: + tag: 1.22.20 + # image_manifests: + # - docker.io/calico/apiserver:v3.28.2 + # - docker.io/calico/cni:v3.28.2 + # - docker.io/calico/csi:v3.28.2 + # - docker.io/calico/kube-controllers:v3.28.2 + # - docker.io/calico/node-driver-registrar:v3.28.2 + # - docker.io/calico/node:v3.28.2 + # - docker.io/calico/pod2daemon-flexvol:v3.28.2 + # - docker.io/kubesphere/k8s-dns-node-cache:1.22.20 + # - docker.io/openebs/provisioner-localpv:3.3.0 + # - docker.io/coredns/coredns:1.8.6 + # - docker.io/kubesphere/kube-apiserver:{{ .kube_version }} + # - docker.io/kubesphere/kube-controller-manager:{{ .kube_version }} + # - docker.io/kubesphere/kube-proxy:{{ .kube_version }} + # - docker.io/kubesphere/kube-scheduler:{{ .kube_version }} + # - docker.io/kubesphere/pause:3.8 + # - quay.io/tigera/operator:v1.34.5 + # - docker.io/calico/ctl:v3.28.2 + # - docker.io/calico/typha:v3.28.2 + # - docker.io/calico/apiserver:v3.28.2 + # - docker.io/calico/kube-controllers:v3.28.2 + # - 
docker.io/calico/node:v3.28.2 + # - docker.io/calico/pod2daemon-flexvol:v3.28.2 + # - docker.io/calico/cni:v3.28.2 + # - docker.io/calico/node-driver-registrar:v3.28.2 + # - docker.io/calico/csi:v3.28.2 diff --git a/builtin/core/roles/defaults/vars/v1.29.yaml b/builtin/core/roles/defaults/vars/v1.29.yaml new file mode 100644 index 00000000..639e7316 --- /dev/null +++ b/builtin/core/roles/defaults/vars/v1.29.yaml @@ -0,0 +1,100 @@ +apiVersion: kubekey.kubesphere.io/v1 +kind: Config +spec: + kubernetes: + # helm binary + helm_version: v3.13.3 + etcd: + # etcd binary + etcd_version: v3.5.10 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 + cri: + # support: containerd,docker + container_manager: containerd + sandbox_image: + tag: "3.8" + # ========== cri ========== + # crictl binary + crictl_version: v1.29.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.7 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.6 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.1.1 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.4 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.13.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 3.9.0 + linux_utils_image: + tag: 3.9.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.1.0 + dns: + dns_image: + tag: v1.11.1 + dns_cache_image: + tag: 1.23.1 + # image_manifests: + # - docker.io/calico/apiserver:v3.28.2 + # - docker.io/calico/cni:v3.28.2 + # - docker.io/calico/csi:v3.28.2 + # - docker.io/calico/kube-controllers:v3.28.2 + # - docker.io/calico/node-driver-registrar:v3.28.2 + # - docker.io/calico/node:v3.28.2 + # - docker.io/calico/pod2daemon-flexvol:v3.28.2 + # - docker.io/kubesphere/k8s-dns-node-cache:1.22.20 + # - docker.io/openebs/provisioner-localpv:3.3.0 + # - docker.io/coredns/coredns:1.8.6 + # - docker.io/kubesphere/kube-apiserver:{{ .kube_version }} + # - docker.io/kubesphere/kube-controller-manager:{{ .kube_version }} + # - docker.io/kubesphere/kube-proxy:{{ .kube_version }} + # - docker.io/kubesphere/kube-scheduler:{{ .kube_version }} + # - docker.io/kubesphere/pause:3.8 + # - quay.io/tigera/operator:v1.34.5 + # - docker.io/calico/ctl:v3.28.2 + # - docker.io/calico/typha:v3.28.2 + # - docker.io/calico/apiserver:v3.28.2 + # - docker.io/calico/kube-controllers:v3.28.2 + # - 
docker.io/calico/node:v3.28.2 + # - docker.io/calico/pod2daemon-flexvol:v3.28.2 + # - docker.io/calico/cni:v3.28.2 + # - docker.io/calico/node-driver-registrar:v3.28.2 + # - docker.io/calico/csi:v3.28.2 diff --git a/builtin/core/roles/defaults/vars/v1.30.yaml b/builtin/core/roles/defaults/vars/v1.30.yaml new file mode 100644 index 00000000..bd2f5655 --- /dev/null +++ b/builtin/core/roles/defaults/vars/v1.30.yaml @@ -0,0 +1,100 @@ +apiVersion: kubekey.kubesphere.io/v1 +kind: Config +spec: + kubernetes: + # helm binary + helm_version: v3.13.3 + etcd: + # etcd binary + etcd_version: v3.5.10 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 + cri: + # support: containerd,docker + container_manager: containerd + sandbox_image: + tag: "3.8" + # ========== cri ========== + # crictl binary + crictl_version: v1.30.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.7 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.6 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.2.1 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.4 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.13.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 4.0.0 + linux_utils_image: + tag: 4.0.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.1.0 + dns: + dns_image: + tag: v1.11.1 + dns_cache_image: + tag: 1.23.1 + # image_manifests: + # - docker.io/calico/apiserver:v3.28.2 + # - docker.io/calico/cni:v3.28.2 + # - docker.io/calico/csi:v3.28.2 + # - docker.io/calico/kube-controllers:v3.28.2 + # - docker.io/calico/node-driver-registrar:v3.28.2 + # - docker.io/calico/node:v3.28.2 + # - docker.io/calico/pod2daemon-flexvol:v3.28.2 + # - docker.io/kubesphere/k8s-dns-node-cache:1.22.20 + # - docker.io/openebs/provisioner-localpv:3.3.0 + # - docker.io/coredns/coredns:1.8.6 + # - docker.io/kubesphere/kube-apiserver:{{ .kube_version }} + # - docker.io/kubesphere/kube-controller-manager:{{ .kube_version }} + # - docker.io/kubesphere/kube-proxy:{{ .kube_version }} + # - docker.io/kubesphere/kube-scheduler:{{ .kube_version }} + # - docker.io/kubesphere/pause:3.8 + # - quay.io/tigera/operator:v1.34.5 + # - docker.io/calico/ctl:v3.28.2 + # - docker.io/calico/typha:v3.28.2 + # - docker.io/calico/apiserver:v3.28.2 + # - docker.io/calico/kube-controllers:v3.28.2 + # - 
docker.io/calico/node:v3.28.2 + # - docker.io/calico/pod2daemon-flexvol:v3.28.2 + # - docker.io/calico/cni:v3.28.2 + # - docker.io/calico/node-driver-registrar:v3.28.2 + # - docker.io/calico/csi:v3.28.2 diff --git a/builtin/core/roles/defaults/vars/v1.31.yaml b/builtin/core/roles/defaults/vars/v1.31.yaml new file mode 100644 index 00000000..8ba1b54d --- /dev/null +++ b/builtin/core/roles/defaults/vars/v1.31.yaml @@ -0,0 +1,100 @@ +apiVersion: kubekey.kubesphere.io/v1 +kind: Config +spec: + kubernetes: + # helm binary + helm_version: v3.13.3 + etcd: + # etcd binary + etcd_version: v3.5.11 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 + cri: + # support: containerd,docker + container_manager: containerd + sandbox_image: + tag: "3.8" + # ========== cri ========== + # crictl binary + crictl_version: v1.31.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.7 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.6 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.2.1 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.4 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.13.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 4.1.0 + linux_utils_image: + tag: 4.1.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.2.0 + dns: + dns_image: + tag: v1.11.1 + dns_cache_image: + tag: 1.23.1 + # image_manifests: + # - docker.io/calico/apiserver:v3.28.2 + # - docker.io/calico/cni:v3.28.2 + # - docker.io/calico/csi:v3.28.2 + # - docker.io/calico/kube-controllers:v3.28.2 + # - docker.io/calico/node-driver-registrar:v3.28.2 + # - docker.io/calico/node:v3.28.2 + # - docker.io/calico/pod2daemon-flexvol:v3.28.2 + # - docker.io/kubesphere/k8s-dns-node-cache:1.22.20 + # - docker.io/openebs/provisioner-localpv:3.3.0 + # - docker.io/coredns/coredns:1.8.6 + # - docker.io/kubesphere/kube-apiserver:{{ .kube_version }} + # - docker.io/kubesphere/kube-controller-manager:{{ .kube_version }} + # - docker.io/kubesphere/kube-proxy:{{ .kube_version }} + # - docker.io/kubesphere/kube-scheduler:{{ .kube_version }} + # - docker.io/kubesphere/pause:3.8 + # - quay.io/tigera/operator:v1.34.5 + # - docker.io/calico/ctl:v3.28.2 + # - docker.io/calico/typha:v3.28.2 + # - docker.io/calico/apiserver:v3.28.2 + # - docker.io/calico/kube-controllers:v3.28.2 + # - 
docker.io/calico/node:v3.28.2 + # - docker.io/calico/pod2daemon-flexvol:v3.28.2 + # - docker.io/calico/cni:v3.28.2 + # - docker.io/calico/node-driver-registrar:v3.28.2 + # - docker.io/calico/csi:v3.28.2 diff --git a/builtin/core/roles/defaults/vars/v1.32.yaml b/builtin/core/roles/defaults/vars/v1.32.yaml new file mode 100644 index 00000000..9effaee2 --- /dev/null +++ b/builtin/core/roles/defaults/vars/v1.32.yaml @@ -0,0 +1,100 @@ +apiVersion: kubekey.kubesphere.io/v1 +kind: Config +spec: + kubernetes: + # helm binary + helm_version: v3.14.3 + etcd: + # etcd binary + etcd_version: v3.5.11 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 + cri: + # support: containerd,docker + container_manager: containerd + sandbox_image: + tag: "3.8" + # ========== cri ========== + # crictl binary + crictl_version: v1.32.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.7 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.6 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.3.0 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.4 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.13.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 4.2.0 + linux_utils_image: + tag: 4.2.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.2.0 + dns: + dns_image: + tag: v1.11.1 + dns_cache_image: + tag: 1.24.0 + # image_manifests: + # - docker.io/calico/apiserver:v3.28.2 + # - docker.io/calico/cni:v3.28.2 + # - docker.io/calico/csi:v3.28.2 + # - docker.io/calico/kube-controllers:v3.28.2 + # - docker.io/calico/node-driver-registrar:v3.28.2 + # - docker.io/calico/node:v3.28.2 + # - docker.io/calico/pod2daemon-flexvol:v3.28.2 + # - docker.io/kubesphere/k8s-dns-node-cache:1.22.20 + # - docker.io/openebs/provisioner-localpv:3.3.0 + # - docker.io/coredns/coredns:1.8.6 + # - docker.io/kubesphere/kube-apiserver:{{ .kube_version }} + # - docker.io/kubesphere/kube-controller-manager:{{ .kube_version }} + # - docker.io/kubesphere/kube-proxy:{{ .kube_version }} + # - docker.io/kubesphere/kube-scheduler:{{ .kube_version }} + # - docker.io/kubesphere/pause:3.8 + # - quay.io/tigera/operator:v1.34.5 + # - docker.io/calico/ctl:v3.28.2 + # - docker.io/calico/typha:v3.28.2 + # - docker.io/calico/apiserver:v3.28.2 + # - docker.io/calico/kube-controllers:v3.28.2 + # - 
docker.io/calico/node:v3.28.2 + # - docker.io/calico/pod2daemon-flexvol:v3.28.2 + # - docker.io/calico/cni:v3.28.2 + # - docker.io/calico/node-driver-registrar:v3.28.2 + # - docker.io/calico/csi:v3.28.2 diff --git a/builtin/core/roles/defaults/vars/v1.33.yaml b/builtin/core/roles/defaults/vars/v1.33.yaml new file mode 100644 index 00000000..5718b49c --- /dev/null +++ b/builtin/core/roles/defaults/vars/v1.33.yaml @@ -0,0 +1,100 @@ +apiVersion: kubekey.kubesphere.io/v1 +kind: Config +spec: + kubernetes: + # helm binary + helm_version: v3.18.5 + etcd: + # etcd binary + etcd_version: v3.5.11 + image_registry: + # ========== image registry ========== + # keepalived image tag. Used for load balancing when there are multiple image registry nodes. + keepalived_version: 2.0.20 + # ========== image registry: harbor ========== + # harbor image tag + harbor_version: v2.10.1 + # docker-compose binary + dockercompose_version: v2.20.3 + # ========== image registry: docker-registry ========== + # docker-registry image tag + docker_registry_version: 2.8.3 + cri: + # support: containerd,docker + container_manager: containerd + sandbox_image: + tag: "3.9" + # ========== cri ========== + # crictl binary + crictl_version: v1.33.0 + # ========== cri: docker ========== + # docker binary + docker_version: 24.0.7 + # cridockerd. 
Required when kube_version is greater than 1.24 + cridockerd_version: v0.3.1 + # ========== cri: containerd ========== + # containerd binary + containerd_version: v1.7.6 + # runc binary + runc_version: v1.1.7 + cni: + multus: + image: + tag: v4.3.0 + # ========== cni ========== + # cni_plugins binary (optional) + # cni_plugins_version: v1.2.0 + # ========== cni: calico ========== + # calicoctl binary + calico_version: v3.28.2 + # ========== cni: cilium ========== + # cilium helm + cilium_version: 1.15.4 + # ========== cni: kubeovn ========== + # kubeovn helm + kubeovn_version: 1.13.0 + # ========== cni: hybridnet ========== + # hybridnet helm + hybridnet_version: 0.6.8 + storage_class: + # ========== storageclass ========== + # ========== storageclass: local ========== + local: + provisioner_image: + tag: 4.3.0 + linux_utils_image: + tag: 4.3.0 + # ========== storageclass: nfs ========== + # nfs provisioner helm version + nfs_provisioner_version: 4.3.0 + dns: + dns_image: + tag: v1.11.1 + dns_cache_image: + tag: 1.24.0 + # image_manifests: + # - docker.io/calico/apiserver:v3.28.2 + # - docker.io/calico/cni:v3.28.2 + # - docker.io/calico/csi:v3.28.2 + # - docker.io/calico/kube-controllers:v3.28.2 + # - docker.io/calico/node-driver-registrar:v3.28.2 + # - docker.io/calico/node:v3.28.2 + # - docker.io/calico/pod2daemon-flexvol:v3.28.2 + # - docker.io/kubesphere/k8s-dns-node-cache:1.22.20 + # - docker.io/openebs/provisioner-localpv:3.3.0 + # - docker.io/coredns/coredns:1.8.6 + # - docker.io/kubesphere/kube-apiserver:{{ .kube_version }} + # - docker.io/kubesphere/kube-controller-manager:{{ .kube_version }} + # - docker.io/kubesphere/kube-proxy:{{ .kube_version }} + # - docker.io/kubesphere/kube-scheduler:{{ .kube_version }} + # - docker.io/kubesphere/pause:3.9 + # - quay.io/tigera/operator:v1.34.5 + # - docker.io/calico/ctl:v3.28.2 + # - docker.io/calico/typha:v3.28.2 + # - docker.io/calico/apiserver:v3.28.2 + # - docker.io/calico/kube-controllers:v3.28.2 + # - 
docker.io/calico/node:v3.28.2 + # - docker.io/calico/pod2daemon-flexvol:v3.28.2 + # - docker.io/calico/cni:v3.28.2 + # - docker.io/calico/node-driver-registrar:v3.28.2 + # - docker.io/calico/csi:v3.28.2 diff --git a/builtin/core/roles/init/init-artifact/tasks/download_binary.yaml b/builtin/core/roles/download/tasks/binary.yaml similarity index 56% rename from builtin/core/roles/init/init-artifact/tasks/download_binary.yaml rename to builtin/core/roles/download/tasks/binary.yaml index 76070d1a..768732ad 100644 --- a/builtin/core/roles/init/init-artifact/tasks/download_binary.yaml +++ b/builtin/core/roles/download/tasks/binary.yaml @@ -1,266 +1,288 @@ --- - name: Binary | Ensure etcd binary is present tags: ["etcd"] + loop: "{{ .download.arch | toJson }}" + when: + - .etcd.deployment_type | eq "external" + - .etcd.etcd_version | empty | not command: | - artifact_name={{ get .artifact.artifact_url.etcd .item | splitList "/" | last }} - artifact_path={{ .binary_dir }}/etcd/{{ .etcd_version }}/{{ .item }} + artifact_name={{ get .download.artifact_url.etcd .item | splitList "/" | last }} + artifact_path={{ .binary_dir }}/etcd/{{ .etcd.etcd_version }}/{{ .item }} if [ ! -f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Attempt to download etcd binary - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.etcd .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.etcd .item }}) if [ $http_code != 200 ]; then echo "Failed to download etcd binary. 
HTTP status code: $http_code" exit 1 fi - curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.etcd .item }} + curl -L -o $artifact_path/$artifact_name {{ get .download.artifact_url.etcd .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .etcd_version | empty | not - name: Binary | Ensure Kubernetes binaries are present - tags: ["kube"] + tags: ["kubernetes"] + loop: "{{ .download.arch | toJson }}" + when: .kubernetes.kube_version | empty | not command: | - kube_path={{ .binary_dir }}/kube/{{ .kube_version }}/{{ .item }} + kube_path={{ .binary_dir }}/kube/{{ .kubernetes.kube_version }}/{{ .item }} if [ ! -f $kube_path/kubelet ]; then mkdir -p $kube_path # Download kubelet if missing - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubelet .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.kubelet .item }}) if [ $http_code != 200 ]; then echo "Failed to download kubelet. HTTP status code: $http_code" exit 1 fi - curl -L -o $kube_path/kubelet {{ get .artifact.artifact_url.kubelet .item }} + curl -L -o $kube_path/kubelet {{ get .download.artifact_url.kubelet .item }} fi if [ ! -f $kube_path/kubeadm ]; then mkdir -p $kube_path # Download kubeadm if missing - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubeadm .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.kubeadm .item }}) if [ $http_code != 200 ]; then echo "Failed to download kubeadm. HTTP status code: $http_code" exit 1 fi - curl -L -o $kube_path/kubeadm {{ get .artifact.artifact_url.kubeadm .item }} + curl -L -o $kube_path/kubeadm {{ get .download.artifact_url.kubeadm .item }} fi if [ ! 
-f $kube_path/kubectl ]; then mkdir -p $kube_path # Download kubectl if missing - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubectl .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.kubectl .item }}) if [ $http_code != 200 ]; then echo "Failed to download kubectl. HTTP status code: $http_code" exit 1 fi - curl -L -o $kube_path/kubectl {{ get .artifact.artifact_url.kubectl .item }} + curl -L -o $kube_path/kubectl {{ get .download.artifact_url.kubectl .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .kube_version | empty | not - name: Binary | Ensure CNI plugins are present - tags: ["cni"] + tags: ["kubernetes"] + loop: "{{ .download.arch | toJson }}" + when: .cni.cni_plugins_version | empty | not command: | - artifact_name={{ get .artifact.artifact_url.cni_plugins .item | splitList "/" | last }} - artifact_path={{ .binary_dir }}/cni/plugins/{{ .cni_plugins_version }}/{{ .item }} + artifact_name={{ get .download.artifact_url.cni_plugins .item | splitList "/" | last }} + artifact_path={{ .binary_dir }}/cni/plugins/{{ .cni.cni_plugins_version }}/{{ .item }} if [ ! -f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Attempt to download CNI plugins - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.cni_plugins .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.cni_plugins .item }}) if [ $http_code != 200 ]; then echo "Failed to download CNI plugins. 
HTTP status code: $http_code" exit 1 fi - curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.cni_plugins .item }} + curl -L -o $artifact_path/$artifact_name {{ get .download.artifact_url.cni_plugins .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .cni_plugins_version | empty | not - name: Binary | Ensure Helm binary is present - tags: ["helm"] + tags: ["kubernetes"] + loop: "{{ .download.arch | toJson }}" + when: .kubernetes.helm_version | empty | not command: | - artifact_name={{ get .artifact.artifact_url.helm .item | splitList "/" | last }} - artifact_path={{ .binary_dir }}/helm/{{ .helm_version }}/{{ .item }} + artifact_name={{ get .download.artifact_url.helm .item | splitList "/" | last }} + artifact_path={{ .binary_dir }}/helm/{{ .kubernetes.helm_version }}/{{ .item }} if [ ! -f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Attempt to download Helm binary - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.helm .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.helm .item }}) if [ $http_code != 200 ]; then echo "Failed to download Helm binary. HTTP status code: $http_code" exit 1 fi - curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.helm .item }} + curl -L -o $artifact_path/$artifact_name {{ get .download.artifact_url.helm .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .helm_version | empty | not - name: Binary | Ensure crictl binary is present - tags: ["crictl"] + tags: ["kubernetes"] + loop: "{{ .download.arch | toJson }}" + when: .cri.crictl_version | empty | not command: | - artifact_name={{ get .artifact.artifact_url.crictl .item | splitList "/" | last }} - artifact_path={{ .binary_dir }}/crictl/{{ .crictl_version }}/{{ .item }} + artifact_name={{ get .download.artifact_url.crictl .item | splitList "/" | last }} + artifact_path={{ .binary_dir }}/crictl/{{ .cri.crictl_version }}/{{ .item }} if [ ! 
-f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Attempt to download crictl binary - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.crictl .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.crictl .item }}) if [ $http_code != 200 ]; then echo "Failed to download crictl binary. HTTP status code: $http_code" exit 1 fi - curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.crictl .item }} + curl -L -o $artifact_path/$artifact_name {{ get .download.artifact_url.crictl .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .crictl_version | empty | not - name: Binary | Ensure Docker binary is present - tags: ["docker"] + tags: ["kubernetes","image_registry"] + loop: "{{ .download.arch | toJson }}" + when: + - .cri.docker_version | empty | not + - or (.image_registry.type | empty | not) (.cri.container_manager | eq "docker") command: | - artifact_name={{ get .artifact.artifact_url.docker .item | splitList "/" | last }} - artifact_path={{ .binary_dir }}/docker/{{ .docker_version }}/{{ .item }} + artifact_name={{ get .download.artifact_url.docker .item | splitList "/" | last }} + artifact_path={{ .binary_dir }}/docker/{{ .cri.docker_version }}/{{ .item }} if [ ! -f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Attempt to download Docker binary - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.docker .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.docker .item }}) if [ $http_code != 200 ]; then echo "Failed to download Docker binary. 
HTTP status code: $http_code" exit 1 fi - curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.docker .item }} + curl -L -o $artifact_path/$artifact_name {{ get .download.artifact_url.docker .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .docker_version | empty | not - name: Binary | Ensure cri-dockerd binary is present - tags: ["cridockerd"] + tags: ["kubernetes"] + loop: "{{ .download.arch | toJson }}" + when: + - .cri.cridockerd_version | empty | not + - .cri.container_manager | eq "docker" + - .kubernetes.kube_version | semverCompare ">=v1.24.0" command: | - artifact_name={{ get .artifact.artifact_url.cridockerd .item | splitList "/" | last }} - artifact_path={{ .binary_dir }}/cri-dockerd/{{ .cridockerd_version }}/{{ .item }} + artifact_name={{ get .download.artifact_url.cridockerd .item | splitList "/" | last }} + artifact_path={{ .binary_dir }}/cri-dockerd/{{ .cri.cridockerd_version }}/{{ .item }} if [ ! -f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Attempt to download cri-dockerd binary - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.cridockerd .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.cridockerd .item }}) if [ $http_code != 200 ]; then echo "Failed to download cri-dockerd binary. 
HTTP status code: $http_code" exit 1 fi - curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.cridockerd .item }} + curl -L -o $artifact_path/$artifact_name {{ get .download.artifact_url.cridockerd .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .cridockerd_version | empty | not - name: Binary | Ensure containerd binary is present - tags: ["containerd"] + tags: ["kubernetes"] + loop: "{{ .download.arch | toJson }}" + when: + - .cri.containerd_version | empty | not + - .cri.container_manager | eq "containerd" command: | - artifact_name={{ get .artifact.artifact_url.containerd .item | splitList "/" | last }} - artifact_path={{ .binary_dir }}/containerd/{{ .containerd_version }}/{{ .item }} + artifact_name={{ get .download.artifact_url.containerd .item | splitList "/" | last }} + artifact_path={{ .binary_dir }}/containerd/{{ .cri.containerd_version }}/{{ .item }} if [ ! -f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Attempt to download containerd binary - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.containerd .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.containerd .item }}) if [ $http_code != 200 ]; then echo "Failed to download containerd binary. 
HTTP status code: $http_code" exit 1 fi - curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.containerd .item }} + curl -L -o $artifact_path/$artifact_name {{ get .download.artifact_url.containerd .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .containerd_version | empty | not - name: Binary | Ensure runc binary is present - tags: ["runc"] + tags: ["kubernetes"] + loop: "{{ .download.arch | toJson }}" + when: + - .cri.runc_version | empty | not + - .cri.container_manager | eq "containerd" command: | - artifact_name={{ get .artifact.artifact_url.runc .item | splitList "/" | last }} - artifact_path={{ .binary_dir }}/runc/{{ .runc_version }}/{{ .item }} + artifact_name={{ get .download.artifact_url.runc .item | splitList "/" | last }} + artifact_path={{ .binary_dir }}/runc/{{ .cri.runc_version }}/{{ .item }} if [ ! -f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Attempt to download runc binary - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.runc .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.runc .item }}) if [ $http_code != 200 ]; then echo "Failed to download runc binary. HTTP status code: $http_code" exit 1 fi - curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.runc .item }} + curl -L -o $artifact_path/$artifact_name {{ get .download.artifact_url.runc .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .runc_version | empty | not - name: Binary | Ensure calicoctl binary is present - tags: ["calicoctl"] + tags: ["kubernetes"] + loop: "{{ .download.arch | toJson }}" + when: + - .cni.calico_version | empty | not + - .cni.type | eq "calico" command: | artifact_name=calicoctl - artifact_path={{ .binary_dir }}/cni/calico/{{ .calico_version }}/{{ .item }} + artifact_path={{ .binary_dir }}/cni/calico/{{ .cni.calico_version }}/{{ .item }} if [ ! 
-f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Attempt to download calicoctl binary - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.calicoctl .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.calicoctl .item }}) if [ $http_code != 200 ]; then echo "Failed to download calicoctl binary. HTTP status code: $http_code" exit 1 fi - curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.calicoctl .item }} + curl -L -o $artifact_path/$artifact_name {{ get .download.artifact_url.calicoctl .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .calico_version | empty | not - name: Binary | Ensure Docker Registry binary is present - tags: ["registry"] + tags: ["image_registry"] + loop: "{{ .download.arch | toJson }}" + when: + - .image_registry.docker_registry_version | empty | not + - .image_registry.type | eq "docker-registry" command: | - artifact_name={{ get .artifact.artifact_url.docker_registry .item | splitList "/" | last }} - artifact_path={{ .binary_dir }}/image-registry/docker-registry/{{ .docker_registry_version }}/{{ .item }} + artifact_name={{ get .download.artifact_url.docker_registry .item | splitList "/" | last }} + artifact_path={{ .binary_dir }}/image-registry/docker-registry/{{ .image_registry.docker_registry_version }}/{{ .item }} if [ ! -f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Attempt to download Docker Registry binary - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.docker_registry .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.docker_registry .item }}) if [ $http_code != 200 ]; then echo "Failed to download Docker Registry binary. 
HTTP status code: $http_code" exit 1 fi - curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.docker_registry .item }} + curl -L -o $artifact_path/$artifact_name {{ get .download.artifact_url.docker_registry .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .docker_registry_version | empty | not - name: Binary | Ensure docker-compose binary is present - tags: ["docker-compose"] + tags: ["image_registry"] + loop: "{{ .download.arch | toJson }}" + when: + - .cri.dockercompose_version | empty | not + - .image_registry.type | eq "harbor" command: | compose_name=docker-compose - compose_path={{ .binary_dir }}/image-registry/docker-compose/{{ .dockercompose_version }}/{{ .item }} + compose_path={{ .binary_dir }}/image-registry/docker-compose/{{ .cri.dockercompose_version }}/{{ .item }} if [ ! -f $compose_path/$compose_name ]; then mkdir -p $compose_path # Attempt to download docker-compose binary - curl -L -o $compose_path/$compose_name {{ get .artifact.artifact_url.dockercompose .item }} + curl -L -o $compose_path/$compose_name {{ get .download.artifact_url.dockercompose .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .dockercompose_version | empty | not - name: Binary | Ensure Harbor binary is present - tags: ["harbor"] + tags: ["image_registry"] + loop: "{{ .download.arch | toJson }}" + when: + - .image_registry.harbor_version | empty | not + - .image_registry.type | eq "harbor" command: | - harbor_name={{ get .artifact.artifact_url.harbor .item | splitList "/" | last }} - harbor_path={{ .binary_dir }}/image-registry/harbor/{{ .harbor_version }}/{{ .item }} + harbor_name={{ get .download.artifact_url.harbor .item | splitList "/" | last }} + harbor_path={{ .binary_dir }}/image-registry/harbor/{{ .image_registry.harbor_version }}/{{ .item }} if [ ! 
-f $harbor_path/$harbor_name ]; then mkdir -p $harbor_path # Attempt to download Harbor binary - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.harbor .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.harbor .item }}) if [ $http_code != 200 ]; then echo "Failed to download Harbor binary. HTTP status code: $http_code" exit 1 fi - curl -L -o $harbor_path/$harbor_name {{ get .artifact.artifact_url.harbor .item }} + curl -L -o $harbor_path/$harbor_name {{ get .download.artifact_url.harbor .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .harbor_version | empty | not - name: Binary | Ensure keepalived binary is present - tags: ["keepalived"] + tags: ["image_registry"] + loop: "{{ .download.arch | toJson }}" + when: + - .image_registry.keepalived_version | empty | not + - .image_registry.ha_vip | empty | not + - .groups.image_registry | len | lt 1 command: | - artifact_name={{ get .artifact.artifact_url.keepalived .item | splitList "/" | last }} + artifact_name={{ get .download.artifact_url.keepalived .item | splitList "/" | last }} artifact_path={{ .binary_dir }}/image-registry/keepalived/{{ .keepalived_version }}/{{ .item }} if [ ! -f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Attempt to download keepalived binary - http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.keepalived .item }}) + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .download.artifact_url.keepalived .item }}) if [ $http_code != 200 ]; then echo "Failed to download keepalived binary. 
HTTP status code: $http_code" exit 1 fi - curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.keepalived .item }} + curl -L -o $artifact_path/$artifact_name {{ get .download.artifact_url.keepalived .item }} fi - loop: "{{ .artifact.arch | toJson }}" - when: .keepalived_version | empty | not diff --git a/builtin/core/roles/init/init-artifact/tasks/download_helm.yaml b/builtin/core/roles/download/tasks/helm.yaml similarity index 58% rename from builtin/core/roles/init/init-artifact/tasks/download_helm.yaml rename to builtin/core/roles/download/tasks/helm.yaml index 3383fc2f..29155e27 100644 --- a/builtin/core/roles/init/init-artifact/tasks/download_helm.yaml +++ b/builtin/core/roles/download/tasks/helm.yaml @@ -1,69 +1,78 @@ --- - name: Helm | Ensure the Calico binary is available + when: + - .cni.calico_version | empty | not + - .cni.type | eq "calico" command: | - artifact_name={{ .artifact.artifact_url.calico | splitList "/" | last }} + artifact_name={{ .download.artifact_url.calico | splitList "/" | last }} artifact_path={{ .binary_dir }}/cni/calico if [ ! -f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Download the Calico binary if it does not exist - curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.calico }} + curl -Lo $artifact_path/$artifact_name {{ .download.artifact_url.calico }} fi - when: .calico_version | empty | not - name: Helm | Ensure the Cilium binary is available + when: + - .cni.cilium_version | empty | not + - .cni.type | eq "cilium" command: | - artifact_name={{ .artifact.artifact_url.cilium | splitList "/" | last }} + artifact_name={{ .download.artifact_url.cilium | splitList "/" | last }} artifact_path={{ .binary_dir }}/cni/cilium if [ ! 
-f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Download the Cilium binary if it does not exist - curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.cilium }} + curl -Lo $artifact_path/$artifact_name {{ .download.artifact_url.cilium }} fi - when: .cilium_version | empty | not - name: Helm | Ensure the Flannel binary is available + when: + - .cni.flannel_version | empty | not + - .cni.type | eq "flannel" command: | - artifact_name={{ .artifact.artifact_url.flannel | splitList "/" | last }} + artifact_name={{ .download.artifact_url.flannel | splitList "/" | last }} artifact_path={{ .binary_dir }}/cni/flannel if [ ! -f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Download the Flannel binary if it does not exist - curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.flannel }} + curl -Lo $artifact_path/$artifact_name {{ .download.artifact_url.flannel }} fi - when: .flannel_version | empty | not - name: Helm | Ensure the Kube-OVN binary is available - tags: ["kubeovn"] + when: + - .kubeovn_version | empty | not + - .cni.type | eq "kubeovn" command: | - artifact_name={{ .artifact.artifact_url.kubeovn | splitList "/" | last }} + artifact_name={{ .download.artifact_url.kubeovn | splitList "/" | last }} artifact_path={{ .binary_dir }}/cni/kubeovn if [ ! 
-f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Download the Kube-OVN binary if it does not exist - curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.kubeovn }} + curl -Lo $artifact_path/$artifact_name {{ .download.artifact_url.kubeovn }} fi - when: .kubeovn_version | empty | not - name: Helm | Ensure the Hybridnet binary is available - tags: ["hybridnet"] + when: + - .cni.hybridnet_version | empty | not + - .cni.type | eq "hybridnet" command: | - artifact_name={{ .artifact.artifact_url.hybridnet | splitList "/" | last }} + artifact_name={{ .download.artifact_url.hybridnet | splitList "/" | last }} artifact_path={{ .binary_dir }}/cni/hybridnet if [ ! -f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Download the Hybridnet binary if it does not exist - curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.hybridnet }} + curl -Lo $artifact_path/$artifact_name {{ .download.artifact_url.hybridnet }} fi - when: .hybridnet_version | empty | not - name: Helm | Ensure the NFS Provisioner binary is available - tags: ["nfs_provisioner"] + when: + - .storage_class.nfs_provisioner_version | empty | not + - .storage_class.nfs.enabled command: | - artifact_name={{ .artifact.artifact_url.nfs_provisioner | splitList "/" | last }} + artifact_name={{ .download.artifact_url.nfs_provisioner | splitList "/" | last }} artifact_path={{ .binary_dir }}/sc if [ ! 
-f $artifact_path/$artifact_name ]; then mkdir -p $artifact_path # Download the NFS Provisioner binary if it does not exist - curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.nfs_provisioner }} + curl -Lo $artifact_path/$artifact_name {{ .download.artifact_url.nfs_provisioner }} fi - when: .nfs_provisioner_version | empty | not diff --git a/builtin/core/roles/download/tasks/images.yaml b/builtin/core/roles/download/tasks/images.yaml new file mode 100644 index 00000000..674dd6ba --- /dev/null +++ b/builtin/core/roles/download/tasks/images.yaml @@ -0,0 +1,8 @@ +- name: Image | Download container images + image: + pull: + images_dir: >- + {{ .binary_dir }}/images/ + manifests: "{{ .image_manifests | toJson }}" + when: + - .image_manifests | default list | empty | not \ No newline at end of file diff --git a/builtin/core/roles/init/init-artifact/tasks/main.yaml b/builtin/core/roles/download/tasks/main.yaml similarity index 53% rename from builtin/core/roles/init/init-artifact/tasks/main.yaml rename to builtin/core/roles/download/tasks/main.yaml index dbd60ff1..d0bce27d 100644 --- a/builtin/core/roles/init/init-artifact/tasks/main.yaml +++ b/builtin/core/roles/download/tasks/main.yaml @@ -1,30 +1,27 @@ --- - name: Artifact | Extract artifact archive to working directory + tags: ["always"] command: | - if [ -f "{{ .artifact_file }}" ]; then + if [ -f "{{ .download.artifact_file }}" ]; then mkdir -p {{ .binary_dir }} tar -zxvf {{ .artifact_file }} -C {{ .binary_dir }} fi - when: .artifact_file | empty | not + when: .download.artifact_file | empty | not - name: Artifact | Download required binaries and images - when: .artifact_file | empty + when: .download.artifact_file | empty block: # Download core binaries - - include_tasks: download_binary.yaml + - include_tasks: binary.yaml # Download Helm and CNI binaries - - include_tasks: download_helm.yaml + - include_tasks: helm.yaml + tags: ["kubernetes"] # Download remote images to the local images directory - 
- name: Artifact | Download container images - image: - pull: - images_dir: >- - {{ .binary_dir }}/images/ - manifests: "{{ .image_manifests | toJson }}" - when: - - .image_manifests | default list | empty | not + - include_tasks: images.yaml + tags: ["kubernetes", "image_registry"] - name: Artifact | Set ownership of working directory to sudo user + tags: ["always"] ignore_errors: true command: | chown -R ${SUDO_UID}:${SUDO_GID} {{ .work_dir }} diff --git a/builtin/core/roles/install/etcd/files/backup.service b/builtin/core/roles/etcd/files/backup.service similarity index 100% rename from builtin/core/roles/install/etcd/files/backup.service rename to builtin/core/roles/etcd/files/backup.service diff --git a/builtin/core/roles/install/etcd/files/etcd.service b/builtin/core/roles/etcd/files/etcd.service similarity index 100% rename from builtin/core/roles/install/etcd/files/etcd.service rename to builtin/core/roles/etcd/files/etcd.service diff --git a/builtin/core/roles/install/etcd/tasks/backup_service.yaml b/builtin/core/roles/etcd/tasks/backup_service.yaml similarity index 100% rename from builtin/core/roles/install/etcd/tasks/backup_service.yaml rename to builtin/core/roles/etcd/tasks/backup_service.yaml diff --git a/builtin/core/roles/install/etcd/tasks/expansion.yaml b/builtin/core/roles/etcd/tasks/expansion.yaml similarity index 100% rename from builtin/core/roles/install/etcd/tasks/expansion.yaml rename to builtin/core/roles/etcd/tasks/expansion.yaml diff --git a/builtin/core/roles/install/etcd/tasks/install.yaml b/builtin/core/roles/etcd/tasks/install.yaml similarity index 100% rename from builtin/core/roles/install/etcd/tasks/install.yaml rename to builtin/core/roles/etcd/tasks/install.yaml diff --git a/builtin/core/roles/install/etcd/tasks/main.yaml b/builtin/core/roles/etcd/tasks/main.yaml similarity index 84% rename from builtin/core/roles/install/etcd/tasks/main.yaml rename to builtin/core/roles/etcd/tasks/main.yaml index 1213f3ec..167fccc7 100644 --- 
a/builtin/core/roles/install/etcd/tasks/main.yaml +++ b/builtin/core/roles/etcd/tasks/main.yaml @@ -4,7 +4,7 @@ - name: ETCD | Upgrade etcd if a newer version is available when: - .etcd_install_LoadState.stdout | eq "loaded" - - .etcd_version | semverCompare (printf ">v%s" (index .etcd_install_version "stdout" "etcd Version")) + - .etcd.etcd_version | semverCompare (printf ">v%s" (index .etcd_install_version "stdout" "etcd Version")) include_tasks: upgrade.yaml - name: ETCD | Expand the etcd cluster by adding new nodes if required diff --git a/builtin/core/roles/install/etcd/tasks/prepare.yaml b/builtin/core/roles/etcd/tasks/prepare.yaml similarity index 58% rename from builtin/core/roles/install/etcd/tasks/prepare.yaml rename to builtin/core/roles/etcd/tasks/prepare.yaml index ea41ef39..efa09c4b 100644 --- a/builtin/core/roles/install/etcd/tasks/prepare.yaml +++ b/builtin/core/roles/etcd/tasks/prepare.yaml @@ -1,26 +1,13 @@ - -- name: Prepare | Check etcd.service status in systemd - block: - - name: Prepare | Get etcd.service LoadState and save to variable - command: systemctl show etcd.service -p LoadState --value - register: etcd_install_LoadState - - name: Prepare | Get etcd.service ActiveState and save to variable - command: systemctl show etcd.service -p ActiveState --value - register: etcd_install_ActiveState - - name: Prepare | Ensure installed etcd is running and healthy - when: .etcd_install_LoadState.stdout | eq "loaded" - assert: - that: .etcd_install_ActiveState.stdout | eq "active" - fail_msg: >- - etcd service is installed but not running +--- +- name: Prepare | Ensure installed etcd is running and healthy + when: .etcd_install_LoadState.stdout | eq "loaded" + assert: + that: .etcd_install_ActiveState.stdout | eq "active" + fail_msg: >- + etcd service is installed but not running - name: Prepare | Set etcd node parameters block: - - name: Prepare | Set etcd state to existing if already installed - when: .etcd_install_LoadState.stdout | eq "loaded" - 
set_fact: - etcd: - state: existing - name: Prepare | Identify nodes with installed or missing etcd run_once: true add_hostvars: @@ -46,37 +33,32 @@ - name: Prepare | Check installed etcd version when: .etcd_install_LoadState.stdout | eq "loaded" block: - - name: Prepare | Get installed etcd version - command: etcd --version - register: etcd_install_version - register_type: yaml - name: Prepare | Ensure target etcd version is not lower than installed version when: .etcd_install_LoadState.stdout | eq "loaded" assert: that: .etcd_version | semverCompare (printf ">=v%s" (index .etcd_install_version "stdout" "etcd Version")) fail_msg: >- - Installed etcd version: {{ index .etcd_install_version "stdout" "etcd Version" }} is lower than target etcd version: {{ .etcd_version }} + Installed etcd version: {{ index .etcd_install_version "stdout" "etcd Version" }} is lower than target etcd version: {{ .etcd.etcd_version }} - name: Prepare | Synchronize etcd package to node if new install or upgrade when: - - .etcd_install_version.error | empty - - or (eq .etcd_install_version.stdout "skip") (eq .etcd_version (printf ">=v%s" (index .etcd_install_version "stdout" "etcd Version"))) + - or (.etcd_install_version.error | empty | not) (.etcd.etcd_version | semverCompare (printf ">v%s" (index .etcd_install_version "stdout" "etcd Version"))) block: - name: Prepare | Copy etcd binary package to remote node copy: src: >- - {{ .binary_dir }}/etcd/{{ .etcd_version }}/{{ .binary_type }}/etcd-{{ .etcd_version }}-linux-{{ .binary_type }}.tar.gz + {{ .binary_dir }}/etcd/{{ .etcd.etcd_version }}/{{ .binary_type }}/etcd-{{ .etcd.etcd_version }}-linux-{{ .binary_type }}.tar.gz dest: >- - {{ .tmp_dir }}/etcd-{{ .etcd_version }}-linux-{{ .binary_type }}.tar.gz + {{ .tmp_dir }}/etcd-{{ .etcd.etcd_version }}-linux-{{ .binary_type }}.tar.gz - name: Prepare | Extract etcd binary package to /usr/local/bin/ command: | - tar --strip-components=1 -C /usr/local/bin/ -xvf {{ .tmp_dir }}/etcd-{{ 
.etcd_version }}-linux-{{ .binary_type }}.tar.gz \ - --wildcards etcd-{{ .etcd_version }}-linux-{{ .binary_type }}/etcd* + tar --strip-components=1 -C /usr/local/bin/ -xvf {{ .tmp_dir }}/etcd-{{ .etcd.etcd_version }}-linux-{{ .binary_type }}.tar.gz \ + --wildcards 'etcd-{{ .etcd.etcd_version }}-linux-{{ .binary_type }}/etcd*' - name: Prepare | Synchronize certificates to node for new install or expansion when: >- or - (eq .etcd_install_version.stdout "skip") + (.etcd_install_version.error | empty | not) (and (.installed_etcd | empty | not) (.need_installed_etcd | fromJson | empty | not) @@ -85,15 +67,15 @@ - name: Prepare | Copy CA certificate to etcd node copy: src: >- - {{ .binary_dir }}/pki/root.crt + {{ .etcd.ca_file }} dest: /etc/ssl/etcd/ssl/ca.crt - name: Prepare | Copy server certificate to etcd node copy: src: >- - {{ .binary_dir }}/pki/etcd.crt + {{ .etcd.cert_file }} dest: /etc/ssl/etcd/ssl/server.crt - name: Prepare | Copy server key to etcd node copy: src: >- - {{ .binary_dir }}/pki/etcd.key + {{ .etcd.key_file }} dest: /etc/ssl/etcd/ssl/server.key diff --git a/builtin/core/roles/install/etcd/tasks/upgrade.yaml b/builtin/core/roles/etcd/tasks/upgrade.yaml similarity index 100% rename from builtin/core/roles/install/etcd/tasks/upgrade.yaml rename to builtin/core/roles/etcd/tasks/upgrade.yaml diff --git a/builtin/core/roles/install/etcd/templates/backup.sh b/builtin/core/roles/etcd/templates/backup.sh similarity index 100% rename from builtin/core/roles/install/etcd/templates/backup.sh rename to builtin/core/roles/etcd/templates/backup.sh diff --git a/builtin/core/roles/install/etcd/templates/backup.timer b/builtin/core/roles/etcd/templates/backup.timer similarity index 100% rename from builtin/core/roles/install/etcd/templates/backup.timer rename to builtin/core/roles/etcd/templates/backup.timer diff --git a/builtin/core/roles/install/etcd/templates/etcd.env b/builtin/core/roles/etcd/templates/etcd.env similarity index 86% rename from 
builtin/core/roles/install/etcd/templates/etcd.env rename to builtin/core/roles/etcd/templates/etcd.env index b0fb9cd9..a9a74707 100644 --- a/builtin/core/roles/install/etcd/templates/etcd.env +++ b/builtin/core/roles/etcd/templates/etcd.env @@ -1,5 +1,19 @@ +{{- $ips := list -}} +{{- $state := "new" -}} +{{- range .groups.etcd | default list -}} + {{- $internalIPv4 := index $.hostvars . "internal_ipv4" | default "" -}} + {{- $internalIPv6 := index $.hostvars . "internal_ipv6" | default "" -}} + {{- if $internalIPv4 | empty | not -}} + {{- $ips = append $ips (printf "%s=https://%s:2380" (index $.hostvars . "hostname") $internalIPv4) -}} + {{- else if $internalIPv6 | empty | not }} + {{- $ips = append $ips (printf "%s=https://%s:2380" (index $.hostvars . "hostname") $internalIPv6) -}} + {{- end -}} + {{ if index $.hostvars . "etcd_install_LoadState" "stdout" | eq "loaded" -}} + {{- $state := "existing" -}} + {{- end -}} +{{- end -}} ETCD_DATA_DIR={{ .etcd.env.data_dir }} -ETCD_INITIAL_CLUSTER_STATE={{ .etcd.state }} +ETCD_INITIAL_CLUSTER_STATE={{ $state }} ETCD_INITIAL_CLUSTER_TOKEN={{ .etcd.env.token }} {{- if .internal_ipv4 | empty | not }} ETCD_ADVERTISE_CLIENT_URLS={{ printf "https://%s:2379" .internal_ipv4 }} @@ -16,16 +30,6 @@ ETCD_LISTEN_PEER_URLS={{ printf "https://%s:2380" .internal_ipv6 }} ETCD_NAME={{ .hostname }} ETCD_PROXY=off ETCD_ENABLE_V2=true -{{- $ips := list }} -{{- range .groups.etcd | default list }} - {{- $internalIPv4 := index $.hostvars . "internal_ipv4" | default "" }} - {{- $internalIPv6 := index $.hostvars . "internal_ipv6" | default "" }} - {{- if $internalIPv4 | empty | not }} - {{- $ips = append $ips (printf "%s=https://%s:2380" (index $.hostvars . "hostname") $internalIPv4) }} - {{- else if $internalIPv6 | empty | not }} - {{- $ips = append $ips (printf "%s=https://%s:2380" (index $.hostvars . 
"hostname") $internalIPv6) }} - {{- end }} -{{- end }} ETCD_INITIAL_CLUSTER={{ $ips | join "," }} ETCD_ELECTION_TIMEOUT={{ .etcd.env.election_timeout }} ETCD_HEARTBEAT_INTERVAL={{ .etcd.env.heartbeat_interval }} diff --git a/builtin/core/roles/image-registry/docker-compose/defaults/main.yaml b/builtin/core/roles/image-registry/docker-compose/defaults/main.yaml new file mode 100644 index 00000000..888b6f79 --- /dev/null +++ b/builtin/core/roles/image-registry/docker-compose/defaults/main.yaml @@ -0,0 +1,3 @@ +cri: + docker: + data_root: /var/lib/docker \ No newline at end of file diff --git a/builtin/core/roles/install/image-registry/docker-compose/files/containerd.service b/builtin/core/roles/image-registry/docker-compose/files/containerd.service similarity index 100% rename from builtin/core/roles/install/image-registry/docker-compose/files/containerd.service rename to builtin/core/roles/image-registry/docker-compose/files/containerd.service diff --git a/builtin/core/roles/install/image-registry/docker-compose/files/docker.service b/builtin/core/roles/image-registry/docker-compose/files/docker.service similarity index 100% rename from builtin/core/roles/install/image-registry/docker-compose/files/docker.service rename to builtin/core/roles/image-registry/docker-compose/files/docker.service diff --git a/builtin/core/roles/install/image-registry/docker-compose/tasks/docker.yaml b/builtin/core/roles/image-registry/docker-compose/tasks/docker.yaml similarity index 83% rename from builtin/core/roles/install/image-registry/docker-compose/tasks/docker.yaml rename to builtin/core/roles/image-registry/docker-compose/tasks/docker.yaml index cdbc5610..a5d3bf5e 100644 --- a/builtin/core/roles/install/image-registry/docker-compose/tasks/docker.yaml +++ b/builtin/core/roles/image-registry/docker-compose/tasks/docker.yaml @@ -5,21 +5,21 @@ register: docker_install_version - name: Docker | Install and configure Docker if not present or version mismatch - when: or 
(.docker_install_version.error | empty | not) (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not) + when: or (.docker_install_version.error | empty | not) (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .cri.docker_version) | not) block: - name: Docker | Copy Docker binary archive to the remote node copy: src: >- - {{ .binary_dir }}/docker/{{ .docker_version }}/{{ .binary_type }}/docker-{{ .docker_version }}.tgz + {{ .binary_dir }}/docker/{{ .cri.docker_version }}/{{ .binary_type }}/docker-{{ .cri.docker_version }}.tgz dest: >- - {{ .tmp_dir }}/docker-{{ .docker_version }}.tgz + {{ .tmp_dir }}/docker-{{ .cri.docker_version }}.tgz - name: Docker | Generate Docker configuration file template: src: daemon.json dest: /etc/docker/daemon.json - name: Docker | Extract Docker binaries to /usr/local/bin command: | - tar -C /usr/local/bin/ --strip-components=1 -xvf {{ .tmp_dir }}/docker-{{ .docker_version }}.tgz --wildcards docker/* + tar -C /usr/local/bin/ --strip-components=1 -xvf {{ .tmp_dir }}/docker-{{ .cri.docker_version }}.tgz --wildcards 'docker/*' - name: Docker | Deploy the Docker systemd service file copy: src: docker.service diff --git a/builtin/core/roles/install/image-registry/docker-compose/tasks/main.yaml b/builtin/core/roles/image-registry/docker-compose/tasks/main.yaml similarity index 89% rename from builtin/core/roles/install/image-registry/docker-compose/tasks/main.yaml rename to builtin/core/roles/image-registry/docker-compose/tasks/main.yaml index 7a106f1a..1604b841 100644 --- a/builtin/core/roles/install/image-registry/docker-compose/tasks/main.yaml +++ b/builtin/core/roles/image-registry/docker-compose/tasks/main.yaml @@ -7,7 +7,7 @@ register: dockercompose_install_version - name: DockerCompose | Install or update Docker Compose if not present or version mismatch - when: or (.dockercompose_install_version.error | empty | not) (.dockercompose_install_version.stdout | ne (printf 
"Docker Compose version %s" .dockercompose_version)) + when: or (.dockercompose_install_version.error | empty | not) (.dockercompose_install_version.stdout | ne (printf "Docker Compose version %s" .image_registry.dockercompose_version)) copy: src: >- {{ .binary_dir }}/image-registry/docker-compose/{{ .dockercompose_version }}/{{ .binary_type }}/docker-compose diff --git a/builtin/core/roles/install/image-registry/docker-compose/templates/daemon.json b/builtin/core/roles/image-registry/docker-compose/templates/daemon.json similarity index 100% rename from builtin/core/roles/install/image-registry/docker-compose/templates/daemon.json rename to builtin/core/roles/image-registry/docker-compose/templates/daemon.json diff --git a/builtin/core/roles/install/image-registry/docker-registry/defaults/main.yaml b/builtin/core/roles/image-registry/docker-registry/defaults/main.yaml similarity index 100% rename from builtin/core/roles/install/image-registry/docker-registry/defaults/main.yaml rename to builtin/core/roles/image-registry/docker-registry/defaults/main.yaml diff --git a/builtin/core/roles/install/image-registry/docker-registry/tasks/main.yaml b/builtin/core/roles/image-registry/docker-registry/tasks/main.yaml similarity index 100% rename from builtin/core/roles/install/image-registry/docker-registry/tasks/main.yaml rename to builtin/core/roles/image-registry/docker-registry/tasks/main.yaml diff --git a/builtin/core/roles/install/image-registry/docker-registry/templates/config.yaml b/builtin/core/roles/image-registry/docker-registry/templates/config.yaml similarity index 100% rename from builtin/core/roles/install/image-registry/docker-registry/templates/config.yaml rename to builtin/core/roles/image-registry/docker-registry/templates/config.yaml diff --git a/builtin/core/roles/install/image-registry/docker-registry/templates/docker-compose.yaml b/builtin/core/roles/image-registry/docker-registry/templates/docker-compose.yaml similarity index 100% rename from 
builtin/core/roles/install/image-registry/docker-registry/templates/docker-compose.yaml rename to builtin/core/roles/image-registry/docker-registry/templates/docker-compose.yaml diff --git a/builtin/core/roles/install/image-registry/docker-registry/templates/docker-registry.service b/builtin/core/roles/image-registry/docker-registry/templates/docker-registry.service similarity index 100% rename from builtin/core/roles/install/image-registry/docker-registry/templates/docker-registry.service rename to builtin/core/roles/image-registry/docker-registry/templates/docker-registry.service diff --git a/builtin/core/roles/install/image-registry/harbor/defaults/main.yaml b/builtin/core/roles/image-registry/harbor/defaults/main.yaml similarity index 100% rename from builtin/core/roles/install/image-registry/harbor/defaults/main.yaml rename to builtin/core/roles/image-registry/harbor/defaults/main.yaml diff --git a/builtin/core/roles/install/image-registry/harbor/tasks/main.yaml b/builtin/core/roles/image-registry/harbor/tasks/main.yaml similarity index 100% rename from builtin/core/roles/install/image-registry/harbor/tasks/main.yaml rename to builtin/core/roles/image-registry/harbor/tasks/main.yaml diff --git a/builtin/core/roles/install/image-registry/harbor/templates/harbor-replications.sh b/builtin/core/roles/image-registry/harbor/templates/harbor-replications.sh similarity index 100% rename from builtin/core/roles/install/image-registry/harbor/templates/harbor-replications.sh rename to builtin/core/roles/image-registry/harbor/templates/harbor-replications.sh diff --git a/builtin/core/roles/install/image-registry/harbor/templates/harbor.service b/builtin/core/roles/image-registry/harbor/templates/harbor.service similarity index 100% rename from builtin/core/roles/install/image-registry/harbor/templates/harbor.service rename to builtin/core/roles/image-registry/harbor/templates/harbor.service diff --git a/builtin/core/roles/install/image-registry/harbor/templates/harbor.yml 
b/builtin/core/roles/image-registry/harbor/templates/harbor.yml similarity index 100% rename from builtin/core/roles/install/image-registry/harbor/templates/harbor.yml rename to builtin/core/roles/image-registry/harbor/templates/harbor.yml diff --git a/builtin/core/roles/install/image-registry/keepalived/files/healthcheck.sh b/builtin/core/roles/image-registry/keepalived/files/healthcheck.sh similarity index 100% rename from builtin/core/roles/install/image-registry/keepalived/files/healthcheck.sh rename to builtin/core/roles/image-registry/keepalived/files/healthcheck.sh diff --git a/builtin/core/roles/install/image-registry/keepalived/tasks/main.yaml b/builtin/core/roles/image-registry/keepalived/tasks/main.yaml similarity index 100% rename from builtin/core/roles/install/image-registry/keepalived/tasks/main.yaml rename to builtin/core/roles/image-registry/keepalived/tasks/main.yaml diff --git a/builtin/core/roles/install/image-registry/keepalived/templates/keepalived.conf b/builtin/core/roles/image-registry/keepalived/templates/keepalived.conf similarity index 100% rename from builtin/core/roles/install/image-registry/keepalived/templates/keepalived.conf rename to builtin/core/roles/image-registry/keepalived/templates/keepalived.conf diff --git a/builtin/core/roles/install/image-registry/meta/main.yaml b/builtin/core/roles/image-registry/meta/main.yaml similarity index 54% rename from builtin/core/roles/install/image-registry/meta/main.yaml rename to builtin/core/roles/image-registry/meta/main.yaml index 7b5700db..4782dfa1 100644 --- a/builtin/core/roles/install/image-registry/meta/main.yaml +++ b/builtin/core/roles/image-registry/meta/main.yaml @@ -1,15 +1,15 @@ --- dependencies: - - role: install/image-registry/docker-compose + - role: image-registry/docker-compose - - role: install/image-registry/keepalived + - role: image-registry/keepalived when: - .image_registry.ha_vip | empty | not - .groups.image_registry | len | lt 1 - - role: 
install/image-registry/harbor + - role: image-registry/harbor when: .image_registry.type | eq "harbor" - - role: install/image-registry/docker-registry + - role: image-registry/docker-registry when: .image_registry.type | eq "docker-registry" diff --git a/builtin/core/roles/install/image-registry/tasks/main.yaml b/builtin/core/roles/image-registry/tasks/main.yaml similarity index 96% rename from builtin/core/roles/install/image-registry/tasks/main.yaml rename to builtin/core/roles/image-registry/tasks/main.yaml index 65964fee..652a6bc8 100644 --- a/builtin/core/roles/install/image-registry/tasks/main.yaml +++ b/builtin/core/roles/image-registry/tasks/main.yaml @@ -1,6 +1,5 @@ --- - name: ImageRegistry | Synchronize images to remote host - tags: ["only_image"] copy: src: >- {{ .binary_dir }}/images/ @@ -8,7 +7,7 @@ {{ .image_registry.images_dir }} - name: ImageRegistry | Ensure Harbor project exists for each image - tags: ["only_image"] + when: .image_registry.type | eq "harbor" command: | # Traverse first-level subdirectories in images_dir, skipping 'blobs' for registry_dir in {{ .image_registry.images_dir }}*; do @@ -34,10 +33,8 @@ fi done done - when: .image_registry.type | eq "harbor" - name: ImageRegistry | Push images package to image registry - tags: ["only_image"] image: push: images_dir: >- diff --git a/builtin/core/roles/init/init-artifact/defaults/main.yaml b/builtin/core/roles/init/init-artifact/defaults/main.yaml deleted file mode 100644 index a3841331..00000000 --- a/builtin/core/roles/init/init-artifact/defaults/main.yaml +++ /dev/null @@ -1,223 +0,0 @@ -work_dir: /kubekey -artifact: - arch: [ "amd64" ] - # offline artifact package for kk. - artifact_file: "" - # the md5_file of artifact_file. 
- artifact_md5: "" - artifact_url: - etcd: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-amd64.tar.gz - {{- else -}} - https://github.com/etcd-io/etcd/releases/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-amd64.tar.gz - {{- end -}} - arm64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-arm64.tar.gz - {{- else -}} - https://github.com/etcd-io/etcd/releases/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-arm64.tar.gz - {{- end -}} - kubeadm: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/amd64/kubeadm - {{- else -}} - https://dl.k8s.io/release/{{ .kube_version }}/bin/linux/amd64/kubeadm - {{- end -}} - arm64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/arm64/kubeadm - {{- else -}} - https://dl.k8s.io/release/{{ .kube_version }}/bin/linux/arm64/kubeadm - {{- end -}} - kubelet: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/amd64/kubelet - {{- else -}} - https://dl.k8s.io/release/{{ .kube_version }}/bin/linux/amd64/kubelet - {{- end -}} - arm64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/arm64/kubelet - {{- else -}} - https://dl.k8s.io/release/{{ .kube_version }}/bin/linux/arm64/kubelet - {{- end -}} - kubectl: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/amd64/kubectl - {{- else -}} - https://dl.k8s.io/release/{{ .kube_version }}/bin/linux/amd64/kubectl - {{- end -}} - arm64: >- - {{- if .kkzone | eq "cn" -}} - 
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/arm64/kubectl - {{- else -}} - https://dl.k8s.io/release/{{ .kube_version }}/bin/linux/arm64/kubectl - {{- end -}} - cni_plugins: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://github.com/containernetworking/plugins/releases/download/{{ .cni_plugins_version }}/cni-plugins-linux-amd64-{{ .cni_plugins_version }}.tgz - {{- else -}} - https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ .cni_plugins_version }}/cni-plugins-linux-amd64-{{ .cni_plugins_version }}.tgz - {{- end -}} - arm64: >- - {{- if .kkzone | eq "cn" -}} - https://github.com/containernetworking/plugins/releases/download/{{ .cni_plugins_version }}/cni-plugins-linux-arm64-{{ .cni_plugins_version }}.tgz - {{- else -}} - https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ .cni_plugins_version }}/cni-plugins-linux-arm64-{{ .cni_plugins_version }}.tgz - {{- end -}} - helm: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-helm.pek3b.qingstor.com/helm-{{ .helm_version }}-linux-amd64.tar.gz - {{- else -}} - https://get.helm.sh/helm-{{ .helm_version }}-linux-amd64.tar.gz - {{- end -}} - arm64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-helm.pek3b.qingstor.com/helm-{{ .helm_version }}-linux-arm64.tar.gz - {{- else -}} - https://get.helm.sh/helm-{{ .helm_version }}-linux-arm64.tar.gz - {{- end -}} - crictl: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-amd64.tar.gz - {{- else -}} - https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-amd64.tar.gz - {{- end -}} - arm64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-arm64.tar.gz - {{- else -}} 
- https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-arm64.tar.gz - {{- end -}} - docker: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://mirrors.aliyun.com/docker-ce/linux/static/stable/x86_64/docker-{{ .docker_version }}.tgz - {{- else -}} - https://download.docker.com/linux/static/stable/x86_64/docker-{{ .docker_version }}.tgz - {{- end -}} - arm64: >- - {{- if .kkzone | eq "cn" -}} - https://mirrors.aliyun.com/docker-ce/linux/static/stable/aarch64/docker-{{ .docker_version }}.tgz - {{- else -}} - https://download.docker.com/linux/static/stable/aarch64/docker-{{ .docker_version }}.tgz - {{- end -}} - cridockerd: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.amd64.tgz - {{- else -}} - https://github.com/Mirantis/cri-dockerd/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.amd64.tgz - {{- end -}} - arm64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.arm64.tgz - {{- else -}} - https://github.com/Mirantis/cri-dockerd/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.arm64.tgz - {{- end -}} - containerd: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-amd64.tar.gz - {{- else -}} - https://github.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-amd64.tar.gz - {{- end -}} - arm64: >- - {{- if .kkzone | 
eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-arm64.tar.gz - {{- else -}} - https://github.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-arm64.tar.gz - {{- end -}} - runc: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.amd64 - {{- else -}} - https://github.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.amd64 - {{- end -}} - arm64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.arm64 - {{- else -}} - https://github.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.arm64 - {{- end -}} - dockercompose: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-x86_64 - {{- else -}} - https://github.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-x86_64 - {{- end -}} - arm64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-aarch64 - {{- else -}} - https://github.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-aarch64 - {{- end -}} -# docker_registry: -# amd64: >- -# {{- if .kkzone | eq "cn" -}} -# https://kubernetes-release.pek3b.qingstor.com/registry/{{ .docker_registry_version }}/docker-registry-{{ .docker_registry_version }}-linux-amd64.tgz -# {{- else -}} -# https://github.com/kubesphere/kubekey/releases/download/{{ .docker_registry_version }}/docker-registry-{{ 
.docker_registry_version }}-linux-amd64.tgz -# {{- end -}} -# arm64: >- -# {{- if .kkzone | eq "cn" -}} -# https://kubernetes-release.pek3b.qingstor.com/registry/{{ .docker_registry_version }}/docker-registry-{{ .docker_registry_version }}-linux-arm64.tgz -# {{- else -}} -# https://github.com/kubesphere/kubekey/releases/download/{{ .docker_registry_version }}/docker-registry-{{ .docker_registry_version }}-linux-arm64.tgz -# {{- end -}} - harbor: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://github.com/goharbor/harbor/releases/download/{{ .harbor_version }}/harbor-offline-installer-{{ .harbor_version }}.tgz - {{- else -}} - https://github.com/goharbor/harbor/releases/download/{{ .harbor_version }}/harbor-offline-installer-{{ .harbor_version }}.tgz - {{- end -}} -# arm64: >- -# {{- if .kkzone | eq "cn" -}} -# https://github.com/goharbor/harbor/releases/download/{{ .harbor_version }}/harbor-{{ .harbor_version }}-linux-arm64.tgz -# {{- else -}} -# https://github.com/goharbor/harbor/releases/download/{{ .harbor_version }}/harbor-{{ .harbor_version }}-linux-arm64.tgz -# {{- end -}} -# keepalived: -# amd64: >- -# {{- if .kkzone | eq "cn" -}} -# https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-amd64.tgz -# {{- else -}} -# https://github.com/osixia/keepalived/releases/download/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-amd64.tgz -# {{- end -}} -# arm64: >- -# {{- if .kkzone | eq "cn" -}} -# https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-arm64.tgz -# {{- else -}} -# https://github.com/osixia/keepalived/releases/download/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-arm64.tgz -# {{- end -}} - # Notice: In the early calico helm chart, appVersion is not same as version(eg. 
v3.17.4) - calico: https://github.com/projectcalico/calico/releases/download/{{ .calico_version }}/tigera-operator-{{ .calico_version }}.tgz - calicoctl: - amd64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-amd64 - {{- else -}} - https://github.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-amd64 - {{- end -}} - arm64: >- - {{- if .kkzone | eq "cn" -}} - https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-arm64 - {{- else -}} - https://github.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-arm64 - {{- end -}} - cilium: https://helm.cilium.io/cilium-{{ .cilium_version }}.tgz - kubeovn: https://kubeovn.github.io/kube-ovn/kube-ovn-{{ .kubeovn_version }}.tgz - hybridnet: https://github.com/alibaba/hybridnet/releases/download/helm-chart-{{ .hybridnet_version }}/hybridnet-{{ .hybridnet_version }}.tgz - nfs_provisioner: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/releases/download/nfs-subdir-external-provisioner-4.0.18/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz - download_image: true diff --git a/builtin/core/roles/init/init-cert/defaults/main.yaml b/builtin/core/roles/init/init-cert/defaults/main.yaml deleted file mode 100644 index 7c24bd91..00000000 --- a/builtin/core/roles/init/init-cert/defaults/main.yaml +++ /dev/null @@ -1,24 +0,0 @@ -certs: - # CA cert - ca: - date: 87600h - # how to generate cert file.support: IfNotPresent, Always - gen_cert_policy: IfNotPresent - kubernetes_ca: - date: 87600h - # how to generate cert file.support: IfNotPresent, Always - gen_cert_policy: IfNotPresent - front_proxy_ca: - date: 87600h - # how to generate cert file.support: IfNotPresent, Always - gen_cert_policy: IfNotPresent - # etcd cert - etcd: - date: 87600h - # how to generate cert 
file.support: IfNotPresent, Always - gen_cert_policy: IfNotPresent - # image_registry cert date - image_registry: - date: 87600h - # how to generate cert file.support: IfNotPresent, Always - gen_cert_policy: IfNotPresent \ No newline at end of file diff --git a/builtin/core/roles/init/init-os/defaults/main.yaml b/builtin/core/roles/init/init-os/defaults/main.yaml deleted file mode 100644 index 9332928a..00000000 --- a/builtin/core/roles/init/init-os/defaults/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ -ntp: - servers: - - "cn.pool.ntp.org" - enabled: true -timezone: Asia/Shanghai -# set hostname by inventory_host's name which defined in inventory.yaml -set_hostname: true \ No newline at end of file diff --git a/builtin/core/roles/init/init-os/tasks/init_localdns.yaml b/builtin/core/roles/init/init-os/tasks/init_localdns.yaml deleted file mode 100644 index 1f445b42..00000000 --- a/builtin/core/roles/init/init-os/tasks/init_localdns.yaml +++ /dev/null @@ -1,63 +0,0 @@ -- name: DNS | Configure local DNS entries - loop: "{{ .localDNS | toJson }}" - command: | - # Remove any previous Kubekey-managed DNS entries - sed -i ':a;$!{N;ba};s@# kubekey hosts BEGIN.*# kubekey hosts END@@' {{ .item }} - sed -i '/^$/N;/\n$/N;//D' {{ .item }} - # Add updated Kubekey DNS configuration - cat >> {{ .item }} <- - {{ .binary_dir }}/repository/{{ .os.release.ID_LIKE }}-{{ .os.release.VERSION_ID }}-{{ .binary_type }}.iso - dest: >- - {{ .tmp_dir }}/repository.iso - - name: Kubekey Repository | Mount repository ISO to temporary directory - command: | - if [ -f "{{ .tmp_dir }}/repository.iso" ]; then - mount -t iso9660 -o loop {{ .tmp_dir }}/repository.iso {{ .tmp_dir }}/iso - fi - rescue: - - name: Kubekey Repository | Unmount repository ISO from temporary directory - command: | - if [ -f "{{ .tmp_dir }}/repository.iso" ]; then - umount {{ .tmp_dir }}/iso - fi - -- name: Kubekey Repository | Initialize package repositories and install system dependencies - block: - - name: Kubekey Repository | 
Initialize Debian-based repository and install required system packages - command: | - now=$(date +"%Y-%m-%d %H:%M:%S") - PKGS="socat conntrack ipset ebtables chrony ipvsadm{{ if .groups.nfs | default list | has .inventory_hostname }} nfs-kernel-server{{ end }}" - PKGS_TO_INSTALL="" - for pkg in $PKGS; do - if [ -n "$pkg" ]; then - dpkg -s $pkg >/dev/null 2>&1 || PKGS_TO_INSTALL="$PKGS_TO_INSTALL $pkg" - fi - done - if [ -f "{{ .tmp_dir }}/repository.iso" ]; then - # Backup current APT sources - mv /etc/apt/sources.list /etc/apt/sources.list.kubekey-$now.bak - mv /etc/apt/sources.list.d /etc/apt/sources.list.d.kubekey-$now.bak - mkdir -p /etc/apt/sources.list.d - # Configure local repository - rm -rf /etc/apt/sources.list.d/* - echo 'deb [trusted=yes] file://{{ .tmp_dir }}/iso /' > /etc/apt/sources.list.d/kubekey.list - # Update package index - apt-get update - # Install missing packages - if [ -n "$PKGS_TO_INSTALL" ]; then - apt install -y $PKGS_TO_INSTALL - fi - # Restore original APT sources - rm -rf /etc/apt/sources.list.d - mv /etc/apt/sources.list.kubekey.bak-$now /etc/apt/sources.list - mv /etc/apt/sources.list.d.kubekey.bak-$now /etc/apt/sources.list.d - else - # No local ISO found, using default repositories - apt-get update - if [ -n "$PKGS_TO_INSTALL" ]; then - apt install -y $PKGS_TO_INSTALL - fi - fi - when: .os.release.ID_LIKE | eq "debian" - - name: Kubekey Repository | Initialize RHEL-based repository and install required system packages - command: | - now=$(date +"%Y-%m-%d %H:%M:%S") - PKGS="socat conntrack ipset ebtables chrony ipvsadm{{ if .groups.nfs | default list | has .inventory_hostname }} nfs-kernel-server{{ end }}" - PKGS_TO_INSTALL="" - for pkg in $PKGS; do - if [ -n "$pkg" ]; then - rpm -q $pkg >/dev/null 2>&1 || PKGS_TO_INSTALL="$PKGS_TO_INSTALL $pkg" - fi - done - if [ -f "{{ .tmp_dir }}/repository.iso" ]; then - # Backup current YUM repositories - mv /etc/yum.repos.d /etc/yum.repos.d.kubekey.bak-$now - mkdir -p /etc/yum.repos.d - # 
Configure local repository - rm -rf /etc/yum.repos.d/* - cat < /etc/yum.repos.d/CentOS-local.repo - [base-local] - name=Local RPM Repository - baseurl=file://{{ .tmp_dir }}/repository.iso - enabled=1 - gpgcheck=0 - EOF - # Refresh repository cache - yum clean all && yum makecache - # Install missing packages - if [ -n "$PKGS_TO_INSTALL" ]; then - yum install -y $PKGS_TO_INSTALL - fi - # Restore original YUM repositories - rm -rf /etc/yum.repos.d - mv /etc/yum.repos.d.kubekey.bak-$now /etc/yum.repos.d - else - # No local ISO found, using default repositories - if [ -n "$PKGS_TO_INSTALL" ]; then - yum install -y $PKGS_TO_INSTALL - fi - fi - when: .os.release.ID_LIKE | eq "\"rhel fedora\"" diff --git a/builtin/core/roles/init/init-os/tasks/main.yaml b/builtin/core/roles/init/init-os/tasks/main.yaml deleted file mode 100644 index 17298145..00000000 --- a/builtin/core/roles/init/init-os/tasks/main.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: OS | Initialize new Kubernetes nodes - when: - - .groups.k8s_cluster | default list | has .inventory_hostname - - .kubernetes_install_LoadState.stdout | eq "not-found" - block: - - include_tasks: init_repository.yaml - - name: OS | Reset temporary directory - command: | - if [ -d {{ .tmp_dir }} ]; then - rm -rf {{ .tmp_dir }} - fi - mkdir -m 777 -p {{ .tmp_dir }} - - name: OS | Set system hostname - command: | - hostnamectl set-hostname {{ .inventory_hostname }} \ - && sed -i '/^127.0.1.1/s/.*/127.0.1.1 {{ .inventory_hostname }}/g' {{ .item }} - when: - - .set_hostname - - .inventory_hostname | ne "localhost" - loop: "{{ .localDNS | toJson }}" - - name: OS | Synchronize initialization script to remote node - template: - src: init-os.sh - dest: /etc/kubekey/scripts/init-os.sh - mode: 0755 - - name: OS | Execute initialization script on remote node - command: | - /etc/kubekey/scripts/init-os.sh - -- name: OS | Always perform initialization steps for all nodes - block: - - include_tasks: init_ntpserver.yaml - - include_tasks: 
init_localdns.yaml \ No newline at end of file diff --git a/builtin/core/roles/install/cni/calico/defaults/main.yaml b/builtin/core/roles/install/cni/calico/defaults/main.yaml deleted file mode 100644 index 1beb801c..00000000 --- a/builtin/core/roles/install/cni/calico/defaults/main.yaml +++ /dev/null @@ -1,12 +0,0 @@ -cni: - calico: - values: | - # calico helm values - tigeraOperator: - registry: {{ .quayio_registry }} - calicoctl: - image: {{ .dockerio_registry }}/calico/ctl - installation: - registry: {{ .dockerio_registry }} - calicoNetwork: - bgp: Enabled \ No newline at end of file diff --git a/builtin/core/roles/install/cni/calico/tasks/main.yaml b/builtin/core/roles/install/cni/calico/tasks/main.yaml deleted file mode 100644 index 93fb6649..00000000 --- a/builtin/core/roles/install/cni/calico/tasks/main.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- -- name: Calico | calico-check-calicoctl-installed - ignore_errors: true - command: calicoctl version - register: calicoctl_install_version - register_type: yaml - -- name: Calico | calico-install-calicoctl-if-missing - when: .calicoctl_install_version.error | empty | not - block: - - name: Calico | calico-sync-calicoctl-to-remote - copy: - src: >- - {{ .binary_dir }}/cni/calico/{{ .calico_version }}/{{ .binary_type }}/calicoctl - dest: /usr/local/bin/calicoctl - mode: 0755 - -- name: Calico | calico-sync-calico-package-to-remote - copy: - src: >- - {{ .binary_dir }}/cni/calico/tigera-operator-{{ .calico_version }}.tgz - dest: >- - /etc/kubernetes/cni/tigera-operator-{{ .calico_version }}.tgz - -- name: Calico | calico-generate-custom-values-file - copy: - content: | - {{ .cni.calico.values }} - dest: /etc/kubernetes/cni/calico-values.yaml - -- name: Calico | calico-apply-helm-chart - command: | - helm upgrade --install --create-namespace --namespace tigera-operator calico /etc/kubernetes/cni/tigera-operator-{{ .calico_version }}.tgz -f /etc/kubernetes/cni/calico-values.yaml diff --git 
a/builtin/core/roles/install/cni/cilium/tasks/main.yaml b/builtin/core/roles/install/cni/cilium/tasks/main.yaml deleted file mode 100644 index bfdc48f4..00000000 --- a/builtin/core/roles/install/cni/cilium/tasks/main.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Cilium | Ensure cilium CLI package is present - when: .ciliumcli_version | empty | not - copy: - src: >- - {{ .binary_dir }}/cni/cilium/ciliumcli-{{ .ciliumcli_version }}/{{ .item }} - dest: /usr/local/bin/cilium - -- name: Cilium | Ensure cilium Helm chart package is present - copy: - src: >- - {{ .binary_dir }}/cni/cilium/cilium-{{ .cilium_version }}.tgz - dest: >- - /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz - -- name: Cilium | Generate cilium Helm custom values file - copy: - content: | - {{ .cni.cilium.values }} - dest: /etc/kubernetes/cni/cilium-values.yaml - -# Reference: https://docs.cilium.io/en/stable/installation/k8s-install-helm/ -- name: Cilium | Install cilium using Helm - command: | - helm upgrade --install --namespace kube-system cilium /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz -f /etc/kubernetes/cni/cilium-values.yaml diff --git a/builtin/core/roles/install/cni/defaults/main.yaml b/builtin/core/roles/install/cni/defaults/main.yaml deleted file mode 100644 index 4e9064ad..00000000 --- a/builtin/core/roles/install/cni/defaults/main.yaml +++ /dev/null @@ -1,39 +0,0 @@ -cni: - type: >- - {{ .kubernetes.kube_network_plugin | default "calico" }} - # Multus CNI is a container network interface (CNI) plugin for Kubernetes that enables attaching multiple network interfaces to pods - multus: - # if install multus thick plugins. - enabled: false - image: - registry: >- - {{ .dockerio_registry }} - repository: kubesphere/multus-cni - tag: v3.8 - # In Kubernetes, the Pod CIDR supports both IPv4 and IPv6 configurations. 
It can be specified as follows: - # "Single-stack IPv4": the pod_cidr value format "ipv4" - # "Single-stack IPv6": the pod_cidr value format "ipv6" - # "Dual-stack (IPv4 and IPv6)": the pod_cidr value format "ipv4,ipv6" - ipv4_support: >- - {{ eq (.kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | first | ipFamily) "IPv4" }} - ipv4_pods_cidr: >- - {{- if eq (.kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | first | ipFamily) "IPv4" -}} - {{ .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | first }} - {{- end -}} - # kubernetes.networking.ipv4_mask_size: determines the size of the IP block allocated to each node from the clusterCIDR. - # It controls how many IP addresses Kubernetes will assign to each node for Pod usage. - # cni.ipv4_block_size: defines the size of the IP block that cni reserves for each node. - # To avoid wasting IP addresses and to make management simpler, it’s best to align maskSize and blockSize as closely as possible - ipv4_block_size: >- - {{ .kubernetes.networking.ipv4_mask_size | default 24 }} - ipv6_support: >- - {{- eq (.kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | last | ipFamily) "IPv6" }} - ipv6_pods_cidr: >- - {{- if eq (.kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | last | ipFamily) "IPv6" -}} - {{ .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | last }} - {{- end -}} - ipv6_block_size: >- - {{ .kubernetes.networking.ipv4_mask_size | default 64 }} - kube_svc_cidr: >- - {{ .kubernetes.networking.service_cidr | default "10.233.0.0/18" }} - diff --git a/builtin/core/roles/install/cni/meta/main.yaml b/builtin/core/roles/install/cni/meta/main.yaml deleted file mode 100644 index e70fa639..00000000 --- a/builtin/core/roles/install/cni/meta/main.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -dependencies: - - role: install/cni/multus - when: .cni.multus.enabled - - - role: 
install/cni/calico - when: .cni.type | eq "calico" - - - role: install/cni/cilium - when: .cni.type | eq "cilium" - - - role: install/cni/flannel - when: .cni.type | eq "flannel" - - - role: install/cni/kubeovn - when: .cni.type | eq "kubeovn" - - - role: install/cni/hybridnet - when: .cni.type | eq "hyvbridnet" diff --git a/builtin/core/roles/install/cri/defaults/main.yaml b/builtin/core/roles/install/cri/defaults/main.yaml deleted file mode 100644 index d13fefcb..00000000 --- a/builtin/core/roles/install/cri/defaults/main.yaml +++ /dev/null @@ -1,44 +0,0 @@ -cri: - # support: systemd, cgroupfs - cgroup_driver: systemd - sandbox_image: - registry: >- - {{ .dockerio_registry }} - repository: kubesphere/pause - tag: 3.5 - # support: containerd,docker - # container_manager: docker - # the endpoint of containerd - cri_socket: >- - {{- if .cri.container_manager | eq "containerd" -}} - unix:///var/run/containerd/containerd.sock - {{- else if and (.cri.container_manager | eq "docker") (.kube_version | semverCompare ">=v1.24.0") -}} - unix:///var/run/cri-dockerd.sock - {{- end -}} - containerd: - data_root: /var/lib/containerd - docker: - data_root: /var/lib/docker - registry: - mirrors: ["https://registry-1.docker.io"] - insecure_registries: [] - auths: [] - -image_registry: - # ha_vip: 192.168.122.59 - auth: - registry: >- - {{- if .image_registry.ha_vip | empty | not -}} - {{ .image_registry.ha_vip }} - {{- else if .groups.image_registry | default list | empty | not -}} - {{- $internalIPv4 := index .hostvars (.groups.image_registry | default list | first) "internal_ipv4" | default "" -}} - {{- $internalIPv6 := index .hostvars (.groups.image_registry | default list | first) "internal_ipv6" | default "" -}} - {{- if $internalIPv4 | empty | not -}} - {{ $internalIPv4 }} - {{- else if $internalIPv6 | empty | not -}} - {{ $internalIPv6 }} - {{- end -}} - {{- end -}} - username: admin - password: Harbor12345 - insecure: true diff --git 
a/builtin/core/roles/install/cri/meta/main.yaml b/builtin/core/roles/install/cri/meta/main.yaml deleted file mode 100644 index 631fbb6c..00000000 --- a/builtin/core/roles/install/cri/meta/main.yaml +++ /dev/null @@ -1,8 +0,0 @@ -dependencies: - - role: install/cri/crictl - - - role: install/cri/docker - when: .cri.container_manager | eq "docker" - - - role: install/cri/containerd - when: .cri.container_manager | eq "containerd" \ No newline at end of file diff --git a/builtin/core/roles/install/etcd/defaults/main.yaml b/builtin/core/roles/install/etcd/defaults/main.yaml deleted file mode 100644 index 54fda277..00000000 --- a/builtin/core/roles/install/etcd/defaults/main.yaml +++ /dev/null @@ -1,27 +0,0 @@ -etcd: - # endpoints: ["https://127.1.1.1:2379"] - # etcd binary - state: new -# env config - env: - election_timeout: 5000 - heartbeat_interval: 250 - compaction_retention: 8 - snapshot_count: 10000 - data_dir: /var/lib/etcd - token: k8s_etcd -# metrics: basic -# quota_backend_bytes: 100 -# max_request_bytes: 100 -# max_snapshots: 100 -# max_wals: 5 -# log_level: info -# unsupported_arch: arm64 -# backup config - backup: - backup_dir: /var/lib/etcd-backup - keep_backup_number: 5 - etcd_backup_script: "backup.sh" - on_calendar: "*-*-* *:00/30:00" - performance: false - traffic_priority: false diff --git a/builtin/core/roles/install/image-registry/defaults/main.yaml b/builtin/core/roles/install/image-registry/defaults/main.yaml deleted file mode 100644 index 3c6b8ee1..00000000 --- a/builtin/core/roles/install/image-registry/defaults/main.yaml +++ /dev/null @@ -1,22 +0,0 @@ -image_registry: - # registry type. support: harbor, docker-registry - type: harbor - # ha_vip: 192.168.122.59 - # which store images data which will push to registry. 
- images_dir: >- - {{ .tmp_dir }}/images/ - auth: - registry: >- - {{- if .image_registry.ha_vip | empty | not -}} - {{ .image_registry.ha_vip }} - {{- else if .groups.image_registry | default list | empty | not -}} - {{- $internalIPv4 := index .hostvars (.groups.image_registry | default list | first) "internal_ipv4" | default "" -}} - {{- $internalIPv6 := index .hostvars (.groups.image_registry | default list | first) "internal_ipv6" | default "" -}} - {{- if $internalIPv4 | empty | not -}} - {{ $internalIPv4 }} - {{- else if $internalIPv6 | empty | not -}} - {{ $internalIPv6 }} - {{- end -}} - {{- end -}} - username: admin - password: Harbor12345 diff --git a/builtin/core/roles/install/image-registry/docker-compose/defaults/main.yaml b/builtin/core/roles/install/image-registry/docker-compose/defaults/main.yaml deleted file mode 100644 index 42d27a1c..00000000 --- a/builtin/core/roles/install/image-registry/docker-compose/defaults/main.yaml +++ /dev/null @@ -1,5 +0,0 @@ -cri: - docker: - data_root: /var/lib/docker - containerd: - data_root: /var/lib/containerd \ No newline at end of file diff --git a/builtin/core/roles/install/storageclass/local/defaults/main.yaml b/builtin/core/roles/install/storageclass/local/defaults/main.yaml deleted file mode 100644 index 23b31daf..00000000 --- a/builtin/core/roles/install/storageclass/local/defaults/main.yaml +++ /dev/null @@ -1,15 +0,0 @@ -sc: - local: - enabled: true - default: true - provisioner_image: - registry: >- - {{ .dockerio_registry }} - repository: openebs/provisioner-localpv - tag: 3.3.0 - linux_utils_image: - registry: >- - {{ .dockerio_registry }} - repository: openebs/linux-utils - tag: 3.3.0 - path: /var/openebs/local \ No newline at end of file diff --git a/builtin/core/roles/install/storageclass/meta/main.yaml b/builtin/core/roles/install/storageclass/meta/main.yaml deleted file mode 100644 index 4fd0f9dd..00000000 --- a/builtin/core/roles/install/storageclass/meta/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ 
---- -dependencies: - - role: install/storageclass/local - when: .sc.local.enabled - - - role: install/storageclass/nfs - when: .sc.nfs.enabled diff --git a/builtin/core/roles/install/storageclass/nfs/defaults/main.yaml b/builtin/core/roles/install/storageclass/nfs/defaults/main.yaml deleted file mode 100644 index a61a9949..00000000 --- a/builtin/core/roles/install/storageclass/nfs/defaults/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ -sc: - nfs: # each k8s_cluster node should install nfs-utils - enabled: false - default: false - server: >- - {{ .groups.nfs | default list | first }} - path: /share/kubernetes diff --git a/builtin/core/roles/kubernetes/certs/defaults/main.yaml b/builtin/core/roles/kubernetes/certs/defaults/main.yaml deleted file mode 100644 index 9c216272..00000000 --- a/builtin/core/roles/kubernetes/certs/defaults/main.yaml +++ /dev/null @@ -1,8 +0,0 @@ -renew_certs: - enabled: false - is_kubeadm_alpha: >- - {{- if .kube_version | semverCompare "- - {{- if .kube_version | semverCompare ">=v1.24.0" -}} + {{- if .kubernetes.kube_version | semverCompare ">=v1.24.0" -}} kubeadm/kubeadm-init.v1beta3 {{- else -}} kubeadm/kubeadm-init.v1beta2 @@ -13,7 +13,7 @@ block: - name: Init | Pre-initialization for kube-vip when: - - .kube_version | semverCompare ">=v1.29.0" + - .kubernetes.kube_version | semverCompare ">=v1.29.0" - eq .kubernetes.control_plane_endpoint.type "kube_vip" command: | sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \ @@ -23,7 +23,7 @@ /usr/local/bin/kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull {{ if not .kubernetes.kube_proxy.enabled }}--skip-phases=addon/kube-proxy{{ end }} - name: Init | Post-initialization for kube-vip when: - - .kube_version | semverCompare ">=v1.29.0" + - .kubernetes.kube_version | semverCompare ">=v1.29.0" - eq .kubernetes.control_plane_endpoint.type "kube_vip" command: | sed -i 's#path: 
/etc/kubernetes/super-admin.conf#path: /etc/kubernetes/admin.conf#' \ @@ -33,15 +33,15 @@ # This ensures the control_plane_endpoint resolves locally before kube-vip is running, # preventing failures for tasks that execute kubectl apply on the current node. - name: Init | Reset local DNS for control_plane_endpoint + loop: "{{ .native.localDNS | toJson }}" command: | - sed -i ':a;$!{N;ba};s@# kubekey control_plane_endpoint BEGIN.*# kubekey control_plane_endpoint END@@' {{ .item }} + sed -i ':a;$!{N;ba};s@# kubekey kubernetes control_plane_endpoint BEGIN.*# kubekey kubernetes control_plane_endpoint END@@' {{ .item }} cat >> {{ .item }} <- - {{- $initNodes := list -}} - {{- $notInitNodes := list -}} - {{- range .groups.kube_control_plane -}} - {{- if index $.hostvars . "kubernetes_install_LoadState" "stdout" | eq "loaded" -}} - {{- $initNodes = append $initNodes . -}} - {{- else if index $.hostvars . "kubernetes_install_LoadState" "stdout" | eq "not-found" -}} - {{- $notInitNodes = append $notInitNodes . 
-}} - {{- end -}} - {{- end -}} - {{- if $initNodes | len | eq 1 -}} - {{ $initNodes | first }} - {{- else if $initNodes | len | lt 1 -}} - {{ index $initNodes (randInt 0 ((sub ($initNodes | len) 1) | int)) }} - {{- else if $notInitNodes | len | eq 1 -}} - {{ $notInitNodes | first }} - {{- else if $notInitNodes | len | lt 1 -}} - {{ index $notInitNodes (randInt 0 ((sub ($notInitNodes | len) 1) | int)) }} - {{- end -}} - - name: InitKubernetes | Configure control_plane_endpoint in local DNS files when: - or (.kubernetes.control_plane_endpoint.type | eq "local") (.kubernetes.control_plane_endpoint.type | eq "haproxy") - .inventory_hostname | eq .init_kubernetes_node | not + loop: "{{ .native.localDNS | toJson }}" command: | - sed -i ':a;$!{N;ba};s@# kubekey control_plane_endpoint BEGIN.*# kubekey control_plane_endpoint END@@' {{ .item }} + sed -i ':a;$!{N;ba};s@# kubekey kubernetes control_plane_endpoint BEGIN.*# kubekey kubernetes control_plane_endpoint END@@' {{ .item }} cat >> {{ .item }} < /etc/kubernetes/kubeadm-config.yaml + fi - name: InitKubernetes | Fetch kubeconfig to local workspace fetch: src: /etc/kubernetes/admin.conf diff --git a/builtin/core/roles/kubernetes/init-kubernetes/templates/dns/coredns.yaml b/builtin/core/roles/kubernetes/init-kubernetes/templates/dns/coredns.yaml index 2475defd..4b1131dd 100644 --- a/builtin/core/roles/kubernetes/init-kubernetes/templates/dns/coredns.yaml +++ b/builtin/core/roles/kubernetes/init-kubernetes/templates/dns/coredns.yaml @@ -14,7 +14,7 @@ metadata: prometheus.io/scrape: "true" createdby: 'kubekey' spec: - clusterIP: {{ .kubernetes.networking.dns_service_ip }} + clusterIP: {{ .dns.dns_service_ip }} selector: k8s-app: kube-dns ports: @@ -38,7 +38,7 @@ metadata: addonmanager.kubernetes.io/mode: EnsureExists data: Corefile: | - {{- range .kubernetes.coredns.zone_configs }} + {{- range .dns.coredns.zone_configs }} {{ .zones | join " " }} { cache {{ .cache }} {{- range .additional_configs }} @@ -99,7 +99,7 @@ data: } 
{{- end }} - {{- if $.kubernetes.coredns.dns_etc_hosts | empty | not }} + {{- if $.dns.dns_etc_hosts | empty | not }} hosts /etc/coredns/hosts { fallthrough } @@ -107,9 +107,9 @@ data: } {{- end }} -{{- if .kubernetes.coredns.dns_etc_hosts | empty | not }} +{{- if .dns.coredns.dns_etc_hosts | empty | not }} hosts: | - {{- range .kubernetes.coredns.dns_etc_hosts }} + {{- range .dns.coredns.dns_etc_hosts }} {{ . }} {{- end }} {{- end }} diff --git a/builtin/core/roles/kubernetes/init-kubernetes/templates/dns/nodelocaldns.yaml b/builtin/core/roles/kubernetes/init-kubernetes/templates/dns/nodelocaldns.yaml index 3fadfcf7..33815be2 100644 --- a/builtin/core/roles/kubernetes/init-kubernetes/templates/dns/nodelocaldns.yaml +++ b/builtin/core/roles/kubernetes/init-kubernetes/templates/dns/nodelocaldns.yaml @@ -24,9 +24,6 @@ spec: metadata: labels: k8s-app: nodelocaldns - annotations: - prometheus.io/scrape: 'true' - prometheus.io/port: '9253' spec: nodeSelector: kubernetes.io/os: linux @@ -43,7 +40,7 @@ spec: operator: "Exists" containers: - name: node-cache - image: {{ .kubernetes.networking.dns_cache_image.registry }}/{{ .kubernetes.networking.dns_cache_image.repository }}:{{ .kubernetes.networking.dns_cache_image.tag }} + image: {{ .dns.dns_cache_image.registry }}/{{ .dns.dns_cache_image.repository }}:{{ .dns.dns_cache_image.tag }} resources: limits: memory: 200Mi @@ -52,12 +49,12 @@ spec: memory: 70Mi args: - -localip - - {{ .kubernetes.networking.clusterDNS }} + - {{ .dns.dns_cache_ip }} - -conf - /etc/coredns/Corefile - -upstreamsvc - coredns - - metrics-listen-address + - -metrics-listen-address - 127.0.0.1:9353 securityContext: privileged: true @@ -68,12 +65,9 @@ spec: - containerPort: 53 name: dns-tcp protocol: TCP - - containerPort: 9253 - name: metrics - protocol: TCP livenessProbe: httpGet: - host: {{ .kubernetes.networking.clusterDNS }} + host: {{ .dns.dns_cache_ip }} path: /health port: 9254 scheme: HTTP @@ -82,7 +76,7 @@ spec: failureThreshold: 10 
readinessProbe: httpGet: - host: {{ .kubernetes.networking.clusterDNS }} + host: {{ .dns.dns_cache_ip }} path: /health port: 9254 scheme: HTTP @@ -120,7 +114,7 @@ metadata: addonmanager.kubernetes.io/mode: EnsureExists data: Corefile: | - {{- range .kubernetes.coredns.external_zones }} + {{- range .dns.coredns.external_zones }} {{ .zones | join " " }}{ log errors @@ -128,7 +122,7 @@ data: cache {{ .cache }} reload loop - bind {{ .kubernetes.networking.clusterDNS }} + bind {{ .kubernetes.networking.dns_cache_ip }} {{- range .rewrite }} rewrite {{ .rule }} { @@ -176,7 +170,7 @@ data: } {{- end }} - {{ .kubernetes.networking.dns_domain }}:53 { + {{ .dns.dns_domain }}:53 { errors cache { success 9984 30 @@ -184,19 +178,19 @@ data: } reload loop - bind {{ .kubernetes.networking.clusterDNS }} - forward . {{ .kubernetes.networking.dns_service_ip }} { + bind {{ .dns.dns_cache_ip }} + forward . {{ .dns.dns_service_ip }} { force_tcp } - health {{ .kubernetes.networking.clusterDNS }}:9254 + health {{ .dns.dns_cache_ip }}:9254 } in-addr.arpa:53 { errors cache 30 reload loop - bind {{ .kubernetes.networking.clusterDNS }} - forward . {{ .kubernetes.networking.dns_service_ip }} { + bind {{ .dns.dns_cache_ip }} + forward . {{ .dns.dns_service_ip }} { force_tcp } } @@ -205,8 +199,8 @@ data: cache 30 reload loop - bind {{ .kubernetes.networking.clusterDNS }} - forward . {{ .kubernetes.networking.dns_service_ip }} { + bind {{ .dns.dns_cache_ip }} + forward . {{ .dns.dns_service_ip }} { force_tcp } } @@ -215,18 +209,18 @@ data: cache 30 reload loop - bind {{ .kubernetes.networking.clusterDNS }} + bind {{ .dns.dns_cache_ip }} forward . 
/etc/resolv.conf - {{- if .kubernetes.coredns.dns_etc_hosts | empty | not }} + {{- if .dns.dns_etc_hosts | empty | not }} hosts /etc/coredns/hosts { fallthrough } {{- end }} } -{{- if .kubernetes.coredns.dns_etc_hosts | empty | not }} +{{- if .dns.coredns.dns_etc_hosts | empty | not }} hosts: | - {{- range .kubernetes.coredns.dns_etc_hosts }} + {{- range .dns.coredns.dns_etc_hosts }} {{ . }} {{- end }} {{- end }} diff --git a/builtin/core/roles/kubernetes/init-kubernetes/templates/kubeadm/kubeadm-init.v1beta2 b/builtin/core/roles/kubernetes/init-kubernetes/templates/kubeadm/kubeadm-init.v1beta2 index 39735737..ebb12725 100644 --- a/builtin/core/roles/kubernetes/init-kubernetes/templates/kubeadm/kubeadm-init.v1beta2 +++ b/builtin/core/roles/kubernetes/init-kubernetes/templates/kubeadm/kubeadm-init.v1beta2 @@ -4,10 +4,10 @@ apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration etcd: -{{- if .kubernetes.etcd.deployment_type | eq "internal" }} +{{- if .etcd.deployment_type | eq "internal" }} local: - imageRepository: {{ .kubernetes.etcd.image.registry }} - imageTag: {{ .kubernetes.etcd.image.tag }} + imageRepository: {{ .etcd.image.registry }} + imageTag: {{ .etcd.image.tag }} serverCertSANs: {{- range .groups.etcd | default list }} - {{ index $.hostvars . 
"internal_ipv4" }} @@ -24,17 +24,17 @@ etcd: {{- end }} dns: type: CoreDNS - imageRepository: {{ .kubernetes.networking.dns_image.registry }}/{{ .kubernetes.networking.dns_image.repository }} - imageTag: {{ .kubernetes.networking.dns_image.tag }} + imageRepository: {{ .dns.dns_image.registry }}/{{ .dns.dns_image.repository }} + imageTag: {{ .dns.dns_image.tag }} imageRepository: {{ .kubernetes.image_repository }} -kubernetesVersion: {{ .kube_version }} +kubernetesVersion: {{ .kubernetes.kube_version }} certificatesDir: /etc/kubernetes/pki clusterName: {{ .kubernetes.cluster_name }} controlPlaneEndpoint: {{ .kubernetes.control_plane_endpoint.host }} networking: - dnsDomain: {{ .kubernetes.networking.dns_domain }} - podSubnet: {{ .kubernetes.networking.pod_cidr }} - serviceSubnet: {{ .kubernetes.networking.service_cidr }} + dnsDomain: {{ .dns.dns_domain }} + podSubnet: {{ .cni.pod_cidr }} + serviceSubnet: {{ .cni.service_cidr }} apiServer: extraArgs: {{- if $internalIPv4 | empty | not }} @@ -51,7 +51,7 @@ apiServer: tls-min-version: VersionTLS12 tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 {{- end }} -{{- if .kubernetes.audit }} +{{- if .audit }} audit-log-format: json audit-log-maxbackup: 2 audit-log-maxsize: 200 @@ -69,13 +69,13 @@ apiServer: - kubernetes.default - kubernetes.default.svc - kubernetes.default.svc.{{ .kubernetes.cluster_name }} - - kubernetes.default.svc.{{ .kubernetes.cluster_name }}.{{ .kubernetes.networking.dns_domain }} - - {{ index (.kubernetes.networking.service_cidr | ipInCIDR) 0 }} + - kubernetes.default.svc.{{ .kubernetes.cluster_name }}.{{ .dns.dns_domain }} + - {{ index (.cni.service_cidr | ipInCIDR) 0 }} - {{ .kubernetes.control_plane_endpoint.host }} {{- range .groups.k8s_cluster | default list }} - {{ index $.hostvars . "hostname" }} - {{ index $.hostvars . "hostname" }}.{{ $.kubernetes.cluster_name }} - - {{ index $.hostvars . 
"hostname" }}.{{ $.kubernetes.cluster_name }}.{{ $.kubernetes.networking.dns_domain }} + - {{ index $.hostvars . "hostname" }}.{{ $.kubernetes.cluster_name }}.{{ $.dns.dns_domain }} {{- $internalIPv4 := index $.hostvars . "internal_ipv4" | default "" }} {{- $internalIPv6 := index $.hostvars . "internal_ipv6" | default "" }} {{- if $internalIPv4 | empty | not }} @@ -88,7 +88,7 @@ apiServer: {{- range .kubernetes.apiserver.certSANs }} - {{ . }} {{- end }} -{{- if .kubernetes.audit }} +{{- if .audit }} extraVolumes: - name: k8s-audit hostPath: /etc/kubernetes/audit @@ -97,11 +97,11 @@ apiServer: {{- end }} controllerManager: extraArgs: -{{- if eq (.kubernetes.networking.pod_cidr | splitList "," | first | ipFamily) "IPv4" }} - node-cidr-mask-size-ipv4: "{{ .kubernetes.networking.ipv4_mask_size }}" +{{- if eq (.cni.pod_cidr | splitList "," | first | ipFamily) "IPv4" }} + node-cidr-mask-size-ipv4: "{{ .cni.ipv4_mask_size }}" {{- end }} -{{- if eq (.kubernetes.networking.pod_cidr | splitList "," | last | ipFamily) "IPv6" }} - node-cidr-mask-size-ipv6: "{{ .kubernetes.networking.ipv6_mask_size }}" +{{- if eq (.cni.pod_cidr | splitList "," | last | ipFamily) "IPv6" }} + node-cidr-mask-size-ipv6: "{{ .cni.ipv6_mask_size }}" {{- end }} {{- if .security_enhancement }} {{- if $internalIPv4 | empty | not }} @@ -165,7 +165,7 @@ nodeRegistration: --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: {{ .kubernetes.networking.pod_cidr }} +clusterCIDR: {{ .cni.pod_cidr }} mode: {{ .kubernetes.kube_proxy.mode }} {{- if .kubernetes.kube_proxy.config | empty | not }} {{ .kubernetes.kube_proxy.config | toYaml }} @@ -173,10 +173,10 @@ mode: {{ .kubernetes.kube_proxy.mode }} --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration -clusterDomain: {{ .kubernetes.networking.dns_domain }} +clusterDomain: {{ .dns.dns_domain }} clusterDNS: - - {{ .kubernetes.networking.clusterDNS }} -maxPods: {{ .kubernetes.max_pods }} + - {{ 
.dns.dns_cache_ip }} +maxPods: {{ .cni.max_pods }} podPidsLimit: {{ .kubernetes.kubelet.pod_pids_limit }} rotateCertificates: true kubeReserved: diff --git a/builtin/core/roles/kubernetes/init-kubernetes/templates/kubeadm/kubeadm-init.v1beta3 b/builtin/core/roles/kubernetes/init-kubernetes/templates/kubeadm/kubeadm-init.v1beta3 index d65fb282..b594d9d3 100644 --- a/builtin/core/roles/kubernetes/init-kubernetes/templates/kubeadm/kubeadm-init.v1beta3 +++ b/builtin/core/roles/kubernetes/init-kubernetes/templates/kubeadm/kubeadm-init.v1beta3 @@ -4,13 +4,13 @@ apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration etcd: -{{- if .kubernetes.etcd.deployment_type | eq "internal" }} +{{- if .etcd.deployment_type | eq "internal" }} local: - imageRepository: {{ .kubernetes.etcd.image.registry }} - imageTag: {{ .kubernetes.etcd.image.tag }} + imageRepository: {{ .etcd.image.registry }} + imageTag: {{ .etcd.image.tag }} serverCertSANs: {{- range .groups.etcd | default list }} - - {{ index $.hostvars . "internal_ipv4" }} + - {{ index $.hostvars . 
"internal_ipv4" }} {{- end }} {{- else }} external: @@ -23,17 +23,17 @@ etcd: keyFile: /etc/kubernetes/pki/etcd/client.key {{- end }} dns: - imageRepository: {{ .kubernetes.networking.dns_image.registry }}/{{ .kubernetes.networking.dns_image.repository }} - imageTag: {{ .kubernetes.networking.dns_image.tag }} + imageRepository: {{ .dns.dns_image.registry }}/{{ .dns.dns_image.repository }} + imageTag: {{ .dns.dns_image.tag }} imageRepository: {{ .kubernetes.image_repository }} -kubernetesVersion: {{ .kube_version }} +kubernetesVersion: {{ .kubernetes.kube_version }} certificatesDir: /etc/kubernetes/pki clusterName: {{ .kubernetes.cluster_name }} controlPlaneEndpoint: {{ .kubernetes.control_plane_endpoint.host }} networking: - dnsDomain: {{ .kubernetes.networking.dns_domain }} - podSubnet: {{ .kubernetes.networking.pod_cidr }} - serviceSubnet: {{ .kubernetes.networking.service_cidr }} + dnsDomain: {{ .dns.dns_domain }} + podSubnet: {{ .cni.pod_cidr }} + serviceSubnet: {{ .cni.service_cidr }} apiServer: extraArgs: {{- if $internalIPv4 | empty | not }} @@ -50,7 +50,7 @@ apiServer: tls-min-version: VersionTLS12 tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 {{- end }} -{{- if .kubernetes.audit }} +{{- if .audit }} audit-log-format: json audit-log-maxbackup: 2 audit-log-maxsize: 200 @@ -68,13 +68,13 @@ apiServer: - kubernetes.default - kubernetes.default.svc - kubernetes.default.svc.{{ .kubernetes.cluster_name }} - - kubernetes.default.svc.{{ .kubernetes.cluster_name }}.{{ .kubernetes.networking.dns_domain }} - - {{ index (.kubernetes.networking.service_cidr | ipInCIDR) 0 }} + - kubernetes.default.svc.{{ .kubernetes.cluster_name }}.{{ .dns.dns_domain }} + - {{ index (.cni.service_cidr | ipInCIDR) 0 }} - {{ .kubernetes.control_plane_endpoint.host }} {{- range .groups.k8s_cluster | default list }} - {{ index $.hostvars . "hostname" }} - {{ index $.hostvars . 
"hostname" }}.{{ $.kubernetes.cluster_name }} - - {{ index $.hostvars . "hostname" }}.{{ $.kubernetes.cluster_name }}.{{ $.kubernetes.networking.dns_domain }} + - {{ index $.hostvars . "hostname" }}.{{ $.kubernetes.cluster_name }}.{{ $.dns.dns_domain }} {{- $internalIPv4 := index $.hostvars . "internal_ipv4" | default "" }} {{- $internalIPv6 := index $.hostvars . "internal_ipv6" | default "" }} {{- if $internalIPv4 | empty | not }} @@ -87,7 +87,7 @@ apiServer: {{- range .kubernetes.apiserver.certSANs }} - {{ . }} {{- end }} -{{- if .kubernetes.audit }} +{{- if .audit }} extraVolumes: - name: k8s-audit hostPath: /etc/kubernetes/audit @@ -96,11 +96,11 @@ apiServer: {{- end }} controllerManager: extraArgs: -{{- if eq (.kubernetes.networking.pod_cidr | splitList "," | first | ipFamily) "IPv4" }} - node-cidr-mask-size-ipv4: "{{ .kubernetes.networking.ipv4_mask_size }}" +{{- if eq (.cni.pod_cidr | splitList "," | first | ipFamily) "IPv4" }} + node-cidr-mask-size-ipv4: "{{ .cni.ipv4_mask_size }}" {{- end }} -{{- if eq (.kubernetes.networking.pod_cidr | splitList "," | last | ipFamily) "IPv6" }} - node-cidr-mask-size-ipv6: "{{ .kubernetes.networking.ipv6_mask_size }}" +{{- if eq (.cni.pod_cidr | splitList "," | last | ipFamily) "IPv6" }} + node-cidr-mask-size-ipv6: "{{ .cni.ipv6_mask_size }}" {{- end }} {{- if .security_enhancement }} {{- if $internalIPv4 | empty | not }} @@ -118,7 +118,7 @@ controllerManager: bind-address: :: {{- end }} {{- end }} -{{- if .kubernetes.controller_manager.extra_args | empty | not }} +{{- if .kubernetes.controller_manager.extra_args }} {{ .kubernetes.controller_manager.extra_args | toYaml | indent 4 }} {{- end }} extraVolumes: @@ -142,7 +142,7 @@ scheduler: bind-address: :: {{- end }} {{- end }} -{{- if .kubernetes.scheduler.extra_args | empty | not }} +{{- if .kubernetes.scheduler.extra_args }} {{ .kubernetes.scheduler.extra_args | toYaml | indent 4 }} {{- end }} --- @@ -164,18 +164,18 @@ nodeRegistration: --- apiVersion: 
kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clusterCIDR: {{ .kubernetes.networking.pod_cidr }} +clusterCIDR: {{ .cni.pod_cidr }} mode: {{ .kubernetes.kube_proxy.mode }} -{{- if .kubernetes.kube_proxy.config }} +{{- if .kubernetes.kube_proxy.config | empty | not }} {{ .kubernetes.kube_proxy.config | toYaml }} {{- end }} --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration -clusterDomain: {{ .kubernetes.networking.dns_domain }} +clusterDomain: {{ .dns.dns_domain }} clusterDNS: - - {{ .kubernetes.networking.clusterDNS }} -maxPods: {{ .kubernetes.max_pods }} + - {{ .dns.dns_cache_ip }} +maxPods: {{ .cni.max_pods }} podPidsLimit: {{ .kubernetes.kubelet.pod_pids_limit }} rotateCertificates: true kubeReserved: diff --git a/builtin/core/roles/kubernetes/join-kubernetes/tasks/join_kubernetes.yaml b/builtin/core/roles/kubernetes/join-kubernetes/tasks/join_kubernetes.yaml deleted file mode 100644 index ddc3f6e0..00000000 --- a/builtin/core/roles/kubernetes/join-kubernetes/tasks/join_kubernetes.yaml +++ /dev/null @@ -1,68 +0,0 @@ ---- -- name: Join | Generate kubeadm join configuration file - template: - src: >- - {{- if .kube_version | semverCompare ">=v1.24.0" -}} - kubeadm/kubeadm-join.v1beta3 - {{- else -}} - kubeadm/kubeadm-join.v1beta2 - {{- end -}} - dest: /etc/kubernetes/kubeadm-config.yaml - -- name: Join | Execute kubeadm join to add node to the Kubernetes cluster - command: | - /usr/local/bin/kubeadm join --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull - -- name: Join | Synchronize kubeconfig to remote node - copy: - src: >- - {{ .work_dir }}/kubekey/kubeconfig - dest: /root/.kube/config - -- name: Join | Configure node as worker - when: .groups.kube_worker | default list | has .inventory_hostname - block: - - name: Join | Remove master and control-plane taints from node - ignore_errors: true - command: | - /usr/local/bin/kubectl taint nodes {{ .hostname }} 
node-role.kubernetes.io/master=:NoSchedule- - /usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/control-plane=:NoSchedule- - - name: Join | Add worker label to node - command: | - /usr/local/bin/kubectl label --overwrite node {{ .hostname }} node-role.kubernetes.io/worker= - -- name: Join | Add custom annotations to node - when: .annotations | empty | not - command: | - kubectl annotate {{ .hostname }} {{- range $k,$v := .annotations }}{{ printf "%s=%s" $k $v}} {{- end }} - -# Reset local DNS for control_plane_endpoint to 127.0.0.1 and ::1. -# This ensures the control_plane_endpoint resolves locally before kube-vip is running, -# preventing failures for tasks that execute kubectl apply on the current node. -- name: Join | Reset local DNS for control_plane_endpoint - block: - - name: Join | Reset local DNS on control plane nodes - when: - - .groups.kube_control_plane | default list | has .inventory_hostname - command: | - sed -i ':a;$!{N;ba};s@# kubekey control_plane_endpoint BEGIN.*# kubekey control_plane_endpoint END@@' {{ .item }} - cat >> {{ .item }} <> {{ .item }} <- + {{- if .kubernetes.kube_version | semverCompare ">=v1.24.0" -}} + kubeadm/kubeadm-join.v1beta3 + {{- else -}} + kubeadm/kubeadm-join.v1beta2 + {{- end -}} + dest: /etc/kubernetes/kubeadm-config.yaml + +- name: Join | Execute kubeadm join to add node to the Kubernetes cluster + command: | + /usr/local/bin/kubeadm join --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull + +- name: Join | Synchronize kubeconfig to remote node + copy: + src: >- + {{ .work_dir }}/kubekey/kubeconfig + dest: /root/.kube/config + +- name: Join | Configure node as worker + when: .groups.kube_worker | default list | has .inventory_hostname + block: + - name: Join | Remove master and control-plane taints from node + ignore_errors: true + command: | + /usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/master=:NoSchedule- + 
/usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/control-plane=:NoSchedule- + - name: Join | Add worker label to node + command: | + /usr/local/bin/kubectl label --overwrite node {{ .hostname }} node-role.kubernetes.io/worker= + +- name: Join | Add custom annotations to node + when: .annotations | empty | not + command: | + kubectl annotate {{ .hostname }} {{- range $k,$v := .annotations }}{{ printf "%s=%s" $k $v}} {{- end }} + +# Reset local DNS for control_plane_endpoint to 127.0.0.1 and ::1. +# This ensures the control_plane_endpoint resolves locally before kube-vip is running, +# preventing failures for tasks that execute kubectl apply on the current node. +- name: Join | Reset local DNS for control_plane_endpoint + block: + - name: Join | Reset local DNS on control plane nodes + when: + - .groups.kube_control_plane | default list | has .inventory_hostname + loop: "{{ .native.localDNS | toJson }}" + command: | + sed -i ':a;$!{N;ba};s@# kubekey kubernetes control_plane_endpoint BEGIN.*# kubekey kubernetes control_plane_endpoint END@@' {{ .item }} + cat >> {{ .item }} <> {{ .item }} <> {{ .item }} <- - {{ .binary_dir }}/helm/{{ .helm_version }}/{{ .binary_type }}/helm-{{ .helm_version }}-linux-{{ .binary_type }}.tar.gz + {{ .binary_dir }}/helm/{{ .kubernetes.helm_version }}/{{ .binary_type }}/helm-{{ .kubernetes.helm_version }}-linux-{{ .binary_type }}.tar.gz dest: >- - {{ .tmp_dir }}/helm-{{ .helm_version }}-linux-{{ .binary_type }}.tar.gz + {{ .tmp_dir }}/helm-{{ .kubernetes.helm_version }}-linux-{{ .binary_type }}.tar.gz - name: Binary | Extract and install Helm binary command: | - tar --strip-components=1 -zxvf {{ .tmp_dir }}/helm-{{ .helm_version }}-linux-{{ .binary_type }}.tar.gz -C /usr/local/bin linux-{{ .binary_type }}/helm + tar --strip-components=1 -zxvf {{ .tmp_dir }}/helm-{{ .kubernetes.helm_version }}-linux-{{ .binary_type }}.tar.gz -C /usr/local/bin linux-{{ .binary_type }}/helm - name: Binary | Check if kubeadm is 
installed ignore_errors: true @@ -23,10 +23,10 @@ register: kubeadm_install_version - name: Binary | Install kubeadm if not present or version mismatch - when: or (.kubeadm_install_version.error | empty | not) (.kubeadm_install_version.stdout | ne .kube_version) + when: or (.kubeadm_install_version.error | empty | not) (.kubeadm_install_version.stdout | ne .kubernetes.kube_version) copy: src: >- - {{ .binary_dir }}/kube/{{ .kube_version }}/{{ .binary_type }}/kubeadm + {{ .binary_dir }}/kube/{{ .kubernetes.kube_version }}/{{ .binary_type }}/kubeadm dest: /usr/local/bin/kubeadm mode: 0755 @@ -38,10 +38,10 @@ - name: Binary | Install kubectl if not present or version mismatch when: | - or (.kubectl_install_version.error | empty | not) ((get .kubectl_install_version.stdout "Server Version") | ne .kube_version) + or (.kubectl_install_version.error | empty | not) ((get .kubectl_install_version.stdout "Server Version") | ne .kubernetes.kube_version) copy: src: >- - {{ .binary_dir }}/kube/{{ .kube_version }}/{{ .binary_type }}/kubectl + {{ .binary_dir }}/kube/{{ .kubernetes.kube_version }}/{{ .binary_type }}/kubectl dest: /usr/local/bin/kubectl mode: 0755 @@ -51,12 +51,12 @@ register: kubelet_install_version - name: Binary | Install kubelet if not present or version mismatch - when: or (.kubelet_install_version.error | empty | not) (.kubelet_install_version.stdout | ne (printf "Kubernetes %s" .kube_version)) + when: or (.kubelet_install_version.error | empty | not) (.kubelet_install_version.stdout | ne (printf "Kubernetes %s" .kubernetes.kube_version)) block: - name: Binary | Copy kubelet binary to remote host copy: src: >- - {{ .binary_dir }}/kube/{{ .kube_version }}/{{ .binary_type }}/kubelet + {{ .binary_dir }}/kube/{{ .kubernetes.kube_version }}/{{ .binary_type }}/kubelet dest: /usr/local/bin/kubelet mode: 0755 - name: Binary | Deploy kubelet environment configuration @@ -71,14 +71,14 @@ command: systemctl daemon-reload && systemctl enable kubelet.service - name: 
Binary | Install CNI plugins if version specified - when: .cni_plugins_version | empty | not + when: .cni.cni_plugins_version | empty | not block: - name: Binary | Copy CNI plugins archive to remote host copy: src: >- - {{ .binary_dir }}/cni/plugins/{{ .cni_plugins_version }}/{{ .binary_type }}/cni-plugins-linux-{{ .binary_type }}-{{ .cni_plugins_version }}.tgz + {{ .binary_dir }}/cni/plugins/{{ .cni.cni_plugins_version }}/{{ .binary_type }}/cni-plugins-linux-{{ .binary_type }}-{{ .cni.cni_plugins_version }}.tgz dest: >- - {{ .tmp_dir }}/cni-plugins-linux-{{ .binary_type }}-{{ .cni_plugins_version }}.tgz + {{ .tmp_dir }}/cni-plugins-linux-{{ .binary_type }}-{{ .cni.cni_plugins_version }}.tgz - name: Binary | Extract and install CNI plugins command: | - tar -zxvf {{ .tmp_dir }}/cni-plugins-linux-{{ .binary_type }}-{{ .cni_plugins_version }}.tgz -C /opt/cni/bin/ \ No newline at end of file + tar -zxvf {{ .tmp_dir }}/cni-plugins-linux-{{ .binary_type }}-{{ .cni.cni_plugins_version }}.tgz -C /opt/cni/bin/ \ No newline at end of file diff --git a/builtin/core/roles/kubernetes/pre-kubernetes/tasks/main.yaml b/builtin/core/roles/kubernetes/pre-kubernetes/tasks/main.yaml index ec9361ba..f4a28bb3 100644 --- a/builtin/core/roles/kubernetes/pre-kubernetes/tasks/main.yaml +++ b/builtin/core/roles/kubernetes/pre-kubernetes/tasks/main.yaml @@ -26,7 +26,7 @@ copy: src: audit dest: /etc/kubernetes/audit/ - when: .kubernetes.audit + when: .audit - name: PreKubernetes | Synchronize cluster CA files to control plane nodes when: @@ -47,23 +47,23 @@ - name: PreKubernetes | Ensure external etcd certificates are present on control plane nodes when: - - .kubernetes.etcd.deployment_type | eq "external" + - .etcd.deployment_type | eq "external" - .groups.kube_control_plane | default list | has .inventory_hostname block: - name: PreKubernetes | Copy etcd CA certificate to control plane node copy: src: >- - {{ .work_dir }}/kubekey/pki/root.crt + {{ .etcd.ca_file }} dest: 
/etc/kubernetes/pki/etcd/ca.crt - name: PreKubernetes | Copy etcd client certificate to control plane node copy: src: >- - {{ .work_dir }}/kubekey/pki/etcd.crt + {{ .etcd.cert_file }} dest: /etc/kubernetes/pki/etcd/client.crt - name: PreKubernetes | Copy etcd client key to control plane node copy: src: >- - {{ .work_dir }}/kubekey/pki/etcd.key + {{ .etcd.key_file }} dest: /etc/kubernetes/pki/etcd/client.key - name: PreKubernetes | Synchronize front-proxy CA files to control plane nodes diff --git a/builtin/core/roles/native/dns/tasks/main.yaml b/builtin/core/roles/native/dns/tasks/main.yaml new file mode 100644 index 00000000..5795ea71 --- /dev/null +++ b/builtin/core/roles/native/dns/tasks/main.yaml @@ -0,0 +1,149 @@ +- name: DNS | Ensure local DNS entries are up-to-date + loop: "{{ .native.localDNS | toJson }}" + command: | + # Clean up any previous Kubekey-managed DNS blocks + sed -i ':a;$!{N;ba};s@# kubekey hosts BEGIN.*# kubekey hosts END@@' {{ .item }} + sed -i '/^$/N;/\n$/N;//D' {{ .item }} + # Write the latest Kubekey DNS configuration + cat >> {{ .item }} <> {{ .item }} <> {{ .item }} <> $chronyConfigFile # Add NTP server entries - {{- range $server := .ntp.servers }} + {{- range $server := .native.ntp.servers }} {{- $internalIPv4 := "" }} {{- $internalIPv6 := "" }} {{- range $.hostvars }} @@ -37,14 +37,14 @@ {{- end }} {{- end }} when: - - .ntp.enabled - - .ntp.servers | empty | not + - .native.ntp.enabled + - .native.ntp.servers | empty | not - name: Timezone | Set system timezone and NTP synchronization command: | - timedatectl set-timezone {{ .timezone }} - timedatectl set-ntp {{ and .ntp.enabled (.ntp.servers | empty | not) }} - when: or (and .ntp.enabled (.ntp.servers | empty | not)) (.timezone | empty | not) + timedatectl set-timezone {{ .native.timezone }} + timedatectl set-ntp {{ and .native.ntp.enabled (.native.ntp.servers | empty | not) }} + when: or (and .native.ntp.enabled (.native.ntp.servers | empty | not)) (.native.timezone | empty | not) - 
name: NTP | Restart NTP service command: | @@ -52,4 +52,4 @@ systemctl restart chrony.service {{- end }} systemctl restart chronyd.service - when: or (and .ntp.enabled (.ntp.servers | empty | not)) (.timezone | empty | not) + when: or (and .native.ntp.enabled (.native.ntp.servers | empty | not)) (.native.timezone | empty | not) diff --git a/builtin/core/roles/native/repository/tasks/install_package.yaml b/builtin/core/roles/native/repository/tasks/install_package.yaml new file mode 100644 index 00000000..f01c6abb --- /dev/null +++ b/builtin/core/roles/native/repository/tasks/install_package.yaml @@ -0,0 +1,76 @@ +- name: Repository | Initialize Debian-based repository and install required system packages + command: | + now=$(date +"%Y-%m-%d_%H-%M-%S") + PKGS="socat conntrack ipset ebtables chrony ipvsadm{{ if .groups.nfs | default list | has .inventory_hostname }} nfs-kernel-server{{ end }}" + PKGS_TO_INSTALL="" + for pkg in $PKGS; do + if [ -n "$pkg" ]; then + dpkg -s $pkg >/dev/null 2>&1 || PKGS_TO_INSTALL="$PKGS_TO_INSTALL $pkg" + fi + done + if [ -f "{{ .tmp_dir }}/repository.iso" ]; then + # Backup current APT sources + mv /etc/apt/sources.list /etc/apt/sources.list.kubekey-$now.bak + mv /etc/apt/sources.list.d /etc/apt/sources.list.d.kubekey-$now.bak + mkdir -p /etc/apt/sources.list.d + # Configure local repository + rm -rf /etc/apt/sources.list.d/* + echo 'deb [trusted=yes] file://{{ .tmp_dir }}/iso /' > /etc/apt/sources.list.d/kubekey.list + # Update package index + apt-get update + # Install missing packages + if [ -n "$PKGS_TO_INSTALL" ]; then + apt install -y $PKGS_TO_INSTALL + fi + # Restore original APT sources + rm -rf /etc/apt/sources.list.d + mv /etc/apt/sources.list.kubekey-$now.bak /etc/apt/sources.list + mv /etc/apt/sources.list.d.kubekey-$now.bak /etc/apt/sources.list.d + else + # No local ISO found, using default repositories + apt-get update + if [ -n "$PKGS_TO_INSTALL" ]; then + apt install -y $PKGS_TO_INSTALL + fi + fi + when:
.os.release.ID_LIKE | eq "debian" + +- name: Repository | Initialize RHEL-based repository and install required system packages + command: | + now=$(date +"%Y-%m-%d_%H-%M-%S") + PKGS="socat conntrack ipset ebtables chrony ipvsadm{{ if .groups.nfs | default list | has .inventory_hostname }} nfs-kernel-server{{ end }}" + PKGS_TO_INSTALL="" + for pkg in $PKGS; do + if [ -n "$pkg" ]; then + rpm -q $pkg >/dev/null 2>&1 || PKGS_TO_INSTALL="$PKGS_TO_INSTALL $pkg" + fi + done + if [ -f "{{ .tmp_dir }}/repository.iso" ]; then + # Backup current YUM repositories + mv /etc/yum.repos.d /etc/yum.repos.d.kubekey.bak-$now + mkdir -p /etc/yum.repos.d + # Configure local repository + rm -rf /etc/yum.repos.d/* + cat <<EOF > /etc/yum.repos.d/CentOS-local.repo + [base-local] + name=Local RPM Repository + baseurl=file://{{ .tmp_dir }}/iso + enabled=1 + gpgcheck=0 + EOF + # Refresh repository cache + yum clean all && yum makecache + # Install missing packages + if [ -n "$PKGS_TO_INSTALL" ]; then + yum install -y $PKGS_TO_INSTALL + fi + # Restore original YUM repositories + rm -rf /etc/yum.repos.d + mv /etc/yum.repos.d.kubekey.bak-$now /etc/yum.repos.d + else + # No local ISO found, using default repositories + if [ -n "$PKGS_TO_INSTALL" ]; then + yum install -y $PKGS_TO_INSTALL + fi + fi + when: .os.release.ID_LIKE | eq "\"rhel fedora\"" diff --git a/builtin/core/roles/native/repository/tasks/main.yaml b/builtin/core/roles/native/repository/tasks/main.yaml new file mode 100644 index 00000000..d49093aa --- /dev/null +++ b/builtin/core/roles/native/repository/tasks/main.yaml @@ -0,0 +1,23 @@ +--- +- name: Repository | Synchronize local repository ISO image + block: + - name: Repository | Copy local repository ISO file + ignore_errors: true + copy: + src: >- + {{ .binary_dir }}/repository/{{ .os.release.ID_LIKE }}-{{ .os.release.VERSION_ID }}-{{ .binary_type }}.iso + dest: >- + {{ .tmp_dir }}/repository.iso + - name: Repository | Mount repository ISO to temporary directory + command:
| + if [ -f "{{ .tmp_dir }}/repository.iso" ]; then + mount -t iso9660 -o loop {{ .tmp_dir }}/repository.iso {{ .tmp_dir }}/iso + fi + - name: Repository | Initialize package repositories and install system dependencies + include_tasks: install_package.yaml + always: + - name: Repository | Unmount repository ISO from temporary directory + command: | + if [ -f "{{ .tmp_dir }}/repository.iso" ]; then + umount {{ .tmp_dir }}/iso + fi diff --git a/builtin/core/roles/precheck/artifact/tasks/main.yaml b/builtin/core/roles/precheck/artifact/tasks/main.yaml new file mode 100644 index 00000000..9ae35fb3 --- /dev/null +++ b/builtin/core/roles/precheck/artifact/tasks/main.yaml @@ -0,0 +1,29 @@ +--- +- name: Artifact | Ensure artifact file exists + when: .download.artifact_file | empty | not + command: | + if [ ! -f "{{ .download.artifact_file }}" ]; then + echo "Error: Artifact file '{{ .download.artifact_file }}' does not exist." + exit 1 + fi + +- name: Artifact | Validate artifact file extension + when: + - .download.artifact_file | empty | not + loop: ['.tgz','.tar.gz'] + command: | + if [[ "{{ .download.artifact_file }}" != *{{ .item }} ]]; then + echo "Error: Artifact file '{{ .download.artifact_file }}' does not have the required extension '{{ .item }}'." + exit 1 + fi + +- name: Artifact | Verify artifact MD5 checksum + when: + - .download.artifact_md5 | empty | not + - .download.artifact_file | empty | not + command: | + actual_md5=$(md5sum {{ .download.artifact_file }} | awk '{print $1}') + if [[ "$actual_md5" != "{{ .download.artifact_md5 }}" ]]; then + echo "Error: MD5 checksum mismatch for '{{ .download.artifact_file }}'. Expected '{{ .download.artifact_md5 }}', got '$actual_md5'." 
+ exit 1 + fi diff --git a/builtin/core/roles/precheck/artifact_check/tasks/main.yaml b/builtin/core/roles/precheck/artifact_check/tasks/main.yaml deleted file mode 100644 index 7725f7e0..00000000 --- a/builtin/core/roles/precheck/artifact_check/tasks/main.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Artifact | Ensure artifact file exists - command: | - if [ ! -f "{{ .artifact.artifact_file }}" ]; then - echo "Error: Artifact file '{{ .artifact.artifact_file }}' does not exist." - exit 1 - fi - -- name: Artifact | Validate artifact file extension - command: | - if [[ "{{ .artifact.artifact_file }}" != *{{ .item }} ]]; then - echo "Error: Artifact file '{{ .artifact.artifact_file }}' does not have the required extension '{{ .item }}'." - exit 1 - fi - loop: ['.tgz','.tar.gz'] - -- name: Artifact | Verify artifact MD5 checksum - command: | - actual_md5=$(md5sum {{ .artifact.artifact_file }} | awk '{print $1}') - if [[ "$actual_md5" != "{{ .artifact.artifact_md5 }}" ]]; then - echo "Error: MD5 checksum mismatch for '{{ .artifact.artifact_file }}'. Expected '{{ .artifact.artifact_md5 }}', got '$actual_md5'." - exit 1 - fi - when: - - .artifact.artifact_md5 | empty | not diff --git a/builtin/core/roles/precheck/env_check/tasks/cri.yaml b/builtin/core/roles/precheck/cri/tasks/main.yaml similarity index 78% rename from builtin/core/roles/precheck/env_check/tasks/cri.yaml rename to builtin/core/roles/precheck/cri/tasks/main.yaml index 045b06d2..5454e678 100644 --- a/builtin/core/roles/precheck/env_check/tasks/cri.yaml +++ b/builtin/core/roles/precheck/cri/tasks/main.yaml @@ -1,18 +1,17 @@ --- - name: CRI | Fail if container manager is not docker or containerd + run_once: true assert: that: .cluster_require.require_container_manager | has .cri.container_manager fail_msg: >- The specified container manager "{{ .cri.container_manager }}" is not supported. Please use one of the following: {{ .cluster_require.require_container_manager | toJson }}. 
- run_once: true - when: .cri.container_manager | empty | not - name: CRI | Validate minimum required containerd version - assert: - that: .containerd_version | semverCompare (printf ">=%s" .cluster_require.containerd_min_version_required) - fail_msg: >- - The detected containerd version ({{ .containerd_version }}) is below the minimum required version: {{ .cluster_require.containerd_min_version_required }}. run_once: true when: - - .containerd_version | empty | not + - .cri.containerd_version | empty | not - .cri.container_manager | eq "containerd" + assert: + that: .cri.containerd_version | semverCompare (printf ">=%s" .cluster_require.containerd_min_version_required) + fail_msg: >- + The detected containerd version ({{ .cri.containerd_version }}) is below the minimum required version: {{ .cluster_require.containerd_min_version_required }}. diff --git a/builtin/core/roles/precheck/env_check/defaults/main.yaml b/builtin/core/roles/precheck/env_check/defaults/main.yaml deleted file mode 100644 index a2a82910..00000000 --- a/builtin/core/roles/precheck/env_check/defaults/main.yaml +++ /dev/null @@ -1,29 +0,0 @@ -cluster_require: - # the etcd sync duration for 99%.(unit ns) - etcd_disk_wal_fysnc_duration_seconds: 10000000 - allow_unsupported_distribution_setup: false - # support ubuntu, centos. - supported_os_distributions: - - ubuntu - - '"ubuntu"' - - centos - - '"centos"' - require_network_plugin: ['calico', 'flannel', 'cilium', 'hybridnet', 'kube-ovn'] - # the minimal version of kubernetes to be installed. - kube_version_min_required: v1.23.0 - # memory size for each kube_control_plane node.(unit kB) - # should be greater than or equal to minimal_master_memory_mb. - minimal_master_memory_mb: 10 - # memory size for each kube_worker node.(unit kB) - # should be greater than or equal to minimal_node_memory_mb.
- minimal_node_memory_mb: 10 - require_etcd_deployment_type: ['internal','external'] - require_container_manager: ['docker', 'containerd'] - # the minimal required version of containerd to be installed. - containerd_min_version_required: v1.6.0 - supported_architectures: - - amd64 - - x86_64 - - arm64 - - aarch64 - min_kernel_version: 4.9.17 \ No newline at end of file diff --git a/builtin/core/roles/precheck/env_check/tasks/image_registry.yaml b/builtin/core/roles/precheck/env_check/tasks/image_registry.yaml deleted file mode 100644 index 948038f4..00000000 --- a/builtin/core/roles/precheck/env_check/tasks/image_registry.yaml +++ /dev/null @@ -1,29 +0,0 @@ -- name: ImageRegistry | Verify successful authentication to image registry - when: .image_registry.auth | empty | not - run_once: true - command: | - HTTP_CODE=$(curl -skLI -w "%{http_code}" -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" "https://{{ .image_registry.auth.registry }}/v2/" -o /dev/null) - if [[ "$HTTP_CODE" == "200" ]]; then - echo "Successfully authenticated to the image registry." - else - echo "Failed to authenticate to the image registry at {{ .image_registry.auth.registry }}." >&2 - fi - -# The image_registry is deployed using docker_compose -- name: ImageRegistry | Ensure docker and docker-compose versions are specified - when: .groups.image_registry | empty | not - assert: - that: - - .docker_version | empty | not - - .dockercompose_version | empty | not - msg: >- - Both "docker_version" and "dockercompose_version" must be provided for the image registry deployment. - -- name: ImageRegistry | Ensure keepalived_version is specified for high availability - when: - - .image_registry.ha_vip | empty | not - - .groups.image_registry | len | lt 1 - assert: - that: .keepalived_version | empty | not - msg: >- - "keepalived_version" must be specified when configuring the image registry for high availability. 
\ No newline at end of file diff --git a/builtin/core/roles/precheck/env_check/tasks/main.yaml b/builtin/core/roles/precheck/env_check/tasks/main.yaml deleted file mode 100644 index e9c6b61b..00000000 --- a/builtin/core/roles/precheck/env_check/tasks/main.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- include_tasks: kubernetes.yaml - -- include_tasks: etcd.yaml - tags: ["etcd"] - -- include_tasks: os.yaml - tags: ["os"] - -- include_tasks: network.yaml - tags: ["network"] - -- include_tasks: cri.yaml - tags: ["cri"] - -- include_tasks: nfs.yaml - tags: ["nfs"] - -- include_tasks: image_registry.yaml - tags: ["image_registry"] diff --git a/builtin/core/roles/precheck/env_check/tasks/etcd.yaml b/builtin/core/roles/precheck/etcd/tasks/main.yaml similarity index 60% rename from builtin/core/roles/precheck/env_check/tasks/etcd.yaml rename to builtin/core/roles/precheck/etcd/tasks/main.yaml index e951093c..99d890cc 100644 --- a/builtin/core/roles/precheck/env_check/tasks/etcd.yaml +++ b/builtin/core/roles/precheck/etcd/tasks/main.yaml @@ -1,27 +1,10 @@ --- -- name: ETCD | Fail if etcd deployment type is not 'internal' or 'external' - assert: - that: .cluster_require.require_etcd_deployment_type | has .kubernetes.etcd.deployment_type - fail_msg: >- - Invalid etcd deployment type: '{{ .kubernetes.etcd.deployment_type }}'. Expected 'internal' or 'external'. +- name: Kubernetes | Fail if etcd deployment type is not "internal" or "external" run_once: true - when: .kubernetes.etcd.deployment_type | empty | not - -- name: ETCD | Fail if etcd group is empty in external etcd mode assert: - that: .groups.etcd | empty | not + that: .cluster_require.require_etcd_deployment_type | has .etcd.deployment_type fail_msg: >- - The "etcd" group must not be empty when using external etcd mode. 
- run_once: true - when: .kubernetes.etcd.deployment_type | eq "external" - -- name: ETCD | Fail if the number of etcd hosts is even - assert: - that: (mod (.groups.etcd | len) 2) | eq 1 - fail_msg: >- - The number of etcd nodes must be odd to ensure quorum. Current count: {{ .groups.etcd | len }}. - run_once: true - when: .kubernetes.etcd.deployment_type | eq "external" + Invalid etcd deployment type: "{{ .etcd.deployment_type }}". Expected "internal" or "external". ## https://cwiki.yunify.com/pages/viewpage.action?pageId=145920824 - name: ETCD | Validate disk I/O performance for etcd @@ -40,6 +23,7 @@ mkdir -p {{ .tmp_dir }}/etcd/test-data fio --rw=write --ioengine=sync --fdatasync=1 --directory={{ .tmp_dir }}/etcd/test-data --size=22m --bs=2300 --name=mytest --output-format=json register: fio_result + register_type: json - name: ETCD | Assert disk fsync latency meets requirements assert: that: (index (.fio_result.stdout.jobs | first) "sync" "lat_ns" "percentile" "90.000000") | le .cluster_require.etcd_disk_wal_fysnc_duration_seconds diff --git a/builtin/core/roles/precheck/inventory/tasks/main.yaml b/builtin/core/roles/precheck/inventory/tasks/main.yaml new file mode 100644 index 00000000..c884b0fa --- /dev/null +++ b/builtin/core/roles/precheck/inventory/tasks/main.yaml @@ -0,0 +1,21 @@ +- name: Inventory | Kubernetes groups must not be empty + when: .kubernetes.kube_version | empty | not + assert: + that: + - .groups.k8s_cluster | empty | not + - .groups.kube_control_plane | empty | not + fail_msg: >- + The Kubernetes inventory groups "k8s_cluster" and "kube_control_plane" must not be empty. Please ensure both groups are present and correctly specified in your inventory file. + +- name: Inventory | etcd group must not be empty + when: .kubernetes.kube_version | empty | not + assert: + that: .groups.etcd | empty | not + fail_msg: >- + The etcd inventory group "etcd" must not be empty. Please verify that this group is properly defined in your inventory. 
+ +- name: Inventory | Fail if the number of etcd hosts is even + assert: + that: (mod (.groups.etcd | len) 2) | eq 1 + fail_msg: >- + The number of etcd nodes must be odd to maintain quorum. The current count is {{ .groups.etcd | len }}. Please adjust your inventory so that the etcd group contains an odd number of hosts. diff --git a/builtin/core/roles/precheck/env_check/tasks/kubernetes.yaml b/builtin/core/roles/precheck/kubernetes/tasks/main.yaml similarity index 60% rename from builtin/core/roles/precheck/env_check/tasks/kubernetes.yaml rename to builtin/core/roles/precheck/kubernetes/tasks/main.yaml index ae364045..2e057e2d 100644 --- a/builtin/core/roles/precheck/env_check/tasks/kubernetes.yaml +++ b/builtin/core/roles/precheck/kubernetes/tasks/main.yaml @@ -1,11 +1,6 @@ -- name: Kubernetes | Ensure either internal_ipv4 or internal_ipv6 is defined - assert: - that: or (.internal_ipv4 | empty | not) (.internal_ipv6 | empty | not) - fail_msg: >- - Either "internal_ipv4" or "internal_ipv6" must be specified. Both cannot be empty. - - name: Kubernetes | Validate kube-vip address run_once: true + when: .kubernetes.control_plane_endpoint.type | eq "kube_vip" assert: that: - .kubernetes.control_plane_endpoint.kube_vip.address | empty | not @@ -13,7 +8,7 @@ - | {{- $existIP := false }} {{- range .groups.all | default list }} - {{- if eq $.kubernetes.control_plane_endpoint.kube_vip.address (index $.hostvars . "internal_ipv4") }} + {{- if or ($.kubernetes.control_plane_endpoint.kube_vip.address | eq (index $.hostvars . "internal_ipv4" | default "")) ($.kubernetes.control_plane_endpoint.kube_vip.address | eq (index $.hostvars . "internal_ipv6" | default "")) }} {{- $existIP = true }} {{- end }} {{- end }} @@ -21,39 +16,41 @@ fail_msg: >- The value of "kubernetes.control_plane_endpoint.kube_vip.address" must be an IP address that is not currently assigned to any node. 
- when: .kubernetes.control_plane_endpoint.type | eq "kube_vip" - - name: Kubernetes | Fail if unsupported Kubernetes version run_once: true assert: - that: .kube_version | semverCompare (printf ">=%s" .cluster_require.kube_version_min_required) + that: + - .kubernetes.kube_version | empty | not + - .kubernetes.kube_version | semverCompare (printf ">=%s" .cluster_require.kube_version_min_required) fail_msg: >- This version of KubeKey only supports Kubernetes versions greater than or equal to {{ .cluster_require.kube_version_min_required }}. You are attempting to use version {{ .kube_version }}. - when: .kube_version | empty | not - name: Kubernetes | Check if Kubernetes is installed - when: .groups.k8s_cluster | default list | has .inventory_hostname block: - - name: Kubernetes | Retrieve kubelet.service LoadState - command: systemctl show kubelet.service -p LoadState --value - register: kubernetes_install_LoadState - - name: Retrieve kubelet.service ActiveState - command: systemctl show kubelet.service -p ActiveState --value - register: kubernetes_install_ActiveState - - name: Retrieve installed Kubernetes version - ignore_errors: true - command: kubelet --version - register: kubernetes_install_version - - name: Validate Kubernetes service status and version + - name: Kubernetes | Validate Kubernetes service status and version when: .kubernetes_install_LoadState.stdout | eq "loaded" block: - - name: Ensure kubelet service is active + - name: Kubernetes | Ensure kubelet service is active assert: that: .kubernetes_install_ActiveState.stdout | eq "active" fail_msg: >- The kubelet service must be running and active when it is loaded. 
- - name: Ensure installed Kubernetes version matches expected version + - name: Kubernetes | Ensure installed Kubernetes version matches expected version assert: that: .kubernetes_install_version.stdout | default "" | trimPrefix "Kubernetes " | eq .kube_version fail_msg: >- The installed Kubernetes version ({{ .kubernetes_install_version.stdout | default "" | trimPrefix "Kubernetes " }}) does not match the expected version ({{ .kube_version }}). + +- name: ImageRegistry | Verify successful authentication to image registry + when: + - .image_registry.auth.registry | empty | not + - .image_registry.type | empty + run_once: true + command: | + HTTP_CODE=$(curl -skLI -w "%{http_code}" -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" "https://{{ .image_registry.auth.registry }}/v2/" -o /dev/null) + if [[ "$HTTP_CODE" == "200" ]]; then + echo "Successfully authenticated to the image registry." + else + echo "Failed to authenticate to the image registry at {{ .image_registry.auth.registry }}." 
>&2 + exit 1 + fi diff --git a/builtin/core/roles/precheck/meta/main.yaml b/builtin/core/roles/precheck/meta/main.yaml new file mode 100644 index 00000000..da5249f9 --- /dev/null +++ b/builtin/core/roles/precheck/meta/main.yaml @@ -0,0 +1,32 @@ +dependencies: + - role: precheck/inventory + when: .inventory_hostname | eq "localhost" + + - role: precheck/artifact + tags: ["artifact"] + when: .inventory_hostname | eq "localhost" + + - role: precheck/kubernetes + tags: ["kubernetes"] + when: .groups.k8s_cluster | default list | has .inventory_hostname + + - role: precheck/etcd + tags: ["etcd"] + when: .groups.etcd | default list | has .inventory_hostname + + - role: precheck/os + tags: ["os"] + when: .inventory_hostname | ne "localhost" + + - role: precheck/network + tags: ["network"] + when: .inventory_hostname | ne "localhost" + + - role: precheck/cri + tags: ["cri"] + when: .groups.k8s_cluster | default list | has .inventory_hostname + + - role: precheck/nfs + tags: ["nfs"] + when: .groups.nfs | default list | has .inventory_hostname + diff --git a/builtin/core/roles/precheck/env_check/tasks/network.yaml b/builtin/core/roles/precheck/network/tasks/main.yaml similarity index 55% rename from builtin/core/roles/precheck/env_check/tasks/network.yaml rename to builtin/core/roles/precheck/network/tasks/main.yaml index 05e75845..54f8b218 100644 --- a/builtin/core/roles/precheck/env_check/tasks/network.yaml +++ b/builtin/core/roles/precheck/network/tasks/main.yaml @@ -1,14 +1,22 @@ --- +- name: Network | Ensure either internal_ipv4 or internal_ipv6 is defined + assert: + that: or (.internal_ipv4 | empty | not) (.internal_ipv6 | empty | not) + fail_msg: >- + Either "internal_ipv4" or "internal_ipv6" must be specified. Both cannot be empty. + - name: Network | Ensure required network interfaces are present command: | {{- if .internal_ipv4 | empty | not }} if ! 
ip -o addr show | grep -q {{ .internal_ipv4 }}; then echo 'The specified IPv4 address is not assigned to any network interface.' >&2 + exit 1 fi {{- end }} {{- if .internal_ipv6 | empty | not }} if ! ip -o addr show | grep -q {{ .internal_ipv6 }}; then echo 'The specified IPv6 address is not assigned to any network interface.' >&2 + exit 1 fi {{- end }} @@ -17,47 +25,47 @@ run_once: true block: - name: Network | Check pod CIDR includes both IPv4 and IPv6 - when: .kubernetes.networking.pod_cidr | empty | not + when: .cni.pod_cidr | empty | not assert: - that: .kubernetes.networking.pod_cidr | splitList "," | len | ge 2 + that: .cni.pod_cidr | splitList "," | len | ge 2 fail_msg: >- - "kubernetes.networking.pod_cidr" must specify both IPv4 and IPv6 ranges, using either the format ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr. + "cni.pod_cidr" must specify both IPv4 and IPv6 ranges, using either the format ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr. - name: Network | Check service CIDR includes both IPv4 and IPv6 - when: .kubernetes.networking.service_cidr | empty | not + when: .cni.service_cidr | empty | not assert: - that: .kubernetes.networking.service_cidr | splitList "," | len | ge 2 + that: .cni.service_cidr | splitList "," | len | ge 2 fail_msg: >- - "kubernetes.networking.service_cidr" must specify both IPv4 and IPv6 ranges, using either the format ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr. + "cni.service_cidr" must specify both IPv4 and IPv6 ranges, using either the format ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr. 
- name: Network | Ensure pod networking is properly configured for dual-stack when: - - .kubernetes.networking.pod_cidr | empty | not - - .kubernetes.networking.pod_cidr | splitList "," | len | eq 2 + - .cni.pod_cidr | empty | not + - .cni.pod_cidr | splitList "," | len | eq 2 assert: that: - - .kube_version | semverCompare ">=v1.20.0" - - .kubernetes.networking.pod_cidr | splitList "," | first | ipFamily | eq "IPv4" - - .kubernetes.networking.pod_cidr | splitList "," | last | ipFamily | eq "IPv6" + - .kubernetes.kube_version | semverCompare ">=v1.20.0" + - .cni.pod_cidr | splitList "," | first | ipFamily | eq "IPv4" + - .cni.pod_cidr | splitList "," | last | ipFamily | eq "IPv6" fail_msg: >- Dual-stack pod networking is only supported in Kubernetes v1.20.0 or newer. - name: Network | Ensure service networking is properly configured for dual-stack when: - - .kubernetes.networking.service_cidr | empty | not - - .kubernetes.networking.service_cidr | splitList "," | len | eq 2 + - .cni.service_cidr | empty | not + - .cni.service_cidr | splitList "," | len | eq 2 assert: that: - - .kube_version | semverCompare ">=v1.20.0" - - .kubernetes.networking.service_cidr | splitList "," | first | ipFamily | eq "IPv4" - - .kubernetes.networking.service_cidr | splitList "," | last | ipFamily | eq "IPv6" + - .kubernetes.kube_version | semverCompare ">=v1.20.0" + - .cni.service_cidr | splitList "," | first | ipFamily | eq "IPv4" + - .cni.service_cidr | splitList "," | last | ipFamily | eq "IPv6" fail_msg: >- Dual-stack service networking is only supported in Kubernetes v1.20.0 or newer. - name: Network | Fail if the selected network plugin is not supported run_once: true + when: .cni.type | empty | not assert: - that: .cluster_require.require_network_plugin | has .kubernetes.kube_network_plugin + that: .cluster_require.require_network_plugin | has .cni.type fail_msg: >- - The network plugin "{{ .kubernetes.kube_network_plugin }}" is not supported. 
Please select a supported network plugin. - when: .kubernetes.kube_network_plugin | empty | not + The network plugin "{{ .cni.type }}" is not supported. Please select a supported network plugin. # Note: This check is intentionally conservative. While it is technically possible to schedule more pods than the available addresses in the CIDR range (for example, if some pods use the host network), this cannot be reliably determined at provisioning time. This check ensures there is enough address space for the configured maximum pods per node. # Note: IPv6-only scenarios are not checked here. @@ -66,15 +74,15 @@ when: .groups.k8s_cluster | default list | has .inventory_hostname block: - name: Network | Ensure enough IPv4 addresses are available for pods - when: .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | first | ipFamily | eq "IPv4" + when: .cni.pod_cidr | default "10.233.64.0/18" | splitList "," | first | ipFamily | eq "IPv4" assert: - that: le (.kubernetes.kubelet.max_pods | default 110) (sub (pow 2 (float64 (sub 32 (.kubernetes.networking.ipv4_mask_size | default 24)))) 2) + that: le (.cni.max_pods | default 110) (sub (pow 2 (float64 (sub 32 (.cni.ipv4_mask_size | default 24)))) 2) fail_msg: >- The configured maximum number of pods per node exceeds the number of available IPv4 addresses in the pod CIDR range. 
- name: Network | Ensure enough IPv6 addresses are available for pods - when: .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | last | ipFamily | eq "IPv6" + when: .cni.pod_cidr | default "10.233.64.0/18" | splitList "," | last | ipFamily | eq "IPv6" assert: - that: le (.kubernetes.kubelet.max_pods | default 110) (sub (pow 2 (float64 (sub 128 (.kubernetes.networking.ipv4_mask_size | default 64)))) 2) + that: le (.cni.max_pods | default 110) (sub (pow 2 (float64 (sub 128 (.cni.ipv4_mask_size | default 64)))) 2) fail_msg: >- The configured maximum number of pods per node exceeds the number of available IPv6 addresses in the pod CIDR range. @@ -82,8 +90,8 @@ - name: Network | Fail if Kubernetes version is too old for hybridnet run_once: true assert: - that: .kube_version | semverCompare ">=v1.16.0" + that: .kubernetes.kube_version | semverCompare ">=v1.16.0" fail_msg: >- Hybridnet requires Kubernetes version 1.16.0 or newer. when: - - .kubernetes.kube_network_plugin | eq "hybridnet" \ No newline at end of file + - .cni.type | eq "hybridnet" \ No newline at end of file diff --git a/builtin/core/roles/precheck/env_check/tasks/nfs.yaml b/builtin/core/roles/precheck/nfs/tasks/main.yaml similarity index 91% rename from builtin/core/roles/precheck/env_check/tasks/nfs.yaml rename to builtin/core/roles/precheck/nfs/tasks/main.yaml index dac9ce0f..3beb8c9e 100644 --- a/builtin/core/roles/precheck/env_check/tasks/nfs.yaml +++ b/builtin/core/roles/precheck/nfs/tasks/main.yaml @@ -3,4 +3,3 @@ assert: that: .groups.nfs | default list | len | eq 1 fail_msg: "Exactly one NFS server must be specified. Multiple NFS servers are not supported." 
- when: .groups.nfs diff --git a/builtin/core/roles/precheck/env_check/tasks/os.yaml b/builtin/core/roles/precheck/os/tasks/main.yaml similarity index 100% rename from builtin/core/roles/precheck/env_check/tasks/os.yaml rename to builtin/core/roles/precheck/os/tasks/main.yaml diff --git a/builtin/core/roles/install/security/tasks/main.yaml b/builtin/core/roles/security/tasks/main.yaml similarity index 100% rename from builtin/core/roles/install/security/tasks/main.yaml rename to builtin/core/roles/security/tasks/main.yaml diff --git a/builtin/core/roles/install/storageclass/local/tasks/main.yaml b/builtin/core/roles/storage-class/local/tasks/main.yaml similarity index 100% rename from builtin/core/roles/install/storageclass/local/tasks/main.yaml rename to builtin/core/roles/storage-class/local/tasks/main.yaml diff --git a/builtin/core/roles/install/storageclass/local/templates/local-volume.yaml b/builtin/core/roles/storage-class/local/templates/local-volume.yaml similarity index 90% rename from builtin/core/roles/install/storageclass/local/templates/local-volume.yaml rename to builtin/core/roles/storage-class/local/templates/local-volume.yaml index 0505ed6d..0b1350ea 100644 --- a/builtin/core/roles/install/storageclass/local/templates/local-volume.yaml +++ b/builtin/core/roles/storage-class/local/templates/local-volume.yaml @@ -6,13 +6,13 @@ metadata: name: local annotations: storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce"]' - storageclass.beta.kubernetes.io/is-default-class: "{{ if .sc.local.default }}true{{ else }}false{{ end }}" + storageclass.beta.kubernetes.io/is-default-class: "{{ if .storage_class.local.default }}true{{ else }}false{{ end }}" openebs.io/cas-type: local cas.openebs.io/config: | - name: StorageType value: "hostpath" - name: BasePath - value: "{{ .sc.local.path }}" + value: "{{ .storage_class.local.path }}" provisioner: openebs.io/local volumeBindingMode: WaitForFirstConsumer reclaimPolicy: Delete @@ -100,7 +100,7 @@ spec: 
containers: - name: openebs-provisioner-hostpath imagePullPolicy: IfNotPresent - image: {{ .sc.local.provisioner_image.registry }}/{{ .sc.local.provisioner_image.repository }}:{{ .sc.local.provisioner_image.tag }} + image: {{ .storage_class.local.provisioner_image.registry }}/{{ .storage_class.local.provisioner_image.repository }}:{{ .storage_class.local.provisioner_image.tag }} env: # OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s # based on this address. This is ignored if empty. @@ -131,7 +131,7 @@ spec: - name: OPENEBS_IO_INSTALLER_TYPE value: "openebs-operator-lite" - name: OPENEBS_IO_HELPER_IMAGE - value: "{{ .sc.local.linux_utils_image.registry }}/{{ .sc.local.linux_utils_image.repository }}:{{ .sc.local.linux_utils_image.tag }}" + value: "{{ .storage_class.local.linux_utils_image.registry }}/{{ .storage_class.local.linux_utils_image.repository }}:{{ .storage_class.local.linux_utils_image.tag }}" # LEADER_ELECTION_ENABLED is used to enable/disable leader election. By default # leader election is enabled. 
#- name: LEADER_ELECTION_ENABLED diff --git a/builtin/core/roles/storage-class/meta/main.yaml b/builtin/core/roles/storage-class/meta/main.yaml new file mode 100644 index 00000000..b1a57dac --- /dev/null +++ b/builtin/core/roles/storage-class/meta/main.yaml @@ -0,0 +1,7 @@ +--- +dependencies: + - role: storage-class/local + when: .storage_class.local.enabled + + - role: storage-class/nfs + when: .storage_class.nfs.enabled diff --git a/builtin/core/roles/install/storageclass/nfs/tasks/main.yaml b/builtin/core/roles/storage-class/nfs/tasks/main.yaml similarity index 61% rename from builtin/core/roles/install/storageclass/nfs/tasks/main.yaml rename to builtin/core/roles/storage-class/nfs/tasks/main.yaml index cc0343b7..a3f4300b 100644 --- a/builtin/core/roles/install/storageclass/nfs/tasks/main.yaml +++ b/builtin/core/roles/storage-class/nfs/tasks/main.yaml @@ -2,12 +2,12 @@ - name: NFS | Synchronize NFS provisioner Helm chart to remote host copy: src: >- - {{ .work_dir }}/kubekey/sc/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz + {{ .work_dir }}/kubekey/sc/nfs-subdir-external-provisioner-{{ .storage_class.nfs_provisioner_version }}.tgz dest: >- - /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz + /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ .storage_class.nfs_provisioner_version }}.tgz - name: NFS | Deploy the NFS provisioner using Helm command: | helm upgrade --install nfs-subdir-external-provisioner /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz --namespace kube-system \ - --set nfs.server={{ .sc.nfs.server }} --set nfs.path={{ .sc.nfs.path }} \ - --set storageClass.defaultClass={{ if .sc.local.default }}true{{ else }}false{{ end }} + --set nfs.server={{ .storage_class.nfs.server }} --set nfs.path={{ .storage_class.nfs.path }} \ + --set storageClass.defaultClass={{ if .storage_class.local.default }}true{{ else }}false{{ end }} diff --git 
a/builtin/core/roles/uninstall/cri/containerd/defaults/main.yaml b/builtin/core/roles/uninstall/cri/containerd/defaults/main.yaml new file mode 100644 index 00000000..ba5501ce --- /dev/null +++ b/builtin/core/roles/uninstall/cri/containerd/defaults/main.yaml @@ -0,0 +1,3 @@ +cri: + containerd: + data_root: /var/lib/containerd \ No newline at end of file diff --git a/builtin/core/roles/uninstall/cri/defaults/main.yaml b/builtin/core/roles/uninstall/cri/defaults/main.yaml deleted file mode 100644 index 09a985cf..00000000 --- a/builtin/core/roles/uninstall/cri/defaults/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ -cri: - containerd: - data_root: /var/lib/containerd - docker: - data_root: /var/lib/docker - # support: containerd,docker - container_manager: docker \ No newline at end of file diff --git a/builtin/core/roles/uninstall/cri/docker/defaults/main.yaml b/builtin/core/roles/uninstall/cri/docker/defaults/main.yaml new file mode 100644 index 00000000..888b6f79 --- /dev/null +++ b/builtin/core/roles/uninstall/cri/docker/defaults/main.yaml @@ -0,0 +1,3 @@ +cri: + docker: + data_root: /var/lib/docker \ No newline at end of file diff --git a/builtin/core/roles/uninstall/cri/docker/tasks/main.yaml b/builtin/core/roles/uninstall/cri/docker/tasks/main.yaml index c26a1cff..9c607677 100644 --- a/builtin/core/roles/uninstall/cri/docker/tasks/main.yaml +++ b/builtin/core/roles/uninstall/cri/docker/tasks/main.yaml @@ -4,4 +4,4 @@ # uninstall cridockerd - include_tasks: cridockerd.yaml when: - - .cridockerd_version | empty | not + - .cri.cridockerd_version | empty | not diff --git a/builtin/core/roles/uninstall/etcd/defaults/main.yaml b/builtin/core/roles/uninstall/etcd/defaults/main.yaml deleted file mode 100644 index 0e43d377..00000000 --- a/builtin/core/roles/uninstall/etcd/defaults/main.yaml +++ /dev/null @@ -1,4 +0,0 @@ -etcd: - env: - data_dir: /var/lib/etcd - traffic_priority: false \ No newline at end of file diff --git 
a/builtin/core/roles/uninstall/image-registry/defaults/main.yaml b/builtin/core/roles/uninstall/image-registry/defaults/main.yaml deleted file mode 100644 index d07bbc2f..00000000 --- a/builtin/core/roles/uninstall/image-registry/defaults/main.yaml +++ /dev/null @@ -1,3 +0,0 @@ -image_registry: - type: harbor - # Virtual IP address for repository High Availability. the Virtual IP address should be available. \ No newline at end of file diff --git a/builtin/core/roles/uninstall/image-registry/docker-registry/defaults/main.yaml b/builtin/core/roles/uninstall/image-registry/docker-registry/defaults/main.yaml index 1ab56f14..fc25713f 100644 --- a/builtin/core/roles/uninstall/image-registry/docker-registry/defaults/main.yaml +++ b/builtin/core/roles/uninstall/image-registry/docker-registry/defaults/main.yaml @@ -1,6 +1,37 @@ image_registry: docker_registry: + version: 2 + config: + storage: nfs + nfs_dir: /share/registry storage: filesystem: rootdir: /opt/docker-registry/data # nfs_mount: /repository/registry # if set. will mount rootdirectory to nfs server in nfs_mount. 
+# azure: +# accountname: accountname +# accountkey: base64encodedaccountkey +# container: containername +# gcs: +# bucket: bucketname +# keyfile: /path/to/keyfile +# credentials: +# type: service_account +# project_id: project_id_string +# private_key_id: private_key_id_string +# private_key: private_key_string +# client_email: client@example.com +# client_id: client_id_string +# auth_uri: http://example.com/auth_uri +# token_uri: http://example.com/token_uri +# auth_provider_x509_cert_url: http://example.com/provider_cert_url +# client_x509_cert_url: http://example.com/client_cert_url +# rootdirectory: /gcs/object/name/prefix +# s3: +# accesskey: awsaccesskey +# secretkey: awssecretkey +# region: us-west-1 +# regionendpoint: http://myobjects.local +# bucket: bucketname +# keyid: mykeyid +# rootdirectory: /s3/object/name/prefix diff --git a/cmd/kk/app/options/builtin/add.go b/cmd/kk/app/options/builtin/add.go index 995dbec2..c6c6ff65 100644 --- a/cmd/kk/app/options/builtin/add.go +++ b/cmd/kk/app/options/builtin/add.go @@ -29,7 +29,6 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" cliflag "k8s.io/component-base/cli/flag" "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" @@ -44,15 +43,6 @@ func NewAddNodeOptions() *AddNodeOptions { CommonOptions: options.NewCommonOptions(), Kubernetes: defaultKubeVersion, } - - o.CommonOptions.GetConfigFunc = func() (*kkcorev1.Config, error) { - data, err := getConfig(o.Kubernetes) - if err != nil { - return nil, err - } - config := &kkcorev1.Config{} - return config, errors.Wrapf(yaml.Unmarshal(data, config), "failed to unmarshal local configFile for kube_version: %q.", o.Kubernetes) - } o.CommonOptions.GetInventoryFunc = getInventory return o diff --git a/cmd/kk/app/options/builtin/artifact.go b/cmd/kk/app/options/builtin/artifact.go index 5f7ae2a8..aae6b4f9 100644 --- 
a/cmd/kk/app/options/builtin/artifact.go +++ b/cmd/kk/app/options/builtin/artifact.go @@ -20,6 +20,8 @@ limitations under the License. package builtin import ( + "fmt" + "github.com/cockroachdb/errors" kkcorev1 "github.com/kubesphere/kubekey/api/core/v1" "github.com/spf13/cobra" @@ -36,6 +38,8 @@ import ( // ArtifactExportOptions for NewArtifactExportOptions type ArtifactExportOptions struct { options.CommonOptions + // kubernetes version which the cluster will install. + Kubernetes string } // NewArtifactExportOptions for newArtifactExportCommand @@ -49,7 +53,11 @@ func NewArtifactExportOptions() *ArtifactExportOptions { // Flags add to newArtifactExportCommand func (o *ArtifactExportOptions) Flags() cliflag.NamedFlagSets { - return o.CommonOptions.Flags() + fss := o.CommonOptions.Flags() + kfs := fss.FlagSet("config") + kfs.StringVar(&o.Kubernetes, "with-kubernetes", o.Kubernetes, fmt.Sprintf("Specify a supported version of kubernetes. default is %s", o.Kubernetes)) + + return fss } // Complete options. create Playbook, Config and Inventory @@ -88,6 +96,10 @@ func (o *ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kk // ArtifactImagesOptions for NewArtifactImagesOptions type ArtifactImagesOptions struct { options.CommonOptions + // kubernetes version which the cluster will install. + Kubernetes string + Push bool + Pull bool } // NewArtifactImagesOptions for newArtifactImagesCommand @@ -100,7 +112,13 @@ func NewArtifactImagesOptions() *ArtifactImagesOptions { // Flags add to newArtifactImagesCommand func (o *ArtifactImagesOptions) Flags() cliflag.NamedFlagSets { - return o.CommonOptions.Flags() + fss := o.CommonOptions.Flags() + kfs := fss.FlagSet("config") + kfs.StringVar(&o.Kubernetes, "with-kubernetes", o.Kubernetes, fmt.Sprintf("Specify a supported version of kubernetes. 
default is %s", o.Kubernetes)) + kfs.BoolVar(&o.Push, "push", o.Push, "Push image to image registry") + kfs.BoolVar(&o.Pull, "pull", o.Pull, "Pull image to binary dir") + + return fss } // Complete options. create Playbook, Config and Inventory @@ -121,9 +139,17 @@ func (o *ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kk } o.Playbook = args[0] + var tags []string + if o.Push { + tags = append(tags, "push") + } + if o.Pull { + tags = append(tags, "pull") + } + playbook.Spec = kkcorev1.PlaybookSpec{ Playbook: o.Playbook, - Tags: []string{"only_image"}, + Tags: tags, } if err := o.CommonOptions.Complete(playbook); err != nil { diff --git a/cmd/kk/app/options/builtin/certs.go b/cmd/kk/app/options/builtin/certs.go index b64f0d61..3580bb3a 100644 --- a/cmd/kk/app/options/builtin/certs.go +++ b/cmd/kk/app/options/builtin/certs.go @@ -20,6 +20,8 @@ limitations under the License. package builtin import ( + "fmt" + "github.com/cockroachdb/errors" kkcorev1 "github.com/kubesphere/kubekey/api/core/v1" "github.com/spf13/cobra" @@ -41,11 +43,17 @@ func NewCertsRenewOptions() *CertsRenewOptions { // CertsRenewOptions for NewCertsRenewOptions type CertsRenewOptions struct { options.CommonOptions + // kubernetes version which the cluster will install. + Kubernetes string } // Flags add to newCertsRenewCommand func (o *CertsRenewOptions) Flags() cliflag.NamedFlagSets { - return o.CommonOptions.Flags() + fss := o.CommonOptions.Flags() + kfs := fss.FlagSet("config") + kfs.StringVar(&o.Kubernetes, "with-kubernetes", o.Kubernetes, fmt.Sprintf("Specify a supported version of kubernetes. default is %s", o.Kubernetes)) + + return fss } // Complete options. 
create Playbook, Config and Inventory diff --git a/cmd/kk/app/options/builtin/create.go b/cmd/kk/app/options/builtin/create.go index a219034b..b6b8f7a5 100644 --- a/cmd/kk/app/options/builtin/create.go +++ b/cmd/kk/app/options/builtin/create.go @@ -29,7 +29,6 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" cliflag "k8s.io/component-base/cli/flag" "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" @@ -46,14 +45,6 @@ func NewCreateClusterOptions() *CreateClusterOptions { CommonOptions: options.NewCommonOptions(), Kubernetes: defaultKubeVersion, } - o.CommonOptions.GetConfigFunc = func() (*kkcorev1.Config, error) { - data, err := getConfig(o.Kubernetes) - if err != nil { - return nil, err - } - config := &kkcorev1.Config{} - return config, errors.Wrapf(yaml.Unmarshal(data, config), "failed to unmarshal local configFile for kube_version: %q.", o.Kubernetes) - } o.CommonOptions.GetInventoryFunc = getInventory return o diff --git a/cmd/kk/app/options/builtin/delete.go b/cmd/kk/app/options/builtin/delete.go index 6f602c31..5dff56a8 100644 --- a/cmd/kk/app/options/builtin/delete.go +++ b/cmd/kk/app/options/builtin/delete.go @@ -27,7 +27,6 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" cliflag "k8s.io/component-base/cli/flag" "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" @@ -45,14 +44,6 @@ func NewDeleteClusterOptions() *DeleteClusterOptions { Kubernetes: defaultKubeVersion, } // Set the function to get the config for the specified Kubernetes version - o.CommonOptions.GetConfigFunc = func() (*kkcorev1.Config, error) { - data, err := getConfig(o.Kubernetes) - if err != nil { - return nil, err - } - config := &kkcorev1.Config{} - return config, errors.Wrapf(yaml.Unmarshal(data, config), "failed to unmarshal local 
configFile for kube_version: %q.", o.Kubernetes) - } // Set the function to get the inventory o.CommonOptions.GetInventoryFunc = getInventory @@ -132,15 +123,6 @@ func NewDeleteNodesOptions() *DeleteNodesOptions { CommonOptions: options.NewCommonOptions(), Kubernetes: defaultKubeVersion, } - // Set the function to get the config for the specified Kubernetes version - o.CommonOptions.GetConfigFunc = func() (*kkcorev1.Config, error) { - data, err := getConfig(o.Kubernetes) - if err != nil { - return nil, err - } - config := &kkcorev1.Config{} - return config, errors.Wrapf(yaml.Unmarshal(data, config), "failed to unmarshal local configFile for kube_version: %q.", o.Kubernetes) - } // Set the function to get the inventory o.CommonOptions.GetInventoryFunc = getInventory @@ -234,11 +216,15 @@ func NewDeleteRegistryOptions() *DeleteRegistryOptions { // DeleteRegistryOptions contains options for deleting an image_registry created by kubekey type DeleteRegistryOptions struct { options.CommonOptions + // kubernetes version which the config will install. + Kubernetes string } // Flags returns the flag sets for DeleteImageRegistryOptions func (o *DeleteRegistryOptions) Flags() cliflag.NamedFlagSets { fss := o.CommonOptions.Flags() + kfs := fss.FlagSet("config") + kfs.StringVar(&o.Kubernetes, "with-kubernetes", o.Kubernetes, fmt.Sprintf("Specify a supported version of kubernetes. default is %s", o.Kubernetes)) return fss } diff --git a/cmd/kk/app/options/builtin/init.go b/cmd/kk/app/options/builtin/init.go index b25a1f45..82a7a132 100644 --- a/cmd/kk/app/options/builtin/init.go +++ b/cmd/kk/app/options/builtin/init.go @@ -20,6 +20,7 @@ limitations under the License. package builtin import ( + "fmt" "path/filepath" "github.com/cockroachdb/errors" @@ -109,6 +110,8 @@ func (o *InitOSOptions) completeConfig() error { // InitRegistryOptions for NewInitRegistryOptions type InitRegistryOptions struct { options.CommonOptions + // kubernetes version which the config will install. 
+ Kubernetes string } // NewInitRegistryOptions for newInitRegistryCommand @@ -122,7 +125,11 @@ func NewInitRegistryOptions() *InitRegistryOptions { // Flags add to newInitRegistryCommand func (o *InitRegistryOptions) Flags() cliflag.NamedFlagSets { - return o.CommonOptions.Flags() + fss := o.CommonOptions.Flags() + kfs := fss.FlagSet("config") + kfs.StringVar(&o.Kubernetes, "with-kubernetes", o.Kubernetes, fmt.Sprintf("Specify a supported version of kubernetes. default is %s", o.Kubernetes)) + + return fss } // Complete options. create Playbook, Config and Inventory @@ -145,6 +152,7 @@ func (o *InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkco playbook.Spec = kkcorev1.PlaybookSpec{ Playbook: o.Playbook, + Tags: []string{"image_registry"}, } return playbook, o.CommonOptions.Complete(playbook) diff --git a/cmd/kk/app/options/builtin/precheck.go b/cmd/kk/app/options/builtin/precheck.go index 1dcfa1f8..c7290007 100644 --- a/cmd/kk/app/options/builtin/precheck.go +++ b/cmd/kk/app/options/builtin/precheck.go @@ -26,7 +26,6 @@ import ( kkcorev1 "github.com/kubesphere/kubekey/api/core/v1" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/yaml" cliflag "k8s.io/component-base/cli/flag" "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" @@ -39,14 +38,6 @@ func NewPreCheckOptions() *PreCheckOptions { CommonOptions: options.NewCommonOptions(), Kubernetes: defaultKubeVersion, } - o.CommonOptions.GetConfigFunc = func() (*kkcorev1.Config, error) { - data, err := getConfig(o.Kubernetes) - if err != nil { - return nil, err - } - config := &kkcorev1.Config{} - return config, errors.Wrapf(yaml.Unmarshal(data, config), "failed to unmarshal local configFile for kube_version: %q.", o.Kubernetes) - } o.CommonOptions.GetInventoryFunc = getInventory return o diff --git a/cmd/kk/app/options/option.go b/cmd/kk/app/options/option.go index e0960a67..ed7a8270 100644 --- a/cmd/kk/app/options/option.go +++ 
b/cmd/kk/app/options/option.go @@ -68,8 +68,7 @@ type CommonOptions struct { Namespace string // Config is the kubekey core configuration. - Config *kkcorev1.Config - GetConfigFunc ConfigFunc + Config *kkcorev1.Config // Inventory is the kubekey core inventory. Inventory *kkcorev1.Inventory GetInventoryFunc InventoryFunc @@ -188,12 +187,6 @@ func (o *CommonOptions) Complete(playbook *kkcorev1.Playbook) error { if err := yaml.Unmarshal(data, o.Config); err != nil { return errors.Wrapf(err, "failed to unmarshal config from file %q", o.ConfigFile) } - } else if o.GetConfigFunc != nil { - config, err := o.GetConfigFunc() - if err != nil { - return err - } - o.Config = config } if o.InventoryFile != "" { data, err := os.ReadFile(o.InventoryFile) diff --git a/pkg/executor/block_executor.go b/pkg/executor/block_executor.go index 36573060..23c1a146 100644 --- a/pkg/executor/block_executor.go +++ b/pkg/executor/block_executor.go @@ -37,15 +37,8 @@ func (e blockExecutor) Exec(ctx context.Context) error { tags := e.dealTags(block.Taggable) ignoreErrors := e.dealIgnoreErrors(block.IgnoreErrors) when := e.dealWhen(block.When) - - // check tags - if !tags.IsEnabled(e.playbook.Spec.Tags, e.playbook.Spec.SkipTags) { - // if not match the tags. skip - continue - } - - // merge variable which defined in block - if err := e.variable.Merge(variable.MergeRuntimeVariable(block.Vars, hosts...)); err != nil { + if err := e.variable.Merge(variable.MergeRuntimeVariable(block.Vars.Nodes, hosts...)); err != nil { return err } @@ -57,8 +50,12 @@ func (e blockExecutor) Exec(ctx context.Context) error { case block.IncludeTasks != "": // do nothing. include tasks has converted to blocks. default: - if err := e.dealTask(ctx, hosts, when, block); err != nil { - return err + // check tags + if tags.IsEnabled(e.playbook.Spec.Tags, e.playbook.Spec.SkipTags) { + // tags match: run the task
+ if err := e.dealTask(ctx, hosts, when, block); err != nil { + return err + } } } } diff --git a/pkg/executor/playbook_executor.go b/pkg/executor/playbook_executor.go index b825a299..7d6b14cb 100644 --- a/pkg/executor/playbook_executor.go +++ b/pkg/executor/playbook_executor.go @@ -92,11 +92,6 @@ func (e playbookExecutor) Exec(ctx context.Context) (retErr error) { return err } for _, play := range pb.Play { - // check tags - if !play.Taggable.IsEnabled(e.playbook.Spec.Tags, e.playbook.Spec.SkipTags) { - // if not match the tags. skip - continue - } // hosts should contain all host's name. hosts should not be empty. var hosts []string if err := e.dealHosts(play.PlayHost, &hosts); err != nil { @@ -104,10 +99,14 @@ func (e playbookExecutor) Exec(ctx context.Context) (retErr error) { continue } - // when gather_fact is set. get host's information from remote. - if err := e.dealGatherFacts(ctx, play.GatherFacts, hosts); err != nil { - return err + // check tags + if play.Taggable.IsEnabled(e.playbook.Spec.Tags, e.playbook.Spec.SkipTags) { + // when gather_fact is set. get host's information from remote. + if err := e.dealGatherFacts(ctx, play.GatherFacts, hosts); err != nil { + return err + } } + // Batch execution, with each batch being a group of hosts run in serial. 
var batchHosts [][]string if err := e.dealSerial(play.Serial.Data, hosts, &batchHosts); err != nil { @@ -160,7 +159,7 @@ func (e playbookExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P return errors.Errorf("host is empty") } - if err := e.variable.Merge(variable.MergeRuntimeVariable(play.Vars, serials...)); err != nil { + if err := e.variable.Merge(variable.MergeRuntimeVariable(play.Vars.Nodes, serials...)); err != nil { return err } // generate task from pre tasks diff --git a/pkg/executor/role_executor.go b/pkg/executor/role_executor.go index 11a8d003..1ab23d7e 100644 --- a/pkg/executor/role_executor.go +++ b/pkg/executor/role_executor.go @@ -27,13 +27,8 @@ type roleExecutor struct { // Exec executes the role, including its dependencies and blocks. // It checks tags, merges variables, and recursively executes dependent roles and blocks. func (e roleExecutor) Exec(ctx context.Context) error { - // check tags: skip execution if tags do not match - if !e.tags.IsEnabled(e.playbook.Spec.Tags, e.playbook.Spec.SkipTags) { - // if not match the tags. 
skip - return nil - } // merge variables defined in the role for the current hosts - if err := e.variable.Merge(variable.MergeRuntimeVariable(e.role.Vars, e.hosts...)); err != nil { + if err := e.variable.Merge(variable.MergeRuntimeVariable(e.role.Vars.Nodes, e.hosts...)); err != nil { return err } // deal dependency role: execute all role dependencies recursively diff --git a/pkg/executor/task_executor.go b/pkg/executor/task_executor.go index ca7e7dee..8c0f8db6 100644 --- a/pkg/executor/task_executor.go +++ b/pkg/executor/task_executor.go @@ -40,7 +40,7 @@ func (e *taskExecutor) Exec(ctx context.Context) error { } // create task if err := e.client.Create(ctx, e.task); err != nil { - return errors.Wrapf(err, "failed to create task %q", e.task.Spec.Name) + return errors.Wrapf(err, "failed to create task %v", e.task) } defer func() { e.playbook.Status.Statistics.Total++ @@ -142,11 +142,7 @@ func (e *taskExecutor) execTaskHost(i int, h string) func(ctx context.Context) { defer func() { if resErr != nil { errMsg = resErr.Error() - if e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError { - klog.V(5).ErrorS(resErr, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "error", errMsg, "task", ctrlclient.ObjectKeyFromObject(e.task)) - } else { - klog.ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "error", errMsg, "task", ctrlclient.ObjectKeyFromObject(e.task)) - } + klog.V(5).ErrorS(resErr, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "error", errMsg, "task", ctrlclient.ObjectKeyFromObject(e.task)) } resErr = errors.Join(resErr, e.dealRegister(h, stdout, stderr, errMsg)) @@ -171,7 +167,11 @@ func (e *taskExecutor) execTaskHost(i int, h string) func(ctx context.Context) { return } // check when condition - if skip := e.dealWhen(had, &stdout, &stderr); skip { + if skip, err := e.dealWhen(had); err != nil { + resErr = err + return + } else if skip { + stdout = modules.StdoutSkip return } // execute module 
in loop with loop item. @@ -331,23 +331,18 @@ func (e *taskExecutor) dealLoop(ha map[string]any) []any { // dealWhen evaluates the "when" conditions for a task to determine if it should be skipped. // Returns true if the task should be skipped, false if it should proceed. -func (e *taskExecutor) dealWhen(had map[string]any, stdout, stderr *string) bool { +func (e *taskExecutor) dealWhen(had map[string]any) (bool, error) { if len(e.task.Spec.When) > 0 { ok, err := tmpl.ParseBool(had, e.task.Spec.When...) if err != nil { - klog.V(5).ErrorS(err, "validate when condition error", "task", ctrlclient.ObjectKeyFromObject(e.task)) - *stderr = fmt.Sprintf("parse when condition error: %v", err) - - return true + return false, err } if !ok { - *stdout = modules.StdoutSkip - - return true + return true, nil } } - return false + return false, nil } // dealFailedWhen evaluates the "failed_when" conditions for a task to determine if it should fail. diff --git a/pkg/modules/copy.go b/pkg/modules/copy.go index 04e04ea0..ef4c3075 100644 --- a/pkg/modules/copy.go +++ b/pkg/modules/copy.go @@ -86,13 +86,15 @@ Return Values: - On failure: Returns error message in stderr */ +// copyArgs holds the arguments for the copy module. type copyArgs struct { - src string - content string - dest string - mode *uint32 + src string // Source file or directory path (local) + content string // Content to write to the destination file (if no src) + dest string // Destination path on the remote host + mode *uint32 // Optional file mode/permissions } +// newCopyArgs parses and validates the arguments for the copy module. 
func newCopyArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*copyArgs, error) { var err error ca := ©Args{} @@ -116,7 +118,7 @@ func newCopyArgs(_ context.Context, raw runtime.RawExtension, vars map[string]an return ca, nil } -// ModuleCopy handles the "copy" module, copying files or content to remote hosts +// ModuleCopy handles the "copy" module, copying files or content to remote hosts. func ModuleCopy(ctx context.Context, options ExecOptions) (string, string, error) { // get host variable ha, err := options.getAllVariables() @@ -146,15 +148,16 @@ func ModuleCopy(ctx context.Context, options ExecOptions) (string, string, error } } -// copySrc copy src file to dest +// copySrc copies the source file or directory to the destination on the remote host. func (ca copyArgs) copySrc(ctx context.Context, options ExecOptions, conn connector.Connector) (string, string, error) { - if filepath.IsAbs(ca.src) { // if src is absolute path. find it in local path + if filepath.IsAbs(ca.src) { // if src is absolute path, find it in local path return ca.handleAbsolutePath(ctx, conn) } - // if src is not absolute path. find file in project + // if src is not absolute path, find file in project return ca.handleRelativePath(ctx, options, conn) } +// handleAbsolutePath handles copying when the source is an absolute path. 
func (ca copyArgs) handleAbsolutePath(ctx context.Context, conn connector.Connector) (string, string, error) { fileInfo, err := os.Stat(ca.src) if err != nil { @@ -162,7 +165,7 @@ func (ca copyArgs) handleAbsolutePath(ctx context.Context, conn connector.Connec } if fileInfo.IsDir() { // src is dir - if err := ca.absDir(ctx, conn); err != nil { + if err := ca.copyAbsoluteDir(ctx, conn); err != nil { return StdoutFailed, "failed to copy absolute dir", err } return StdoutSuccess, "", nil @@ -173,12 +176,13 @@ func (ca copyArgs) handleAbsolutePath(ctx context.Context, conn connector.Connec if err != nil { return StdoutFailed, "failed to read absolute file", err } - if err := ca.readFile(ctx, data, fileInfo.Mode(), conn); err != nil { + if err := ca.copyFile(ctx, data, fileInfo.Mode(), conn); err != nil { return StdoutFailed, "failed to copy absolute file", err } return StdoutSuccess, "", nil } +// handleRelativePath handles copying when the source is a relative path (from the project). func (ca copyArgs) handleRelativePath(ctx context.Context, options ExecOptions, conn connector.Connector) (string, string, error) { pj, err := project.New(ctx, options.Playbook, false) if err != nil { @@ -192,7 +196,7 @@ func (ca copyArgs) handleRelativePath(ctx context.Context, options ExecOptions, } if fileInfo.IsDir() { - if err := ca.handleRelativeDir(ctx, pj, relPath, conn); err != nil { + if err := ca.copyRelativeDir(ctx, pj, relPath, conn); err != nil { return StdoutFailed, "failed to copy relative dir", err } @@ -204,85 +208,18 @@ func (ca copyArgs) handleRelativePath(ctx context.Context, options ExecOptions, if err != nil { return StdoutFailed, "failed to read relative file", err } - if err := ca.readFile(ctx, data, fileInfo.Mode(), conn); err != nil { + if err := ca.copyFile(ctx, data, fileInfo.Mode(), conn); err != nil { return StdoutFailed, "failed to copy relative file", err } return StdoutSuccess, "", nil } -func (ca copyArgs) handleRelativeDir(ctx context.Context, pj 
project.Project, relPath string, conn connector.Connector) error { - return pj.WalkDir(relPath, func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err - } - if d.IsDir() { // only copy file - return nil - } - - info, err := d.Info() - if err != nil { - return errors.Wrap(err, "failed to get file info") - } - - mode := info.Mode() - if ca.mode != nil { - mode = os.FileMode(*ca.mode) - } - - data, err := pj.ReadFile(path) - if err != nil { - return errors.Wrap(err, "failed to read file") - } - - dest := ca.dest - if strings.HasSuffix(ca.dest, "/") { - rel, err := pj.Rel(relPath, path) - if err != nil { - return errors.Wrap(err, "failed to get relative file path") - } - dest = filepath.Join(ca.dest, rel) - } - - return conn.PutFile(ctx, data, dest, mode) - }) -} - -// copyContent convert content param and copy to dest -func (ca copyArgs) copyContent(ctx context.Context, mode fs.FileMode, conn connector.Connector) (string, string, error) { - if strings.HasSuffix(ca.dest, "/") { - return StdoutFailed, StderrUnsupportArgs, errors.New("\"content\" should copy to a file") - } - - if ca.mode != nil { - mode = os.FileMode(*ca.mode) - } - - if err := conn.PutFile(ctx, []byte(ca.content), ca.dest, mode); err != nil { - return StdoutFailed, "failed to copy file", err - } - - return StdoutSuccess, "", nil -} - -// absFile when copy.src is absolute file, get file from os, and copy to remote. -func (ca copyArgs) readFile(ctx context.Context, data []byte, mode fs.FileMode, conn connector.Connector) error { - dest := ca.dest - if strings.HasSuffix(ca.dest, "/") { - dest = filepath.Join(ca.dest, filepath.Base(ca.src)) - } - - if ca.mode != nil { - mode = os.FileMode(*ca.mode) - } - - return conn.PutFile(ctx, data, dest, mode) -} - -// absDir when copy.src is absolute dir, get all files from os, and copy to remote. 
-func (ca copyArgs) absDir(ctx context.Context, conn connector.Connector) error { +// copyAbsoluteDir copies all files from an absolute directory to the remote host. +func (ca copyArgs) copyAbsoluteDir(ctx context.Context, conn connector.Connector) error { return filepath.WalkDir(ca.src, func(path string, d fs.DirEntry, err error) error { - if d.IsDir() { // only copy file + // Only copy files, skip directories + if d.IsDir() { return nil } @@ -318,6 +255,79 @@ func (ca copyArgs) absDir(ctx context.Context, conn connector.Connector) error { }) } +// copyRelativeDir copies all files from a relative directory (in the project) to the remote host. +func (ca copyArgs) copyRelativeDir(ctx context.Context, pj project.Project, relPath string, conn connector.Connector) error { + return pj.WalkDir(relPath, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + // Only copy files, skip directories + if d.IsDir() { + return nil + } + + info, err := d.Info() + if err != nil { + return errors.Wrap(err, "failed to get file info") + } + + mode := info.Mode() + if ca.mode != nil { + mode = os.FileMode(*ca.mode) + } + + data, err := pj.ReadFile(path) + if err != nil { + return errors.Wrap(err, "failed to read file") + } + + dest := ca.dest + if strings.HasSuffix(ca.dest, "/") { + rel, err := pj.Rel(relPath, path) + if err != nil { + return errors.Wrap(err, "failed to get relative file path") + } + dest = filepath.Join(ca.dest, rel) + } + + return conn.PutFile(ctx, data, dest, mode) + }) +} + +// copyContent converts the content param and copies it to the destination file on the remote host. 
+func (ca copyArgs) copyContent(ctx context.Context, mode fs.FileMode, conn connector.Connector) (string, string, error) { + // Content must be copied to a file, not a directory + if strings.HasSuffix(ca.dest, "/") { + return StdoutFailed, StderrUnsupportArgs, errors.New("\"content\" should copy to a file") + } + + if ca.mode != nil { + mode = os.FileMode(*ca.mode) + } + + if err := conn.PutFile(ctx, []byte(ca.content), ca.dest, mode); err != nil { + return StdoutFailed, "failed to copy file", err + } + + return StdoutSuccess, "", nil +} + +// copyFile copies a file (data) to the destination on the remote host. +// If the destination is a directory, the file is placed inside it with its base name. +func (ca copyArgs) copyFile(ctx context.Context, data []byte, mode fs.FileMode, conn connector.Connector) error { + dest := ca.dest + if strings.HasSuffix(ca.dest, "/") { + dest = filepath.Join(ca.dest, filepath.Base(ca.src)) + } + + if ca.mode != nil { + mode = os.FileMode(*ca.mode) + } + + return conn.PutFile(ctx, data, dest, mode) +} + +// Register the "copy" module at init. 
func init() { utilruntime.Must(RegisterModule("copy", ModuleCopy)) } diff --git a/pkg/modules/include_vars.go b/pkg/modules/include_vars.go index 6d31c542..d0cbe5cb 100644 --- a/pkg/modules/include_vars.go +++ b/pkg/modules/include_vars.go @@ -12,7 +12,6 @@ import ( _const "github.com/kubesphere/kubekey/v4/pkg/const" "github.com/kubesphere/kubekey/v4/pkg/project" - "github.com/kubesphere/kubekey/v4/pkg/utils" "github.com/kubesphere/kubekey/v4/pkg/variable" ) @@ -56,7 +55,7 @@ func ModuleIncludeVars(ctx context.Context, options ExecOptions) (string, string if !filepath.IsLocal(arg.includeVars) { return StdoutFailed, "can not read remote file", errors.New("can not read remote file") } - if !utils.HasSuffixIn(arg.includeVars, []string{"yaml", "yml"}) { + if filepath.Ext(arg.includeVars) != ".yaml" && filepath.Ext(arg.includeVars) != ".yml" { return StdoutFailed, "input file type wrong", errors.New("input file type wrong") } diff --git a/pkg/modules/include_vars_test.go b/pkg/modules/include_vars_test.go index 4a4621f7..0ef5a4ad 100644 --- a/pkg/modules/include_vars_test.go +++ b/pkg/modules/include_vars_test.go @@ -18,9 +18,7 @@ func TestModuleIncludeVars(t *testing.T) { name: "include remote path", opt: ExecOptions{ Args: runtime.RawExtension{ - Raw: []byte(`{ -"include_vars": "http://127.0.0.1:8080/include_vars", -}`), + Raw: []byte("http://127.0.0.1:8080/include_vars"), }, Variable: newTestVariable(nil, nil), }, @@ -29,9 +27,7 @@ func TestModuleIncludeVars(t *testing.T) { name: "include empty path", opt: ExecOptions{ Args: runtime.RawExtension{ - Raw: []byte(`{ -"include_vars": "", -}`), + Raw: []byte(""), }, Variable: newTestVariable(nil, nil), }, @@ -39,10 +35,9 @@ func TestModuleIncludeVars(t *testing.T) { }, { name: "include path not exist", opt: ExecOptions{ + Args: runtime.RawExtension{ - Raw: []byte(`{ -"include_vars": "/path/not/exist/not_exist.yaml", -}`), + Raw: []byte("/path/not/exist/not_exist.yaml"), }, Variable: newTestVariable(nil, nil), }, diff 
--git a/pkg/project/project.go b/pkg/project/project.go index 334f0b3d..85faf8a6 100644 --- a/pkg/project/project.go +++ b/pkg/project/project.go @@ -129,10 +129,6 @@ func (f *project) loadPlaybook(basePlaybook string) error { } for _, p := range plays { - if !p.VarsFromMarshal.IsZero() { - p.Vars = append(p.Vars, p.VarsFromMarshal) - } - if err := f.dealImportPlaybook(p, basePlaybook); err != nil { return err } @@ -212,7 +208,7 @@ func (f *project) dealVarsFiles(p *kkprojectv1.Play, basePlaybook string) error // combine map node if node.Content[0].Kind == yaml.MappingNode { // skip empty file - p.Vars = append(p.Vars, *node.Content[0]) + p.Vars.Nodes = append(p.Vars.Nodes, *node.Content[0]) } } @@ -234,9 +230,6 @@ func (f *project) dealRole(role *kkprojectv1.Role, basePlaybook string) error { if err := yaml.Unmarshal(mdata, roleMeta); err != nil { return errors.Wrapf(err, "failed to unmarshal role meta file %q", meta) } - if !roleMeta.VarsFromMarshal.IsZero() { - roleMeta.Vars = append(roleMeta.Vars, roleMeta.VarsFromMarshal) - } for _, dep := range roleMeta.RoleDependency { if err := f.dealRole(&dep, basePlaybook); err != nil { return errors.Wrapf(err, "failed to deal dependency role base %q", role.Role) @@ -254,11 +247,6 @@ func (f *project) dealRole(role *kkprojectv1.Role, basePlaybook string) error { if err := yaml.Unmarshal(rdata, &blocks); err != nil { return errors.Wrapf(err, "failed to unmarshal yaml file %q", task) } - for i, b := range blocks { - if !b.VarsFromMarshal.IsZero() { - blocks[i].Vars = append(b.Vars, b.VarsFromMarshal) - } - } role.Block = blocks } // deal defaults (optional) @@ -299,7 +287,7 @@ func (f *project) combineRoleVars(role *kkprojectv1.Role, content []byte) error // combine map node if node.Content[0].Kind == yaml.MappingNode { // skip empty file - role.Vars = append(role.Vars, *node.Content[0]) + role.Vars.Nodes = append(role.Vars.Nodes, *node.Content[0]) } return nil } @@ -349,11 +337,6 @@ func (f *project) dealBlock(top string, 
source string, blocks []kkprojectv1.Bloc if err := yaml.Unmarshal(data, &includeBlocks); err != nil { return errors.Wrapf(err, "failed to unmarshal includeTask file %q", includeTask) } - for i, b := range includeBlocks { - if !b.VarsFromMarshal.IsZero() { - includeBlocks[i].Vars = append(b.Vars, b.VarsFromMarshal) - } - } // Recursively process the included blocks if err := f.dealBlock(top, filepath.Dir(includeTask), includeBlocks); err != nil { return err diff --git a/pkg/project/project_test.go b/pkg/project/project_test.go index c22b6a8b..f3743584 100644 --- a/pkg/project/project_test.go +++ b/pkg/project/project_test.go @@ -308,48 +308,28 @@ func TestMarshalPlaybook(t *testing.T) { { Base: kkprojectv1.Base{ Name: "playbook-var1", - VarsFromMarshal: yaml.Node{ - Kind: yaml.MappingNode, - Tag: "!!map", - Line: 6, - Column: 5, - Content: []*yaml.Node{ + Vars: kkprojectv1.Vars{ + Nodes: []yaml.Node{ { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "a", + Kind: yaml.MappingNode, + Tag: "!!map", Line: 6, Column: 5, - }, - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "b", - Line: 6, - Column: 8, - }, - }, - }, - Vars: []yaml.Node{ - { - Kind: yaml.MappingNode, - Tag: "!!map", - Line: 6, - Column: 5, - Content: []*yaml.Node{ - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "a", - Line: 6, - Column: 5, - }, - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "b", - Line: 6, - Column: 8, + Content: []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "a", + Line: 6, + Column: 5, + }, + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "b", + Line: 6, + Column: 8, + }, }, }, }, @@ -375,89 +355,92 @@ func TestMarshalPlaybook(t *testing.T) { VarsFiles: []string{"vars/var1.yaml", "vars/var2.yaml"}, Base: kkprojectv1.Base{ Name: "playbook-var2", - Vars: []yaml.Node{ - { - Kind: yaml.MappingNode, - Tag: "!!map", - Line: 2, - Column: 1, - Content: []*yaml.Node{ - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "a1", - Line: 2, - Column: 1, 
- }, - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "aa", - Line: 2, - Column: 5, - }, - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "a2", - Line: 3, - Column: 1, - }, - { - Kind: yaml.ScalarNode, - Tag: "!!int", - Value: "1", - Line: 3, - Column: 5, + Vars: kkprojectv1.Vars{ + Nodes: []yaml.Node{ + { + Kind: yaml.MappingNode, + Tag: "!!map", + Line: 2, + Column: 1, + Content: []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "a1", + Line: 2, + Column: 1, + }, + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "aa", + Line: 2, + Column: 5, + }, + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "a2", + Line: 3, + Column: 1, + }, + { + Kind: yaml.ScalarNode, + Tag: "!!int", + Value: "1", + Line: 3, + Column: 5, + }, }, }, - }, { - Kind: yaml.MappingNode, - Tag: "!!map", - Line: 1, - Column: 1, - Content: []*yaml.Node{ - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "a2", - Line: 1, - Column: 1, - }, - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "aaa", - Line: 1, - Column: 5, - }, - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "a3", - Line: 2, - Column: 1, - }, - { - Kind: yaml.MappingNode, - Tag: "!!map", - Value: "", - Line: 3, - Column: 2, - Content: []*yaml.Node{ - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "b3", - Line: 3, - Column: 2, - }, - { - Kind: yaml.ScalarNode, - Tag: "!!int", - Value: "1", - Line: 3, - Column: 6, + { + Kind: yaml.MappingNode, + Tag: "!!map", + Line: 1, + Column: 1, + Content: []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "a2", + Line: 1, + Column: 1, + }, + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "aaa", + Line: 1, + Column: 5, + }, + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "a3", + Line: 2, + Column: 1, + }, + { + Kind: yaml.MappingNode, + Tag: "!!map", + Value: "", + Line: 3, + Column: 2, + Content: []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "b3", + Line: 3, + Column: 2, + }, + { + Kind: 
yaml.ScalarNode, + Tag: "!!int", + Value: "1", + Line: 3, + Column: 6, + }, }, }, }, @@ -485,83 +468,64 @@ func TestMarshalPlaybook(t *testing.T) { VarsFiles: []string{"vars/var1.yaml"}, Base: kkprojectv1.Base{ Name: "playbook-var3", - VarsFromMarshal: yaml.Node{ - Kind: yaml.MappingNode, - Tag: "!!map", - Line: 8, - Column: 5, - Content: []*yaml.Node{ + Vars: kkprojectv1.Vars{ + Nodes: []yaml.Node{ { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "a2", + Kind: yaml.MappingNode, + Tag: "!!map", Line: 8, Column: 5, + Content: []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "a2", + Line: 8, + Column: 5, + }, + { + Kind: yaml.ScalarNode, + Tag: "!!int", + Value: "2", + Line: 8, + Column: 9, + }, + }, }, { - Kind: yaml.ScalarNode, - Tag: "!!int", - Value: "2", - Line: 8, - Column: 9, - }, - }, - }, - Vars: []yaml.Node{ - { - Kind: yaml.MappingNode, - Tag: "!!map", - Line: 8, - Column: 5, - Content: []*yaml.Node{ - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "a2", - Line: 8, - Column: 5, - }, - { - Kind: yaml.ScalarNode, - Tag: "!!int", - Value: "2", - Line: 8, - Column: 9, - }, - }, - }, { - Kind: yaml.MappingNode, - Tag: "!!map", - Line: 2, - Column: 1, - Content: []*yaml.Node{ - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "a1", - Line: 2, - Column: 1, - }, - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "aa", - Line: 2, - Column: 5, - }, - { - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: "a2", - Line: 3, - Column: 1, - }, - { - Kind: yaml.ScalarNode, - Tag: "!!int", - Value: "1", - Line: 3, - Column: 5, + Kind: yaml.MappingNode, + Tag: "!!map", + Line: 2, + Column: 1, + Content: []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "a1", + Line: 2, + Column: 1, + }, + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "aa", + Line: 2, + Column: 5, + }, + { + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: "a2", + Line: 3, + Column: 1, + }, + { + Kind: yaml.ScalarNode, + Tag: "!!int", + Value: "1", + Line: 
3, + Column: 5, + }, }, }, }, @@ -639,7 +603,7 @@ func TestMarshalPlaybook(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, tc.except, actual, tc.name) + assert.Equal(t, tc.except, actual) }) } } diff --git a/pkg/utils/files.go b/pkg/utils/files.go index 20e12e17..60948670 100644 --- a/pkg/utils/files.go +++ b/pkg/utils/files.go @@ -4,26 +4,25 @@ import ( "fmt" "io" "io/fs" - "strings" + "path/filepath" + "sort" ) // ReadDirFiles read all file in input fs and dir func ReadDirFiles(fsys fs.FS, dir string, handler func(data []byte) error) error { - d, err := fsys.Open(dir) + entries, err := fs.ReadDir(fsys, dir) if err != nil { - return fmt.Errorf("failed to open path %s with error: %w", dir, err) - } - defer d.Close() - entries, err := d.(fs.ReadDirFile).ReadDir(-1) - if err != nil { - return fmt.Errorf("read dir %s failed with error: %w", dir, err) + return fmt.Errorf("failed to read dir %q: %w", dir, err) } + sort.Slice(entries, func(i, j int) bool { + return entries[i].Name() < entries[j].Name() + }) for _, entry := range entries { if entry.IsDir() { // skip dir continue } - if !HasSuffixIn(entry.Name(), []string{"yaml", "yml"}) { + if filepath.Ext(entry.Name()) != ".yaml" && filepath.Ext(entry.Name()) != ".yml" { continue } filePath := dir + "/" + entry.Name() @@ -51,13 +50,3 @@ func ReadDirFiles(fsys fs.FS, dir string, handler func(data []byte) error) error } return nil } - -// HasSuffixIn check input string a end with one of slice b -func HasSuffixIn(a string, b []string) bool { - for _, suffix := range b { - if strings.HasSuffix(a, suffix) { - return true - } - } - return false -} diff --git a/pkg/web/handler/resources.go b/pkg/web/handler/resources.go index b0bf14de..53998dd4 100644 --- a/pkg/web/handler/resources.go +++ b/pkg/web/handler/resources.go @@ -72,13 +72,7 @@ func (h ResourceHandler) PostConfig(request *restful.Request, response *restful. 
oldConfig map[string]map[string]any newConfig map[string]map[string]any ) - bodyBytes, err := io.ReadAll(request.Request.Body) - if err != nil { - _ = response.WriteError(http.StatusInternalServerError, err) - return - } - // Read new config from request body. - if err := json.Unmarshal(bodyBytes, &newConfig); err != nil { + if err := request.ReadEntity(&newConfig); err != nil { _ = response.WriteError(http.StatusInternalServerError, err) return } @@ -170,11 +164,17 @@ func (h ResourceHandler) PostConfig(request *restful.Request, response *restful. for fileName, playbook := range playbooks { if playbook.Status.FailureMessage != "" { preCheckResult[fileName] = playbook.Status.FailureMessage + delete(newConfig, fileName) } } + data, err := json.Marshal(newConfig) + if err != nil { + _ = response.WriteError(http.StatusInternalServerError, err) + return + } // Write new config to file. - if err := os.WriteFile(filepath.Join(h.rootPath, api.SchemaConfigFile), bodyBytes, 0644); err != nil { + if err := os.WriteFile(filepath.Join(h.rootPath, api.SchemaConfigFile), data, 0644); err != nil { _ = response.WriteError(http.StatusInternalServerError, err) return }