diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..71213115
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,41 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+*.tmp
+bin
+hack/tools/bin
+
+# Test binary, build with `go test -c`
+*.test
+
+# E2E test templates
+test/e2e/data/infrastructure-kubekey/v1beta1/cluster-template*.yaml
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# IntelliJ
+.idea/
+*.iml
+
+# Vscode files
+.vscode
+
+# rbac and manager config for example provider
+manager_image_patch.yaml-e
+manager_pull_policy.yaml-e
+
+# Sample config files auto-generated by kubebuilder
+config/samples
+
+# test results
+_artifacts
+
+# Used during parts of the build process. Files _should_ get cleaned up automatically.
+# This is also a good location for any temporary manifests used during development
+tmp
+
+# Used by current object
+/example/test/
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
new file mode 100644
index 00000000..1b9073c1
--- /dev/null
+++ b/CONTRIBUTORS.md
@@ -0,0 +1,121 @@
+### Sincere gratitude goes to the following people for their contributions to Pipeline
+
+Contributions of any kind are welcome! Thanks go to these wonderful contributors; they made our project grow fast.
+
+
+
+
+
+
+
+
+
+
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..c5aa2053
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,61 @@
+# Build architecture
+ARG ARCH
+ARG builder_image
+
+# Download dependencies
+FROM alpine:3.19.0 as base_os_context
+
+
+ENV OUTDIR=/out
+RUN mkdir -p ${OUTDIR}/usr/local/bin/
+
+WORKDIR /tmp
+
+RUN apk add --no-cache ca-certificates
+
+
+# Build the manager binary
+FROM ${builder_image} as builder
+
+# Run this with docker build --build-arg goproxy=$(go env GOPROXY) to override the goproxy
+ARG goproxy=https://goproxy.cn,direct
+ENV GOPROXY=$goproxy
+
+WORKDIR /workspace
+
+COPY go.mod go.mod
+COPY go.sum go.sum
+
+# Cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN --mount=type=cache,target=/go/pkg/mod \
+ go mod download
+
+# Copy the go source
+COPY ./ ./
+
+# Cache the go build into Go's compiler cache folder so we benefit from compiler caching across docker build calls
+RUN --mount=type=cache,target=/root/.cache/go-build \
+ --mount=type=cache,target=/go/pkg/mod \
+ go build -o controller-manager cmd/controller-manager/controller_manager.go
+
+# Build
+ARG ARCH
+ARG LDFLAGS
+
+# Do not force rebuild of up-to-date packages (do not use -a) and use the compiler cache folder
+RUN --mount=type=cache,target=/root/.cache/go-build \
+ --mount=type=cache,target=/go/pkg/mod \
+ CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \
+ go build -o controller-manager cmd/controller-manager/controller_manager.go
+
+FROM --platform=${ARCH} alpine:3.19.0
+
+WORKDIR /
+
+RUN mkdir -p /var/lib/kubekey/rootfs
+
+COPY --from=base_os_context /out/ /
+COPY --from=builder /workspace/controller-manager /usr/local/bin
+
+ENTRYPOINT ["sh"]
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..cd92c18d
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2018-2020 KubeSphere Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..e037e275
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,627 @@
+# Ensure Make is run with bash shell as some syntax below is bash-specific
+SHELL:=/usr/bin/env bash
+
+.DEFAULT_GOAL:=help
+
+#
+# Go.
+#
+GO_VERSION ?= 1.20
+GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION)
+
+# Use GOPROXY environment variable if set
+GOPROXY := $(shell go env GOPROXY)
+ifeq ($(GOPROXY),)
+GOPROXY := https://goproxy.cn,direct
+endif
+export GOPROXY
+
+# Activate module mode, as we use go modules to manage dependencies
+export GO111MODULE=on
+
+# This option is for running docker manifest command
+export DOCKER_CLI_EXPERIMENTAL := enabled
+
+#
+# Directories.
+#
+# Full directory of where the Makefile resides
+ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
+EXP_DIR := exp
+BIN_DIR := bin
+TEST_DIR := test
+TOOLS_DIR := hack/tools
+TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/$(BIN_DIR))
+E2E_FRAMEWORK_DIR := $(TEST_DIR)/framework
+GO_INSTALL := ./scripts/go_install.sh
+
+export PATH := $(abspath $(TOOLS_BIN_DIR)):$(PATH)
+
+#
+# Binaries.
+#
+# Note: Need to use abspath so we can invoke these from subdirectories
+KUSTOMIZE_VER := v4.5.2
+KUSTOMIZE_BIN := kustomize
+KUSTOMIZE := $(abspath $(TOOLS_BIN_DIR)/$(KUSTOMIZE_BIN)-$(KUSTOMIZE_VER))
+KUSTOMIZE_PKG := sigs.k8s.io/kustomize/kustomize/v4
+
+SETUP_ENVTEST_VER := v0.0.0-20211110210527-619e6b92dab9
+SETUP_ENVTEST_BIN := setup-envtest
+SETUP_ENVTEST := $(abspath $(TOOLS_BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER))
+SETUP_ENVTEST_PKG := sigs.k8s.io/controller-runtime/tools/setup-envtest
+
+CONTROLLER_GEN_VER := v0.13.0
+CONTROLLER_GEN_BIN := controller-gen
+CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER))
+CONTROLLER_GEN_PKG := sigs.k8s.io/controller-tools/cmd/controller-gen
+
+GOTESTSUM_VER := v1.6.4
+GOTESTSUM_BIN := gotestsum
+GOTESTSUM := $(abspath $(TOOLS_BIN_DIR)/$(GOTESTSUM_BIN)-$(GOTESTSUM_VER))
+GOTESTSUM_PKG := gotest.tools/gotestsum
+
+HADOLINT_VER := v2.10.0
+HADOLINT_FAILURE_THRESHOLD = warning
+
+GOLANGCI_LINT_BIN := golangci-lint
+GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN))
+
+# Define Docker related variables. Releases should modify and double check these vars.
+REGISTRY ?= docker.io/kubespheredev
+PROD_REGISTRY ?= docker.io/kubesphere
+
+# capkk
+CAPKK_IMAGE_NAME ?= capkk-controller
+CAPKK_CONTROLLER_IMG ?= $(REGISTRY)/$(CAPKK_IMAGE_NAME)
+
+# bootstrap
+K3S_BOOTSTRAP_IMAGE_NAME ?= k3s-bootstrap-controller
+K3S_BOOTSTRAP_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_BOOTSTRAP_IMAGE_NAME)
+
+# control plane
+K3S_CONTROL_PLANE_IMAGE_NAME ?= k3s-control-plane-controller
+K3S_CONTROL_PLANE_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME)
+
+# It is set by Prow GIT_TAG, a git-based tag of the form vYYYYMMDD-hash, e.g., v20210120-v0.3.10-308-gc61521971
+
+TAG ?= dev
+ARCH ?= $(shell go env GOARCH)
+ALL_ARCH = amd64 arm arm64 ppc64le s390x
+
+# Allow overriding the imagePullPolicy
+PULL_POLICY ?= Always
+
+# Hosts running SELinux need :z added to volume mounts
+SELINUX_ENABLED := $(shell cat /sys/fs/selinux/enforce 2> /dev/null || echo 0)
+
+ifeq ($(SELINUX_ENABLED),1)
+ DOCKER_VOL_OPTS?=:z
+endif
+
+# Set build time variables including version details
+LDFLAGS := $(shell hack/version.sh)
+
+# Set kk build tags
+BUILDTAGS = exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
+
+.PHONY: all
+all: test managers
+
+.PHONY: help
+help: ## Display this help.
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n\nTargets:\n"} /^[0-9A-Za-z_-]+:.*?##/ { printf " \033[36m%-45s\033[0m %s\n", $$1, $$2 } /^\$$\([0-9A-Za-z_-]+\):.*?##/ { gsub("_","-", $$1); printf " \033[36m%-45s\033[0m %s\n", tolower(substr($$1, 3, length($$1)-7)), $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+## --------------------------------------
+## Generate / Manifests
+## --------------------------------------
+
+##@ generate:
+
+ALL_GENERATE_MODULES = capkk k3s-bootstrap k3s-control-plane
+
+.PHONY: generate
+generate: ## Run all generate-manifests-*, generate-go-deepcopy-* targets
+ $(MAKE) generate-modules generate-manifests generate-go-deepcopy
+
+.PHONY: generate-manifests
+generate-manifests: ## Run all generate-manifest-* targets
+ $(MAKE) $(addprefix generate-manifests-,$(ALL_GENERATE_MODULES))
+
+.PHONY: generate-manifests-capkk
+generate-manifests-capkk: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for core
+ $(MAKE) clean-generated-yaml SRC_DIRS="./config/crd/bases"
+ $(CONTROLLER_GEN) \
+ paths=./api/... \
+ paths=./controllers/... \
+ crd:crdVersions=v1 \
+ rbac:roleName=manager-role \
+ output:crd:dir=./config/crd/bases \
+ output:webhook:dir=./config/webhook \
+ webhook
+
+.PHONY: generate-manifests-k3s-bootstrap
+generate-manifests-k3s-bootstrap: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for core
+ $(MAKE) clean-generated-yaml SRC_DIRS="./bootstrap/k3s/config/crd/bases"
+ $(CONTROLLER_GEN) \
+ paths=./bootstrap/k3s/api/... \
+ paths=./bootstrap/k3s/controllers/... \
+ crd:crdVersions=v1 \
+ rbac:roleName=manager-role \
+ output:crd:dir=./bootstrap/k3s/config/crd/bases \
+ output:rbac:dir=./bootstrap/k3s/config/rbac \
+ output:webhook:dir=./bootstrap/k3s/config/webhook \
+ webhook
+
+.PHONY: generate-manifests-k3s-control-plane
+generate-manifests-k3s-control-plane: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for core
+ $(MAKE) clean-generated-yaml SRC_DIRS="./controlplane/k3s/config/crd/bases"
+ $(CONTROLLER_GEN) \
+ paths=./controlplane/k3s/api/... \
+ paths=./controlplane/k3s/controllers/... \
+ crd:crdVersions=v1 \
+ rbac:roleName=manager-role \
+ output:crd:dir=./controlplane/k3s/config/crd/bases \
+ output:rbac:dir=./controlplane/k3s/config/rbac \
+ output:webhook:dir=./controlplane/k3s/config/webhook \
+ webhook
+
+.PHONY: generate-go-deepcopy
+generate-go-deepcopy: ## Run all generate-go-deepcopy-* targets
+ $(MAKE) $(addprefix generate-go-deepcopy-,$(ALL_GENERATE_MODULES))
+
+.PHONY: generate-go-deepcopy-capkk
+generate-go-deepcopy-capkk: $(CONTROLLER_GEN) ## Generate deepcopy go code for capkk
+ $(MAKE) clean-generated-deepcopy SRC_DIRS="./api"
+ $(CONTROLLER_GEN) \
+ object:headerFile=./hack/boilerplate.go.txt \
+ paths=./api/... \
+
+.PHONY: generate-go-deepcopy-k3s-bootstrap
+generate-go-deepcopy-k3s-bootstrap: $(CONTROLLER_GEN) ## Generate deepcopy go code for k3s-bootstrap
+ $(MAKE) clean-generated-deepcopy SRC_DIRS="./bootstrap/k3s/api"
+ $(CONTROLLER_GEN) \
+ object:headerFile=./hack/boilerplate.go.txt \
+ paths=./bootstrap/k3s/api/... \
+
+.PHONY: generate-go-deepcopy-k3s-control-plane
+generate-go-deepcopy-k3s-control-plane: $(CONTROLLER_GEN) ## Generate deepcopy go code for k3s-control-plane
+ $(MAKE) clean-generated-deepcopy SRC_DIRS="./controlplane/k3s/api"
+ $(CONTROLLER_GEN) \
+ object:headerFile=./hack/boilerplate.go.txt \
+ paths=./controlplane/k3s/api/... \
+
+.PHONY: generate-modules
+generate-modules: ## Run go mod tidy to ensure modules are up to date
+ go mod tidy
+
+## --------------------------------------
+## Lint / Verify
+## --------------------------------------
+
+##@ lint and verify:
+
+.PHONY: lint
+lint: $(GOLANGCI_LINT) ## Lint the codebase
+ $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
+ cd $(TEST_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
+ cd $(TOOLS_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
+ ./scripts/ci-lint-dockerfiles.sh $(HADOLINT_VER) $(HADOLINT_FAILURE_THRESHOLD)
+
+.PHONY: lint-dockerfiles
+lint-dockerfiles:
+ ./scripts/ci-lint-dockerfiles.sh $(HADOLINT_VER) $(HADOLINT_FAILURE_THRESHOLD)
+
+.PHONY: verify
+verify: $(addprefix verify-,$(ALL_VERIFY_CHECKS)) lint-dockerfiles ## Run all verify-* targets
+
+.PHONY: verify-modules
+verify-modules: generate-modules ## Verify go modules are up to date
+ @if !(git diff --quiet HEAD -- go.sum go.mod $(TOOLS_DIR)/go.mod $(TOOLS_DIR)/go.sum $(TEST_DIR)/go.mod $(TEST_DIR)/go.sum); then \
+ git diff; \
+ echo "go module files are out of date"; exit 1; \
+ fi
+ @if (find . -name 'go.mod' | xargs -n1 grep -q -i 'k8s.io/client-go.*+incompatible'); then \
+ find . -name "go.mod" -exec grep -i 'k8s.io/client-go.*+incompatible' {} \; -print; \
+ echo "go module contains an incompatible client-go version"; exit 1; \
+ fi
+
+.PHONY: verify-gen
+verify-gen: generate ## Verify go generated files are up to date
+ @if !(git diff --quiet HEAD); then \
+ git diff; \
+ echo "generated files are out of date, run make generate"; exit 1; \
+ fi
+
+## --------------------------------------
+## Binaries
+## --------------------------------------
+
+##@ build:
+
+.PHONY: kk
+kk:
+ CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/kk github.com/kubesphere/kubekey/v3/cmd/kk;
+
+ALL_MANAGERS = capkk k3s-bootstrap k3s-control-plane
+
+.PHONY: managers
+managers: $(addprefix manager-,$(ALL_MANAGERS)) ## Run all manager-* targets
+
+.PHONY: manager-capkk
+manager-capkk: ## Build the capkk manager binary into the ./bin folder
+ go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/manager github.com/kubesphere/kubekey/v3
+
+.PHONY: manager-k3s-bootstrap
+manager-k3s-bootstrap: ## Build the k3s bootstrap manager binary into the ./bin folder
+ go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-bootstrap-manager github.com/kubesphere/kubekey/v3/bootstrap/k3s
+
+.PHONY: manager-k3s-control-plane
+manager-k3s-control-plane: ## Build the k3s control plane manager binary into the ./bin folder
+ go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-control-plane-manager github.com/kubesphere/kubekey/v3/controlplane/k3s
+
+.PHONY: docker-pull-prerequisites
+docker-pull-prerequisites:
+ docker pull docker.io/docker/dockerfile:1.4
+ docker pull $(GO_CONTAINER_IMAGE)
+
+.PHONY: docker-build-all
+docker-build-all: $(addprefix docker-build-,$(ALL_ARCH)) ## Build docker images for all architectures
+
+docker-build-%:
+ $(MAKE) ARCH=$* docker-build
+
+ALL_DOCKER_BUILD = capkk k3s-bootstrap k3s-control-plane
+
+.PHONY: docker-build
+docker-build: docker-pull-prerequisites ## Run docker-build-* targets for all providers
+ $(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(ALL_DOCKER_BUILD))
+
+.PHONY: docker-build-capkk
+docker-build-capkk: ## Build the docker image for capkk
+ DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
+
+.PHONY: docker-build-k3s-bootstrap
+docker-build-k3s-bootstrap: ## Build the docker image for k3s bootstrap controller manager
+ DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./bootstrap/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
+
+.PHONY: docker-build-k3s-control-plane
+docker-build-k3s-control-plane: ## Build the docker image for k3s control plane controller manager
+ DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./controlplane/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
+
+.PHONY: docker-build-e2e
+docker-build-e2e: ## Build the docker image for capkk
+ $(MAKE) docker-build REGISTRY=docker.io/kubespheredev PULL_POLICY=IfNotPresent TAG=e2e
+
+## --------------------------------------
+## Deployment
+## --------------------------------------
+
+##@ deployment
+
+ifndef ignore-not-found
+ ignore-not-found = false
+endif
+
+.PHONY: install
+install: generate $(KUSTOMIZE) ## Install CRDs into the K8s cluster specified in ~/.kube/config.
+ $(KUSTOMIZE) build config/crd | kubectl apply -f -
+
+.PHONY: uninstall
+uninstall: generate $(KUSTOMIZE) ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+ $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
+
+.PHONY: deploy
+deploy: generate $(KUSTOMIZE) ## Deploy controller to the K8s cluster specified in ~/.kube/config.
+ $(MAKE) set-manifest-image \
+ MANIFEST_IMG=$(REGISTRY)/$(CAPKK_IMAGE_NAME)-$(ARCH) MANIFEST_TAG=$(TAG) \
+ TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
+ cd config/manager
+ $(KUSTOMIZE) build config/default | kubectl apply -f -
+
+.PHONY: undeploy
+undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+ $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
+
+## --------------------------------------
+## Testing
+## --------------------------------------
+
+##@ test:
+
+ARTIFACTS ?= ${ROOT_DIR}/_artifacts
+
+ifeq ($(shell go env GOOS),darwin) # Use the darwin/amd64 binary until an arm64 version is available
+ KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path --arch amd64 $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))
+else
+ KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))
+endif
+
+.PHONY: test
+test: $(SETUP_ENVTEST) ## Run unit and integration tests
+ KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test ./... $(TEST_ARGS)
+
+.PHONY: test-verbose
+test-verbose: ## Run unit and integration tests with verbose flag
+ $(MAKE) test TEST_ARGS="$(TEST_ARGS) -v"
+
+.PHONY: test-junit
+test-junit: $(SETUP_ENVTEST) $(GOTESTSUM) ## Run unit and integration tests and generate a junit report
+ set +o errexit; (KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -json ./... $(TEST_ARGS); echo $$? > $(ARTIFACTS)/junit.exitcode) | tee $(ARTIFACTS)/junit.stdout
+ $(GOTESTSUM) --junitfile $(ARTIFACTS)/junit.xml --raw-command cat $(ARTIFACTS)/junit.stdout
+ exit $$(cat $(ARTIFACTS)/junit.exitcode)
+
+.PHONY: test-cover
+test-cover: ## Run unit and integration tests and generate a coverage report
+ $(MAKE) test TEST_ARGS="$(TEST_ARGS) -coverprofile=out/coverage.out"
+ go tool cover -func=out/coverage.out -o out/coverage.txt
+ go tool cover -html=out/coverage.out -o out/coverage.html
+
+.PHONY: test-e2e
+test-e2e: ## Run e2e tests
+ $(MAKE) -C $(TEST_DIR)/e2e run
+
+.PHONY: test-e2e-k3s
+test-e2e-k3s: ## Run e2e tests
+ $(MAKE) -C $(TEST_DIR)/e2e run-k3s
+
+## --------------------------------------
+## Release
+## --------------------------------------
+
+##@ release:
+
+## latest git tag for the commit, e.g., v0.3.10
+RELEASE_TAG ?= $(shell git describe --abbrev=0 2>/dev/null)
+ifneq (,$(findstring -,$(RELEASE_TAG)))
+ PRE_RELEASE=true
+endif
+# the previous release tag, e.g., v0.3.9, excluding pre-release tags
+PREVIOUS_TAG ?= $(shell git tag -l | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$$" | sort -V | grep -B1 $(RELEASE_TAG) | head -n 1 2>/dev/null)
+RELEASE_DIR := out
+
+$(RELEASE_DIR):
+ mkdir -p $(RELEASE_DIR)/
+
+.PHONY: release
+release: clean-release ## Build and push container images using the latest git tag for the commit
+ @if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi
+ @if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi
+ git checkout "${RELEASE_TAG}"
+ ## Build binaries first.
+ GIT_VERSION=$(RELEASE_TAG) $(MAKE) release-binaries
+ # Set the manifest image to the production bucket.
+ $(MAKE) manifest-modification REGISTRY=$(PROD_REGISTRY)
+ ## Build the manifests
+ $(MAKE) release-manifests
+ ## Build the templates
+ $(MAKE) release-templates
+ ## Clean the git artifacts modified in the release process
+ $(MAKE) clean-release-git
+
+release-binaries: ## Build the binaries to publish with a release
+ RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=amd64 $(MAKE) release-binary
+ RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=amd64 $(MAKE) release-archive
+ RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=arm64 $(MAKE) release-binary
+ RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=arm64 $(MAKE) release-archive
+ RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=amd64 $(MAKE) release-binary
+ RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=amd64 $(MAKE) release-archive
+ RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=arm64 $(MAKE) release-binary
+ RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=arm64 $(MAKE) release-archive
+
+release-binary: $(RELEASE_DIR)
+ docker run \
+ --rm \
+ -e CGO_ENABLED=0 \
+ -e GOOS=$(GOOS) \
+ -e GOARCH=$(GOARCH) \
+ -e GOPROXY=$(GOPROXY) \
+ -v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \
+ -w /workspace \
+ golang:$(GO_VERSION) \
+ go build -a -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS) -extldflags '-static'" \
+ -o $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY)) $(RELEASE_BINARY)
+
+release-archive: $(RELEASE_DIR)
+ tar -czf $(RELEASE_DIR)/kubekey-$(RELEASE_TAG)-$(GOOS)-$(GOARCH).tar.gz -C $(RELEASE_DIR)/ $(notdir $(RELEASE_BINARY))
+ rm -rf $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY))
+
+.PHONY: manifest-modification
+manifest-modification: # Set the manifest images to the staging/production bucket.
+ $(MAKE) set-manifest-image \
+ MANIFEST_IMG=$(REGISTRY)/$(CAPKK_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
+ TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
+ $(MAKE) set-manifest-image \
+ MANIFEST_IMG=$(REGISTRY)/$(K3S_BOOTSTRAP_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
+ TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_image_patch.yaml"
+ $(MAKE) set-manifest-image \
+ MANIFEST_IMG=$(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
+ TARGET_RESOURCE="./controlplane/k3s/config/default/manager_image_patch.yaml"
+ $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./config/default/manager_pull_policy.yaml"
+ $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_pull_policy.yaml"
+ $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./controlplane/k3s/config/default/manager_pull_policy.yaml"
+
+.PHONY: release-manifests
+release-manifests: $(RELEASE_DIR) $(KUSTOMIZE) ## Build the manifests to publish with a release
+ # Build capkk-components.
+ $(KUSTOMIZE) build config/default > $(RELEASE_DIR)/infrastructure-components.yaml
+ # Build bootstrap-components.
+ $(KUSTOMIZE) build bootstrap/k3s/config/default > $(RELEASE_DIR)/bootstrap-components.yaml
+ # Build control-plane-components.
+ $(KUSTOMIZE) build controlplane/k3s/config/default > $(RELEASE_DIR)/control-plane-components.yaml
+
+ # Add metadata to the release artifacts
+ cp metadata.yaml $(RELEASE_DIR)/metadata.yaml
+
+.PHONY: release-templates
+release-templates: $(RELEASE_DIR) ## Generate release templates
+ cp templates/cluster-template*.yaml $(RELEASE_DIR)/
+
+.PHONY: release-prod
+release-prod: ## Build and push container images to the prod
+ REGISTRY=$(PROD_REGISTRY) TAG=$(RELEASE_TAG) $(MAKE) docker-build-all docker-push-all
+
+## --------------------------------------
+## Docker
+## --------------------------------------
+
+.PHONY: docker-push-all
+docker-push-all: $(addprefix docker-push-,$(ALL_ARCH)) ## Push the docker images to be included in the release for all architectures + related multiarch manifests
+ $(MAKE) docker-push-manifest-capkk
+ $(MAKE) docker-push-manifest-k3s-bootstrap
+ $(MAKE) docker-push-manifest-k3s-control-plane
+
+docker-push-%:
+ $(MAKE) ARCH=$* docker-push
+
+.PHONY: docker-push
+docker-push: ## Push the docker images
+ docker push $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
+ docker push $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
+ docker push $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
+
+.PHONY: docker-push-manifest-capkk
+docker-push-manifest-capkk: ## Push the multiarch manifest for the capkk docker images
+ ## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
+ docker manifest create --amend $(CAPKK_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CAPKK_CONTROLLER_IMG)\-&:$(TAG)~g")
+ @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${CAPKK_CONTROLLER_IMG}:${TAG} ${CAPKK_CONTROLLER_IMG}-$${arch}:${TAG}; done
+ docker manifest push --purge $(CAPKK_CONTROLLER_IMG):$(TAG)
+
+.PHONY: docker-push-manifest-k3s-bootstrap
+docker-push-manifest-k3s-bootstrap: ## Push the multiarch manifest for the k3s bootstrap docker images
+ ## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
+ docker manifest create --amend $(K3S_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(K3S_BOOTSTRAP_CONTROLLER_IMG)\-&:$(TAG)~g")
+ @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${K3S_BOOTSTRAP_CONTROLLER_IMG}:${TAG} ${K3S_BOOTSTRAP_CONTROLLER_IMG}-$${arch}:${TAG}; done
+ docker manifest push --purge $(K3S_BOOTSTRAP_CONTROLLER_IMG):$(TAG)
+
+.PHONY: docker-push-manifest-k3s-control-plane
+docker-push-manifest-k3s-control-plane: ## Push the multiarch manifest for the k3s control plane docker images
+ ## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
+ docker manifest create --amend $(K3S_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(K3S_CONTROL_PLANE_CONTROLLER_IMG)\-&:$(TAG)~g")
+ @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${K3S_CONTROL_PLANE_CONTROLLER_IMG}:${TAG} ${K3S_CONTROL_PLANE_CONTROLLER_IMG}-$${arch}:${TAG}; done
+ docker manifest push --purge $(K3S_CONTROL_PLANE_CONTROLLER_IMG):$(TAG)
+
+.PHONY: set-manifest-pull-policy
+set-manifest-pull-policy:
+ $(info Updating kustomize pull policy file for manager resources)
+ sed -i'' -e 's@imagePullPolicy: .*@imagePullPolicy: '"$(PULL_POLICY)"'@' $(TARGET_RESOURCE)
+
+.PHONY: set-manifest-image
+set-manifest-image:
+ $(info Updating kustomize image patch file for manager resource)
+ sed -i'' -e 's@image: .*@image: '"${MANIFEST_IMG}:$(MANIFEST_TAG)"'@' $(TARGET_RESOURCE)
+
+## --------------------------------------
+## Cleanup / Verification
+## --------------------------------------
+
+##@ clean:
+
+.PHONY: clean
+clean: ## Remove all generated files
+ $(MAKE) clean-bin
+
+.PHONY: clean-bin
+clean-bin: ## Remove all generated binaries
+ rm -rf $(BIN_DIR)
+ rm -rf $(TOOLS_BIN_DIR)
+
+.PHONY: clean-release
+clean-release: ## Remove the release folder
+ rm -rf $(RELEASE_DIR)
+
+.PHONY: clean-release-git
+clean-release-git: ## Restores the git files usually modified during a release
+ git restore ./*manager_image_patch.yaml ./*manager_pull_policy.yaml
+
+.PHONY: clean-generated-yaml
+clean-generated-yaml: ## Remove files generated by conversion-gen from the mentioned dirs. Example SRC_DIRS="./api/v1beta1"
+ (IFS=','; for i in $(SRC_DIRS); do find $$i -type f -name '*.yaml' -exec rm -f {} \;; done)
+
+.PHONY: clean-generated-deepcopy
+clean-generated-deepcopy: ## Remove files generated by conversion-gen from the mentioned dirs. Example SRC_DIRS="./api/v1beta1"
+ (IFS=','; for i in $(SRC_DIRS); do find $$i -type f -name 'zz_generated.deepcopy*' -exec rm -f {} \;; done)
+
+## --------------------------------------
+## Hack / Tools
+## --------------------------------------
+
+##@ hack/tools:
+
+.PHONY: $(CONTROLLER_GEN_BIN)
+$(CONTROLLER_GEN_BIN): $(CONTROLLER_GEN) ## Build a local copy of controller-gen.
+
+.PHONY: $(GOTESTSUM_BIN)
+$(GOTESTSUM_BIN): $(GOTESTSUM) ## Build a local copy of gotestsum.
+
+.PHONY: $(KUSTOMIZE_BIN)
+$(KUSTOMIZE_BIN): $(KUSTOMIZE) ## Build a local copy of kustomize.
+
+.PHONY: $(SETUP_ENVTEST_BIN)
+$(SETUP_ENVTEST_BIN): $(SETUP_ENVTEST) ## Build a local copy of setup-envtest.
+
+.PHONY: $(GOLANGCI_LINT_BIN)
+$(GOLANGCI_LINT_BIN): $(GOLANGCI_LINT) ## Build a local copy of golangci-lint
+
+$(CONTROLLER_GEN): # Build controller-gen from tools folder.
+ GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER)
+
+$(GOTESTSUM): # Build gotestsum from tools folder.
+ GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GOTESTSUM_PKG) $(GOTESTSUM_BIN) $(GOTESTSUM_VER)
+
+$(KUSTOMIZE): # Build kustomize from tools folder.
+ CGO_ENABLED=0 GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(KUSTOMIZE_PKG) $(KUSTOMIZE_BIN) $(KUSTOMIZE_VER)
+
+$(SETUP_ENVTEST): # Build setup-envtest from tools folder.
+ GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(SETUP_ENVTEST_PKG) $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER)
+
+$(GOLANGCI_LINT): .github/workflows/golangci-lint.yml # Download golangci-lint using hack script into tools folder.
+ hack/ensure-golangci-lint.sh \
+ -b $(TOOLS_BIN_DIR) \
+ $(shell cat .github/workflows/golangci-lint.yml | grep [[:space:]]version | sed 's/.*version: //')
+
+# Build the repository ISO artifacts
+ISO_ARCH ?= amd64
+ISO_OUTPUT_DIR ?= ./output
+ISO_BUILD_WORKDIR := hack/gen-repository-iso
+ISO_OS_NAMES := centos7 debian9 debian10 ubuntu1604 ubuntu1804 ubuntu2004 ubuntu2204
+ISO_BUILD_NAMES := $(addprefix build-iso-,$(ISO_OS_NAMES))
+build-iso-all: $(ISO_BUILD_NAMES)
+.PHONY: $(ISO_BUILD_NAMES)
+$(ISO_BUILD_NAMES):
+ @export DOCKER_BUILDKIT=1
+ docker build \
+ --platform linux/$(ISO_ARCH) \
+ --build-arg TARGETARCH=$(ISO_ARCH) \
+ -o type=local,dest=$(ISO_OUTPUT_DIR) \
+ -f $(ISO_BUILD_WORKDIR)/dockerfile.$(subst build-iso-,,$@) \
+ $(ISO_BUILD_WORKDIR)
+
+go-releaser-test:
+ goreleaser release --rm-dist --skip-publish --snapshot
+
+
+.PHONY: generate-go-deepcopy-kubekey
+generate-go-deepcopy-kubekey: $(CONTROLLER_GEN) ## Generate deepcopy object
+ $(MAKE) clean-generated-deepcopy SRC_DIRS="./pkg/apis/"
+ $(CONTROLLER_GEN) \
+ object:headerFile=./hack/boilerplate.go.txt \
+ paths=./pkg/apis/... \
+
+.PHONY: generate-manifests-kubekey
+generate-manifests-kubekey: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc.
+ $(CONTROLLER_GEN) \
+ paths=./pkg/apis/... \
+ crd \
+ output:crd:dir=./config/helm/crds/
+
+helm-package: ## Helm-package.
+ helm package config/helm -d ./bin
+
+.PHONY: docker-build-operator
+docker-build-operator: ## Build the docker image for operator
+ DOCKER_BUILDKIT=1 docker build --push --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg LDFLAGS="$(LDFLAGS)" . -t $(CAPKK_CONTROLLER_IMG):$(TAG)
+
+# Format all imports; `goimports` is required.
+goimports: ## Format all import, `goimports` is required.
+ @hack/update-goimports.sh
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 00000000..dd18c95f
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,18 @@
+approvers:
+ - pixiake
+ - 24sama
+ - rayzhou2017
+ - littleBlackHouse
+
+reviewers:
+ - pixiake
+ - rayzhou2017
+ - zryfish
+ - benjaminhuo
+ - calvinyv
+ - FeynmanZhou
+ - huanggze
+ - wansir
+ - LinuxSuRen
+ - 24sama
+ - littleBlackHouse
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..4ef81a99
--- /dev/null
+++ b/README.md
@@ -0,0 +1,8 @@
+# 背景
+当前kubekey中,如果要添加命令,或修改命令,都需要提交代码并重新发版。扩展性较差。
+1. 任务与框架分离(优势,目的,更方便扩展,借鉴ansible的playbook设计)
+2. 支持gitops(可通过git方式,管理自动化任务)
+3. 支持connector扩展
+4. 支持云原生方式自动化批量任务管理
+
+# 示例
diff --git a/cmd/controller-manager/app/options/options.go b/cmd/controller-manager/app/options/options.go
new file mode 100644
index 00000000..75bcfe08
--- /dev/null
+++ b/cmd/controller-manager/app/options/options.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "flag"
+ "strings"
+
+ "github.com/spf13/cobra"
+ cliflag "k8s.io/component-base/cli/flag"
+ "k8s.io/klog/v2"
+)
+
+type ControllerManagerServerOptions struct {
+ // Enable gops or not.
+ GOPSEnabled bool
+	// WorkDir is the base directory in which the command looks up any resource (project etc.).
+ WorkDir string
+ // Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.
+ Debug bool
+ // ControllerGates is the list of controller gates to enable or disable controller.
+ // '*' means "all enabled by default controllers"
+ // 'foo' means "enable 'foo'"
+ // '-foo' means "disable 'foo'"
+ // first item for a particular name wins.
+ // e.g. '-foo,foo' means "disable foo", 'foo,-foo' means "enable foo"
+ // * has the lowest priority.
+ // e.g. *,-foo, means "disable 'foo'"
+ ControllerGates []string
+ MaxConcurrentReconciles int
+ LeaderElection bool
+}
+
+func NewControllerManagerServerOptions() *ControllerManagerServerOptions {
+ return &ControllerManagerServerOptions{
+ WorkDir: "/var/lib/kubekey",
+ ControllerGates: []string{"*"},
+ MaxConcurrentReconciles: 1,
+ }
+}
+
+func (o *ControllerManagerServerOptions) Flags() cliflag.NamedFlagSets {
+ fss := cliflag.NamedFlagSets{}
+ gfs := fss.FlagSet("generic")
+ gfs.BoolVar(&o.GOPSEnabled, "gops", o.GOPSEnabled, "Whether to enable gops or not. When enabled this option, "+
+ "controller-manager will listen on a random port on 127.0.0.1, then you can use the gops tool to list and diagnose the controller-manager currently running.")
+ gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey. Default current dir. ")
+ gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.")
+
+ kfs := fss.FlagSet("klog")
+ local := flag.NewFlagSet("klog", flag.ExitOnError)
+ klog.InitFlags(local)
+ local.VisitAll(func(fl *flag.Flag) {
+ fl.Name = strings.Replace(fl.Name, "_", "-", -1)
+ kfs.AddGoFlag(fl)
+ })
+
+ cfs := fss.FlagSet("controller-manager")
+ cfs.StringSliceVar(&o.ControllerGates, "controllers", o.ControllerGates, "The list of controller gates to enable or disable controller. "+
+ "'*' means \"all enabled by default controllers\"")
+ cfs.IntVar(&o.MaxConcurrentReconciles, "max-concurrent-reconciles", o.MaxConcurrentReconciles, "The number of maximum concurrent reconciles for controller.")
+ cfs.BoolVar(&o.LeaderElection, "leader-election", o.LeaderElection, "Whether to enable leader election for controller-manager.")
+ return fss
+}
+
+func (o *ControllerManagerServerOptions) Complete(cmd *cobra.Command, args []string) {
+ // do nothing
+}
diff --git a/cmd/controller-manager/app/server.go b/cmd/controller-manager/app/server.go
new file mode 100644
index 00000000..4e2f28fc
--- /dev/null
+++ b/cmd/controller-manager/app/server.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "context"
+ "io/fs"
+ "os"
+
+ "github.com/google/gops/agent"
+ "github.com/spf13/cobra"
+ "sigs.k8s.io/controller-runtime/pkg/manager/signals"
+
+ "github.com/kubesphere/kubekey/v4/cmd/controller-manager/app/options"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/manager"
+)
+
+func NewControllerManagerCommand() *cobra.Command {
+ o := options.NewControllerManagerServerOptions()
+
+ cmd := &cobra.Command{
+ Use: "controller-manager",
+ Short: "kubekey controller manager",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if o.GOPSEnabled {
+ // Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
+ // Bind to a random port on address 127.0.0.1
+ if err := agent.Listen(agent.Options{}); err != nil {
+ return err
+ }
+ }
+
+ o.Complete(cmd, args)
+			// create the workdir directory if it does not exist
+ _const.SetWorkDir(o.WorkDir)
+ if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
+ if err := os.MkdirAll(o.WorkDir, fs.ModePerm); err != nil {
+ return err
+ }
+ }
+ return run(signals.SetupSignalHandler(), o)
+ },
+ }
+
+ fs := cmd.Flags()
+ for _, f := range o.Flags().FlagSets {
+ fs.AddFlagSet(f)
+ }
+ return cmd
+}
+
+func run(ctx context.Context, o *options.ControllerManagerServerOptions) error {
+ return manager.NewControllerManager(manager.ControllerManagerOptions{
+ ControllerGates: o.ControllerGates,
+ MaxConcurrentReconciles: o.MaxConcurrentReconciles,
+ LeaderElection: o.LeaderElection,
+ }).Run(ctx)
+}
diff --git a/cmd/controller-manager/controller_manager.go b/cmd/controller-manager/controller_manager.go
new file mode 100644
index 00000000..b5692ef7
--- /dev/null
+++ b/cmd/controller-manager/controller_manager.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "os"
+
+ "k8s.io/component-base/cli"
+
+ "github.com/kubesphere/kubekey/v4/cmd/controller-manager/app"
+)
+
+func main() {
+ command := app.NewControllerManagerCommand()
+ code := cli.Run(command)
+ os.Exit(code)
+}
diff --git a/cmd/kk/app/options/precheck.go b/cmd/kk/app/options/precheck.go
new file mode 100644
index 00000000..3e5e6e2d
--- /dev/null
+++ b/cmd/kk/app/options/precheck.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "fmt"
+
+ "github.com/google/uuid"
+ "github.com/spf13/cobra"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/rand"
+ cliflag "k8s.io/component-base/cli/flag"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+)
+
+type PreCheckOptions struct {
+ // Playbook which to execute.
+ Playbook string
+ // HostFile is the path of host file
+ InventoryFile string
+ // ConfigFile is the path of config file
+ ConfigFile string
+	// WorkDir is the base directory in which the command looks up any resource (project etc.).
+ WorkDir string
+ // Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.
+ Debug bool
+}
+
+func NewPreCheckOption() *PreCheckOptions {
+ o := &PreCheckOptions{
+ WorkDir: "/var/lib/kubekey",
+ }
+ return o
+}
+
+func (o *PreCheckOptions) Flags() cliflag.NamedFlagSets {
+ fss := cliflag.NamedFlagSets{}
+ gfs := fss.FlagSet("generic")
+ gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey. Default current dir. ")
+ gfs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "the config file path. support *.yaml ")
+ gfs.StringVar(&o.InventoryFile, "inventory", o.InventoryFile, "the host list file path. support *.ini")
+ gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.")
+
+ return fss
+}
+
+func (o *PreCheckOptions) Complete(cmd *cobra.Command, args []string) (*kubekeyv1.Pipeline, error) {
+ kk := &kubekeyv1.Pipeline{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Pipeline",
+ APIVersion: "kubekey.kubesphere.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: fmt.Sprintf("precheck-%s", rand.String(6)),
+ Namespace: metav1.NamespaceDefault,
+ UID: types.UID(uuid.NewString()),
+ CreationTimestamp: metav1.Now(),
+ Annotations: map[string]string{
+ kubekeyv1.BuiltinsProjectAnnotation: "",
+ },
+ },
+ }
+
+ // complete playbook. now only support one playbook
+ if len(args) == 1 {
+ o.Playbook = args[0]
+ } else {
+ return nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
+ }
+
+ kk.Spec = kubekeyv1.PipelineSpec{
+ Playbook: o.Playbook,
+ Debug: o.Debug,
+ }
+ return kk, nil
+}
diff --git a/cmd/kk/app/options/run.go b/cmd/kk/app/options/run.go
new file mode 100644
index 00000000..ce6d83e5
--- /dev/null
+++ b/cmd/kk/app/options/run.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+
+ "github.com/google/uuid"
+ "github.com/spf13/cobra"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/rand"
+ cliflag "k8s.io/component-base/cli/flag"
+ "k8s.io/klog/v2"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+)
+
+type KubekeyRunOptions struct {
+ // Enable gops or not.
+ GOPSEnabled bool
+	// WorkDir is the base directory in which the command looks up any resource (project etc.).
+ WorkDir string
+ // Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.
+ Debug bool
+ // ProjectAddr is the storage for executable packages (in Ansible format).
+ // When starting with http or https, it will be obtained from a Git repository.
+ // When starting with file path, it will be obtained from the local path.
+ ProjectAddr string
+	// ProjectName is the name of the project. The project is stored in the project dir under this name.
+	// If empty, it is generated from ProjectAddr.
+ ProjectName string
+ // ProjectBranch is the git branch of the git Addr.
+ ProjectBranch string
+	// ProjectTag is the git tag of the git Addr.
+ ProjectTag string
+ // ProjectInsecureSkipTLS skip tls or not when git addr is https.
+ ProjectInsecureSkipTLS bool
+	// ProjectToken is the token used to authenticate to the project repository.
+ ProjectToken string
+ // Playbook which to execute.
+ Playbook string
+ // HostFile is the path of host file
+ InventoryFile string
+ // ConfigFile is the path of config file
+ ConfigFile string
+ // Tags is the tags of playbook which to execute
+ Tags []string
+ // SkipTags is the tags of playbook which skip execute
+ SkipTags []string
+}
+
+func NewKubeKeyRunOptions() *KubekeyRunOptions {
+ o := &KubekeyRunOptions{
+ WorkDir: "/var/lib/kubekey",
+ }
+ return o
+}
+
+func (o *KubekeyRunOptions) Flags() cliflag.NamedFlagSets {
+ fss := cliflag.NamedFlagSets{}
+ gfs := fss.FlagSet("generic")
+ gfs.BoolVar(&o.GOPSEnabled, "gops", o.GOPSEnabled, "Whether to enable gops or not. When enabled this option, "+
+ "controller-manager will listen on a random port on 127.0.0.1, then you can use the gops tool to list and diagnose the controller-manager currently running.")
+ gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey. Default current dir. ")
+ gfs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "the config file path. support *.yaml ")
+ gfs.StringVar(&o.InventoryFile, "inventory", o.InventoryFile, "the host list file path. support *.ini")
+ gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.")
+
+ kfs := fss.FlagSet("klog")
+ local := flag.NewFlagSet("klog", flag.ExitOnError)
+ klog.InitFlags(local)
+ local.VisitAll(func(fl *flag.Flag) {
+ fl.Name = strings.Replace(fl.Name, "_", "-", -1)
+ kfs.AddGoFlag(fl)
+ })
+
+ gitfs := fss.FlagSet("project")
+ gitfs.StringVar(&o.ProjectAddr, "project-addr", o.ProjectAddr, "the storage for executable packages (in Ansible format)."+
+ " When starting with http or https, it will be obtained from a Git repository."+
+ "When starting with file path, it will be obtained from the local path.")
+ gitfs.StringVar(&o.ProjectBranch, "project-branch", o.ProjectBranch, "the git branch of the remote Addr")
+ gitfs.StringVar(&o.ProjectTag, "project-tag", o.ProjectTag, "the git tag of the remote Addr")
+ gitfs.BoolVar(&o.ProjectInsecureSkipTLS, "project-insecure-skip-tls", o.ProjectInsecureSkipTLS, "skip tls or not when git addr is https.")
+ gitfs.StringVar(&o.ProjectToken, "project-token", o.ProjectToken, "the token for private project.")
+
+ tfs := fss.FlagSet("tags")
+ tfs.StringArrayVar(&o.Tags, "tags", o.Tags, "the tags of playbook which to execute")
+ tfs.StringArrayVar(&o.SkipTags, "skip_tags", o.SkipTags, "the tags of playbook which skip execute")
+
+ return fss
+}
+
+func (o *KubekeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kubekeyv1.Pipeline, error) {
+ kk := &kubekeyv1.Pipeline{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Pipeline",
+ APIVersion: "kubekey.kubesphere.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: fmt.Sprintf("run-command-%s", rand.String(6)),
+ Namespace: metav1.NamespaceDefault,
+ UID: types.UID(uuid.NewString()),
+ CreationTimestamp: metav1.Now(),
+ Annotations: map[string]string{},
+ },
+ }
+ // complete playbook. now only support one playbook
+ if len(args) == 1 {
+ o.Playbook = args[0]
+ } else {
+ return nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
+ }
+
+ kk.Spec = kubekeyv1.PipelineSpec{
+ Project: kubekeyv1.PipelineProject{
+ Addr: o.ProjectAddr,
+ Name: o.ProjectName,
+ Branch: o.ProjectBranch,
+ Tag: o.ProjectTag,
+ InsecureSkipTLS: o.ProjectInsecureSkipTLS,
+ Token: o.ProjectToken,
+ },
+ Playbook: o.Playbook,
+ Tags: o.Tags,
+ SkipTags: o.SkipTags,
+ Debug: o.Debug,
+ }
+
+ return kk, nil
+}
diff --git a/cmd/kk/app/precheck.go b/cmd/kk/app/precheck.go
new file mode 100644
index 00000000..ea42c69e
--- /dev/null
+++ b/cmd/kk/app/precheck.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "io/fs"
+ "os"
+
+ "github.com/spf13/cobra"
+ "sigs.k8s.io/controller-runtime/pkg/manager/signals"
+
+ "github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+func newPreCheckCommand() *cobra.Command {
+ o := options.NewPreCheckOption()
+
+ cmd := &cobra.Command{
+ Use: "precheck",
+ Short: "kk precheck for cluster",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ kk, err := o.Complete(cmd, []string{"playbooks/precheck.yaml"})
+ if err != nil {
+ return err
+ }
+ // set workdir
+ _const.SetWorkDir(o.WorkDir)
+			// create the workdir directory if it does not exist
+ if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
+ if err := os.MkdirAll(o.WorkDir, fs.ModePerm); err != nil {
+ return err
+ }
+ }
+ return run(signals.SetupSignalHandler(), kk, o.ConfigFile, o.InventoryFile)
+ },
+ }
+
+ flags := cmd.Flags()
+ for _, f := range o.Flags().FlagSets {
+ flags.AddFlagSet(f)
+ }
+ return cmd
+}
diff --git a/cmd/kk/app/profiling.go b/cmd/kk/app/profiling.go
new file mode 100644
index 00000000..e3aca08d
--- /dev/null
+++ b/cmd/kk/app/profiling.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "runtime"
+ "runtime/pprof"
+
+ "github.com/spf13/pflag"
+)
+
+var (
+ profileName string
+ profileOutput string
+)
+
+func addProfilingFlags(flags *pflag.FlagSet) {
+ flags.StringVar(&profileName, "profile", "none", "Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex)")
+ flags.StringVar(&profileOutput, "profile-output", "profile.pprof", "Name of the file to write the profile to")
+}
+
+func initProfiling() error {
+ var (
+ f *os.File
+ err error
+ )
+ switch profileName {
+ case "none":
+ return nil
+ case "cpu":
+ f, err = os.Create(profileOutput)
+ if err != nil {
+ return err
+ }
+ err = pprof.StartCPUProfile(f)
+ if err != nil {
+ return err
+ }
+ // Block and mutex profiles need a call to Set{Block,Mutex}ProfileRate to
+ // output anything. We choose to sample all events.
+ case "block":
+ runtime.SetBlockProfileRate(1)
+ case "mutex":
+ runtime.SetMutexProfileFraction(1)
+ default:
+ // Check the profile name is valid.
+ if profile := pprof.Lookup(profileName); profile == nil {
+ return fmt.Errorf("unknown profile '%s'", profileName)
+ }
+ }
+
+ // If the command is interrupted before the end (ctrl-c), flush the
+ // profiling files
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt)
+ go func() {
+ <-c
+ f.Close()
+ flushProfiling()
+ os.Exit(0)
+ }()
+
+ return nil
+}
+
+func flushProfiling() error {
+ switch profileName {
+ case "none":
+ return nil
+ case "cpu":
+ pprof.StopCPUProfile()
+ case "heap":
+ runtime.GC()
+ fallthrough
+ default:
+ profile := pprof.Lookup(profileName)
+ if profile == nil {
+ return nil
+ }
+ f, err := os.Create(profileOutput)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ profile.WriteTo(f, 0)
+ }
+
+ return nil
+}
diff --git a/cmd/kk/app/run.go b/cmd/kk/app/run.go
new file mode 100644
index 00000000..64024e1f
--- /dev/null
+++ b/cmd/kk/app/run.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "context"
+ "io/fs"
+ "os"
+
+ "github.com/google/gops/agent"
+ "github.com/spf13/cobra"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/klog/v2"
+ "sigs.k8s.io/controller-runtime/pkg/manager/signals"
+ "sigs.k8s.io/yaml"
+
+ "github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/manager"
+)
+
+func newRunCommand() *cobra.Command {
+ o := options.NewKubeKeyRunOptions()
+
+ cmd := &cobra.Command{
+ Use: "run [playbook]",
+ Short: "run a playbook",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if o.GOPSEnabled {
+ // Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
+ // Bind to a random port on address 127.0.0.1
+ if err := agent.Listen(agent.Options{}); err != nil {
+ return err
+ }
+ }
+ kk, err := o.Complete(cmd, args)
+ if err != nil {
+ return err
+ }
+ // set workdir
+ _const.SetWorkDir(o.WorkDir)
+			// create the workdir directory if it does not exist
+ if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
+ if err := os.MkdirAll(o.WorkDir, fs.ModePerm); err != nil {
+ return err
+ }
+ }
+ // convert option to kubekeyv1.Pipeline
+ return run(signals.SetupSignalHandler(), kk, o.ConfigFile, o.InventoryFile)
+ },
+ }
+
+ fs := cmd.Flags()
+ for _, f := range o.Flags().FlagSets {
+ fs.AddFlagSet(f)
+ }
+ return cmd
+}
+
+func run(ctx context.Context, kk *kubekeyv1.Pipeline, configFile string, inventoryFile string) error {
+ // convert configFile
+ config := &kubekeyv1.Config{}
+ cdata, err := os.ReadFile(configFile)
+ if err != nil {
+ klog.Errorf("read config file error %v", err)
+ return err
+ }
+ if err := yaml.Unmarshal(cdata, config); err != nil {
+ klog.Errorf("unmarshal config file error %v", err)
+ return err
+ }
+ if config.Namespace == "" {
+ config.Namespace = corev1.NamespaceDefault
+ }
+ kk.Spec.ConfigRef = &corev1.ObjectReference{
+ Kind: config.Kind,
+ Namespace: config.Namespace,
+ Name: config.Name,
+ UID: config.UID,
+ APIVersion: config.APIVersion,
+ ResourceVersion: config.ResourceVersion,
+ }
+
+ // convert inventoryFile
+ inventory := &kubekeyv1.Inventory{}
+ idata, err := os.ReadFile(inventoryFile)
+ if err := yaml.Unmarshal(idata, inventory); err != nil {
+ klog.Errorf("unmarshal inventory file error %v", err)
+ return err
+ }
+ if inventory.Namespace == "" {
+ inventory.Namespace = corev1.NamespaceDefault
+ }
+ kk.Spec.InventoryRef = &corev1.ObjectReference{
+ Kind: inventory.Kind,
+ Namespace: inventory.Namespace,
+ Name: inventory.Name,
+ UID: inventory.UID,
+ APIVersion: inventory.APIVersion,
+ ResourceVersion: inventory.ResourceVersion,
+ }
+ return manager.NewCommandManager(manager.CommandManagerOptions{
+ Pipeline: kk,
+ Config: config,
+ Inventory: inventory,
+ }).Run(ctx)
+}
diff --git a/cmd/kk/app/server.go b/cmd/kk/app/server.go
new file mode 100644
index 00000000..45150210
--- /dev/null
+++ b/cmd/kk/app/server.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "github.com/spf13/cobra"
+)
+
+// NewKubeKeyCommand builds the root "kk" command, attaches the
+// profiling flags and hooks, and registers all subcommands.
+func NewKubeKeyCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "kk",
+ // fixed grammar of the user-facing long description
+ Long: "kubekey is a daemon that executes commands on a node",
+ PersistentPreRunE: func(*cobra.Command, []string) error {
+ return initProfiling()
+ },
+ PersistentPostRunE: func(*cobra.Command, []string) error {
+ // flush any collected profiles to disk on exit
+ return flushProfiling()
+ },
+ }
+
+ flags := cmd.PersistentFlags()
+ addProfilingFlags(flags)
+
+ cmd.AddCommand(newRunCommand())
+ cmd.AddCommand(newVersionCommand())
+
+ // internal command
+ cmd.AddCommand(newPreCheckCommand())
+ return cmd
+}
diff --git a/cmd/kk/app/version.go b/cmd/kk/app/version.go
new file mode 100644
index 00000000..d5e85cb9
--- /dev/null
+++ b/cmd/kk/app/version.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/kubesphere/kubekey/v4/version"
+)
+
+// newVersionCommand returns the "version" subcommand, which prints the
+// kk build version information.
+func newVersionCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "version",
+ // fixed copy-pasted description: this prints kk's version, not the
+ // KubeSphere controller-manager's
+ Short: "Print the version information of kk",
+ Run: func(cmd *cobra.Command, args []string) {
+ cmd.Println(version.Get())
+ },
+ }
+}
diff --git a/cmd/kk/kubekey.go b/cmd/kk/kubekey.go
new file mode 100644
index 00000000..e9ff56d2
--- /dev/null
+++ b/cmd/kk/kubekey.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "os"
+
+ "k8s.io/component-base/cli"
+
+ "github.com/kubesphere/kubekey/v4/cmd/kk/app"
+)
+
+// main is the kk entry point: it builds the root command, runs it via
+// the component-base CLI helper, and exits with the resulting code.
+func main() {
+ rootCmd := app.NewKubeKeyCommand()
+ os.Exit(cli.Run(rootCmd))
+}
diff --git a/config/helm/Chart.yaml b/config/helm/Chart.yaml
new file mode 100644
index 00000000..8c122f47
--- /dev/null
+++ b/config/helm/Chart.yaml
@@ -0,0 +1,15 @@
+apiVersion: v2
+name: kubekey
+description: A Helm chart for kubekey
+
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.4.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+appVersion: "v4.0.0"
diff --git a/config/helm/crds/kubekey.kubesphere.io_configs.yaml b/config/helm/crds/kubekey.kubesphere.io_configs.yaml
new file mode 100644
index 00000000..fbf7625a
--- /dev/null
+++ b/config/helm/crds/kubekey.kubesphere.io_configs.yaml
@@ -0,0 +1,38 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.13.0
+ name: configs.kubekey.kubesphere.io
+spec:
+ group: kubekey.kubesphere.io
+ names:
+ kind: Config
+ listKind: ConfigList
+ plural: configs
+ singular: config
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: true
diff --git a/config/helm/crds/kubekey.kubesphere.io_inventories.yaml b/config/helm/crds/kubekey.kubesphere.io_inventories.yaml
new file mode 100644
index 00000000..5e4875d1
--- /dev/null
+++ b/config/helm/crds/kubekey.kubesphere.io_inventories.yaml
@@ -0,0 +1,66 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.13.0
+ name: inventories.kubekey.kubesphere.io
+spec:
+ group: kubekey.kubesphere.io
+ names:
+ kind: Inventory
+ listKind: InventoryList
+ plural: inventories
+ singular: inventory
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ groups:
+ additionalProperties:
+ properties:
+ groups:
+ items:
+ type: string
+ type: array
+ hosts:
+ items:
+ type: string
+ type: array
+ vars:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: Groups nodes. a group contains repeated nodes
+ type: object
+ hosts:
+ additionalProperties:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ description: Hosts is all nodes
+ type: object
+ vars:
+ description: 'Vars for all host. the priority for vars is: host vars
+ > group vars > inventory vars'
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ type: object
+ served: true
+ storage: true
diff --git a/config/helm/crds/kubekey.kubesphere.io_pipelines.yaml b/config/helm/crds/kubekey.kubesphere.io_pipelines.yaml
new file mode 100644
index 00000000..0500d2e7
--- /dev/null
+++ b/config/helm/crds/kubekey.kubesphere.io_pipelines.yaml
@@ -0,0 +1,225 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.13.0
+ name: pipelines.kubekey.kubesphere.io
+spec:
+ group: kubekey.kubesphere.io
+ names:
+ kind: Pipeline
+ listKind: PipelineList
+ plural: pipelines
+ singular: pipeline
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.playbook
+ name: Playbook
+ type: string
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ - jsonPath: .status.taskResult.total
+ name: Total
+ type: integer
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ configRef:
+ description: ConfigRef is the global variable configuration for playbook
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: 'If referring to a piece of an object instead of
+ an entire object, this string should contain a valid JSON/Go
+ field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within
+ a pod, this would take on a value like: "spec.containers{name}"
+ (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]"
+ (container with index 2 in this pod). This syntax is chosen
+ only to have some well-defined way of referencing a part of
+ an object. TODO: this design is not final and this field is
+ subject to change in the future.'
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+ type: string
+ resourceVersion:
+ description: 'Specific resourceVersion to which this reference
+ is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ debug:
+ description: Debug mode, after a successful execution of Pipeline,
+ will retain runtime data, which includes task execution status and
+ parameters.
+ type: boolean
+ inventoryRef:
+ description: InventoryRef is the node configuration for playbook
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: 'If referring to a piece of an object instead of
+ an entire object, this string should contain a valid JSON/Go
+ field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within
+ a pod, this would take on a value like: "spec.containers{name}"
+ (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]"
+ (container with index 2 in this pod). This syntax is chosen
+ only to have some well-defined way of referencing a part of
+ an object. TODO: this design is not final and this field is
+ subject to change in the future.'
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+ type: string
+ resourceVersion:
+ description: 'Specific resourceVersion to which this reference
+ is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ playbook:
+ description: Playbook which to execute.
+ type: string
+ project:
+ description: Project is storage for executable packages
+ properties:
+ addr:
+ description: Addr is the storage for executable packages (in Ansible
+ file format). When starting with http or https, it will be obtained
+ from a Git repository. When starting with file path, it will
+ be obtained from the local path.
+ type: string
+ branch:
+ description: Branch is the git branch of the git Addr.
+ type: string
+ insecureSkipTLS:
+ description: InsecureSkipTLS skip tls or not when git addr is
+ https.
+ type: boolean
+ name:
+ description: Name is the project name base project
+ type: string
+ tag:
+ description: Tag is the git tag of the git Addr.
+ type: string
+ token:
+ description: Token of Authorization for http request
+ type: string
+ type: object
+ skipTags:
+ description: SkipTags is the tags of playbook which skip execute
+ items:
+ type: string
+ type: array
+ tags:
+ description: Tags is the tags of playbook which to execute
+ items:
+ type: string
+ type: array
+ required:
+ - playbook
+ type: object
+ status:
+ properties:
+ failedDetail:
+ description: FailedDetail will record the failed tasks.
+ items:
+ properties:
+ hosts:
+ description: failed Hosts Result of failed task.
+ items:
+ properties:
+ host:
+ description: Host name of failed task.
+ type: string
+ stdErr:
+ description: StdErr of failed task.
+ type: string
+ stdout:
+ description: Stdout of failed task.
+ type: string
+ type: object
+ type: array
+ task:
+ description: Task name of failed task.
+ type: string
+ type: object
+ type: array
+ phase:
+ description: Phase of pipeline.
+ type: string
+ reason:
+ description: failed Reason of pipeline.
+ type: string
+ taskResult:
+ description: TaskResult total related tasks execute result.
+ properties:
+ failed:
+ description: Failed number of tasks.
+ type: integer
+ ignored:
+ description: Ignored number of tasks.
+ type: integer
+ skipped:
+ description: Skipped number of tasks.
+ type: integer
+ success:
+ description: Success number of tasks.
+ type: integer
+ total:
+ description: Total number of tasks.
+ type: integer
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/config/helm/templates/_helpers.tpl b/config/helm/templates/_helpers.tpl
new file mode 100644
index 00000000..f51c77d6
--- /dev/null
+++ b/config/helm/templates/_helpers.tpl
@@ -0,0 +1,43 @@
+{{/*
+Common labels
+*/}}
+{{- define "common.labels" -}}
+helm.sh/chart: {{ include "common.chart" . }}
+{{ include "common.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "common.selectorLabels" -}}
+app.kubernetes.io/name: {{ .Chart.Name }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+
+{{- define "common.image" -}}
+{{- $registryName := .Values.operator.image.registry -}}
+{{- $repositoryName := .Values.operator.image.repository -}}
+{{- $separator := ":" -}}
+{{- $termination := .Values.operator.image.tag | toString -}}
+{{- if .Values.operator.image.digest }}
+ {{- $separator = "@" -}}
+ {{- $termination = .Values.operator.image.digest | toString -}}
+{{- end -}}
+{{- if $registryName }}
+{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
+{{- else }}
+{{- printf "%s%s%s" $repositoryName $separator $termination -}}
+{{- end -}}
+{{- end -}}
diff --git a/config/helm/templates/_tplvalues.tpl b/config/helm/templates/_tplvalues.tpl
new file mode 100644
index 00000000..2db16685
--- /dev/null
+++ b/config/helm/templates/_tplvalues.tpl
@@ -0,0 +1,13 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains template.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+ {{- if typeIs "string" .value }}
+ {{- tpl .value .context }}
+ {{- else }}
+ {{- tpl (.value | toYaml) .context }}
+ {{- end }}
+{{- end -}}
diff --git a/config/helm/templates/deployment.yaml b/config/helm/templates/deployment.yaml
new file mode 100644
index 00000000..e34787c2
--- /dev/null
+++ b/config/helm/templates/deployment.yaml
@@ -0,0 +1,70 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels: {{ include "common.labels" . | nindent 4 }}
+ app: kk-operator
+ name: kk-operator
+ namespace: {{ .Release.Namespace }}
+spec:
+ strategy:
+ rollingUpdate:
+ maxSurge: 0
+ type: RollingUpdate
+ progressDeadlineSeconds: 600
+ replicas: {{ .Values.operator.replicaCount }}
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app: kk-operator
+ template:
+ metadata:
+ labels: {{ include "common.labels" . | nindent 8 }}
+ app: kk-operator
+ spec:
+ serviceAccountName: {{ .Values.serviceAccount.name }}
+ {{- if .Values.operator.pullSecrets }}
+ imagePullSecrets: {{ .Values.operator.pullSecrets }}
+ {{- end }}
+ {{- if .Values.operator.nodeSelector }}
+ nodeSelector: {{ .Values.operator.nodeSelector }}
+ {{- end }}
+ {{- if .Values.operator.affinity }}
+ affinity: {{ .Values.operator.affinity }}
+ {{- end }}
+ {{- if .Values.operator.tolerations }}
+ tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.operator.tolerations "context" .) | nindent 8 }}
+ {{- end }}
+ dnsPolicy: {{ .Values.operator.dnsPolicy }}
+ restartPolicy: {{ .Values.operator.restartPolicy }}
+ schedulerName: {{ .Values.operator.schedulerName }}
+ terminationGracePeriodSeconds: {{ .Values.operator.terminationGracePeriodSeconds }}
+ containers:
+ - name: ks-controller-manager
+ image: {{ template "common.image" . }}
+ imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
+ {{- if .Values.operator.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" .Values.operator.command "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ {{- if .Values.operator.extraEnvVars }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.operator.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if .Values.operator.resources }}
+ resources: {{- toYaml .Values.operator.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - mountPath: /etc/localtime
+ name: host-time
+ readOnly: true
+ {{- if .Values.operator.extraVolumeMounts }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.operator.extraVolumeMounts "context" $) | nindent 12 }}
+ {{- end }}
+ volumes:
+ - hostPath:
+ path: /etc/localtime
+ type: ""
+ name: host-time
+ {{- if .Values.operator.extraVolumes }}
+ {{- include "common.tplvalues.render" (dict "value" .Values.operator.extraVolumes "context" $) | nindent 8 }}
+ {{- end }}
diff --git a/config/helm/templates/role.yaml b/config/helm/templates/role.yaml
new file mode 100644
index 00000000..11eaaeeb
--- /dev/null
+++ b/config/helm/templates/role.yaml
@@ -0,0 +1,36 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ .Values.role }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - kubekey.kubesphere.io
+ resources:
+ - configs
+ - inventories
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kubekey.kubesphere.io
+ resources:
+ - pipelines
+ - pipelines/status
+ verbs:
+ - "*"
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - "*"
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - "*"
diff --git a/config/helm/templates/serviceaccount.yaml b/config/helm/templates/serviceaccount.yaml
new file mode 100644
index 00000000..43ca3988
--- /dev/null
+++ b/config/helm/templates/serviceaccount.yaml
@@ -0,0 +1,27 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Values.serviceAccount.name }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "common.labels" . | nindent 4}}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ .Values.serviceAccount.name }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ .Values.role }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ .Values.serviceAccount.name }}
+ namespace: {{ .Release.Namespace }}
diff --git a/config/helm/values.yaml b/config/helm/values.yaml
new file mode 100644
index 00000000..25ff8a3f
--- /dev/null
+++ b/config/helm/values.yaml
@@ -0,0 +1,84 @@
+## @section Common parameters
+##
+# the role which operator pod need
+role: "kk-operator"
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: "kk-operator"
+
+
+operator:
+ # tolerations of operator pod
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ key: node.kubernetes.io/not-ready
+ operator: Exists
+ tolerationSeconds: 60
+ - effect: NoExecute
+ key: node.kubernetes.io/unreachable
+ operator: Exists
+ tolerationSeconds: 60
+ # affinity of operator pod
+ affinity: { }
+ # nodeSelector of operator pod
+ nodeSelector: { }
+ # dnsPolicy of operator pod
+ dnsPolicy: Default
+ # restartPolicy of operator pod
+ restartPolicy: Always
+ # schedulerName of operator pod
+ schedulerName: default-scheduler
+ # terminationGracePeriodSeconds of operator pod
+ terminationGracePeriodSeconds: 30
+ # replica of operator deployment
+ replicaCount: 1
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ pullSecrets: []
+ image:
+ registry: ""
+ repository: kubesphere/kubekey-operator
+ tag: ""
+ digest: ""
+ pullPolicy: IfNotPresent
+ ##
+ ## @param resources.limits The resources limits for the operator containers
+ ## @param resources.requests The requested resources for the operator containers
+ ##
+ resources:
+ limits:
+ cpu: 1
+ memory: 1000Mi
+ requests:
+ cpu: 30m
+ memory: 50Mi
+ ## @param command Override default container command (useful when using custom images)
+ ##
+ command:
+ - controller-manager
+ - --logtostderr=true
+ - --leader-election=true
+ - --controllers=*
+ ## @param extraEnvVars Array with extra environment variables to add to operator pods
+ ##
+ extraEnvVars: []
+ ## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the operator container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param extraVolumes Optionally specify extra list of additional volumes for the operator pod(s)
+ ##
+ extraVolumes: []
diff --git a/example/Makefile b/example/Makefile
new file mode 100644
index 00000000..a24082b0
--- /dev/null
+++ b/example/Makefile
@@ -0,0 +1,23 @@
+BaseDir := $(CURDIR)/..
+playbooks := bootstrap-os.yaml
+
+.PHONY: build
+build:
+ go build -o $(BaseDir)/example -gcflags all=-N github.com/kubesphere/kubekey/v4/cmd/kk
+
+.PHONY: run-playbook
+run-playbook: build
+ @for pb in $(playbooks); do \
+ $(BaseDir)/example/kk run --work-dir=$(BaseDir)/example/test \
+ --project-addr=git@github.com:littleBlackHouse/kse-installer.git \
+ --project-branch=demo --inventory=$(BaseDir)/example/inventory.yaml \
+ --config=$(BaseDir)/example/config.yaml \
+ --debug playbooks/$$pb;\
+ done
+
+.PHONY: precheck
+precheck: build
+ $(BaseDir)/example/kk precheck --work-dir=$(BaseDir)/example/test \
+ --inventory=$(BaseDir)/example/inventory.yaml \
+ --config=$(BaseDir)/example/config.yaml
+
diff --git a/example/config.yaml b/example/config.yaml
new file mode 100644
index 00000000..19482c05
--- /dev/null
+++ b/example/config.yaml
@@ -0,0 +1,19 @@
+apiVersion: kubekey.kubesphere.io/v1
+kind: Config
+metadata:
+ name: example
+spec:
+ etcd_deployment_type: external
+ supported_os_distributions: [ ubuntu ]
+ kube_network_plugin: flannel
+ kube_version: 1.23.15
+ kube_version_min_required: 1.19.10
+ download_run_once: true
+ minimal_master_memory_mb: 10 #MB
+ minimal_node_memory_mb: 10 #MB
+ kube_network_node_prefix: 24
+ container_manager: containerd
+ containerd_version: v1.7.0
+ containerd_min_version_required: v1.6.0
+ kube_external_ca_mode: true
+ cilium_deploy_additionally: true
diff --git a/example/inventory.yaml b/example/inventory.yaml
new file mode 100644
index 00000000..d78c7967
--- /dev/null
+++ b/example/inventory.yaml
@@ -0,0 +1,26 @@
+apiVersion: kubekey.kubesphere.io/v1
+kind: Inventory
+metadata:
+ name: example
+spec:
+ hosts:
+ kk:
+ ssh_host: xxx
+ groups:
+ k8s_cluster:
+ groups:
+ - kube_control_plane
+ - kube_node
+ kube_control_plane:
+ hosts:
+ - kk
+ kube_node:
+ hosts:
+ - kk
+ etcd:
+ hosts:
+ - kk
+ vars:
+ ssh_port: xxx
+ ssh_user: xxx
+ ssh_password: xxx
diff --git a/example/pipeline.yaml b/example/pipeline.yaml
new file mode 100644
index 00000000..fd346534
--- /dev/null
+++ b/example/pipeline.yaml
@@ -0,0 +1,18 @@
+apiVersion: kubekey.kubesphere.io/v1
+kind: Pipeline
+metadata:
+ name: precheck-example
+ annotations:
+ "kubekey.kubesphere.io/builtins-repo": ""
+spec:
+ playbook: playbooks/precheck.yaml
+ inventoryRef:
+ apiVersion: kubekey.kubesphere.io/v1
+ kind: Inventory
+ name: example
+ namespace: default
+ configRef:
+ apiVersion: kubekey.kubesphere.io/v1
+ kind: Config
+ name: example
+ namespace: default
diff --git a/exp/README.md b/exp/README.md
new file mode 100644
index 00000000..3808b322
--- /dev/null
+++ b/exp/README.md
@@ -0,0 +1,7 @@
+# Experimental
+
+⚠️ This package holds experimental code and API types. ⚠️
+
+## Compatibility notice
+
+This package does not adhere to any compatibility guarantees. Some portions may eventually be promoted out of this package and considered stable/GA, while others may be removed entirely.
\ No newline at end of file
diff --git a/go.mod b/go.mod
new file mode 100644
index 00000000..6d80c0cd
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,94 @@
+module github.com/kubesphere/kubekey/v4
+
+go 1.20
+
+require (
+ github.com/evanphx/json-patch v5.7.0+incompatible
+ github.com/flosch/pongo2/v6 v6.0.0
+ github.com/go-git/go-git/v5 v5.11.0
+ github.com/google/gops v0.3.28
+ github.com/google/uuid v1.5.0
+ github.com/pkg/sftp v1.13.6
+ github.com/spf13/cobra v1.8.0
+ github.com/spf13/pflag v1.0.5
+ github.com/stretchr/testify v1.8.4
+ golang.org/x/crypto v0.17.0
+ golang.org/x/time v0.5.0
+ gopkg.in/yaml.v3 v3.0.1
+ k8s.io/api v0.29.0
+ k8s.io/apimachinery v0.29.0
+ k8s.io/client-go v0.29.0
+ k8s.io/component-base v0.29.0
+ k8s.io/klog/v2 v2.110.1
+ k8s.io/utils v0.0.0-20240102154912-e7106e64919e
+ sigs.k8s.io/controller-runtime v0.16.3
+ sigs.k8s.io/yaml v1.4.0
+)
+
+require (
+ dario.cat/mergo v1.0.0 // indirect
+ github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/blang/semver/v4 v4.0.0 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cloudflare/circl v1.3.7 // indirect
+ github.com/cyphar/filepath-securejoin v0.2.4 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/emicklei/go-restful/v3 v3.11.1 // indirect
+ github.com/emirpasic/gods v1.18.1 // indirect
+ github.com/evanphx/json-patch/v5 v5.7.0 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
+ github.com/go-git/go-billy/v5 v5.5.0 // indirect
+ github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-openapi/jsonpointer v0.20.2 // indirect
+ github.com/go-openapi/jsonreference v0.20.4 // indirect
+ github.com/go-openapi/swag v0.22.7 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
+ github.com/google/gnostic-models v0.6.8 // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/gofuzz v1.2.0 // indirect
+ github.com/imdario/mergo v0.3.16 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/kevinburke/ssh_config v1.2.0 // indirect
+ github.com/kr/fs v0.1.0 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/pjbgf/sha1cd v0.3.0 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/client_golang v1.18.0 // indirect
+ github.com/prometheus/client_model v0.5.0 // indirect
+ github.com/prometheus/common v0.45.0 // indirect
+ github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/sergi/go-diff v1.3.1 // indirect
+ github.com/skeema/knownhosts v1.2.1 // indirect
+ github.com/xanzy/ssh-agent v0.3.3 // indirect
+ golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect
+ golang.org/x/mod v0.14.0 // indirect
+ golang.org/x/net v0.19.0 // indirect
+ golang.org/x/oauth2 v0.15.0 // indirect
+ golang.org/x/sys v0.15.0 // indirect
+ golang.org/x/term v0.15.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/tools v0.16.1 // indirect
+ gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
+ google.golang.org/appengine v1.6.8 // indirect
+ google.golang.org/protobuf v1.32.0 // indirect
+ gopkg.in/inf.v0 v0.9.1 // indirect
+ gopkg.in/warnings.v0 v0.1.2 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ k8s.io/apiextensions-apiserver v0.29.0 // indirect
+ k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432 // indirect
+ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 00000000..66c5eb1d
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,311 @@
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX7IL/m9Y5LO+KQYv+t1CQOiFe6+SV2J7bE=
+github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
+github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
+github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.11.1 h1:S+9bSbua1z3FgCnV0KKOSSZ3mDthb5NyEPL5gEpCvyk=
+github.com/emicklei/go-restful/v3 v3.11.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
+github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
+github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
+github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
+github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/flosch/pongo2/v6 v6.0.0 h1:lsGru8IAzHgIAw6H2m4PCyleO58I40ow6apih0WprMU=
+github.com/flosch/pongo2/v6 v6.0.0/go.mod h1:CuDpFm47R0uGGE7z13/tTlt1Y6zdxvr2RLT5LJhsHEU=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
+github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
+github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
+github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
+github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
+github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
+github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
+github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
+github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
+github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8=
+github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark=
+github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
+github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
+github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
+github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
+github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
+github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
+github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
+github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
+github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
+github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
+github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/exp v0.0.0-20231226003508-02704c960a9b h1:kLiC65FbiHWFAOu+lxwNPujcsl8VYyTYYEZnsOO1WK4=
+golang.org/x/exp v0.0.0-20231226003508-02704c960a9b/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
+golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM=
+golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
+golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
+golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
+gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A=
+k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA=
+k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0=
+k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc=
+k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o=
+k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis=
+k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8=
+k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38=
+k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s=
+k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M=
+k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
+k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
+k8s.io/kube-openapi v0.0.0-20231214164306-ab13479f8bf8 h1:yHNkNuLjht7iq95pO9QmbjOWCguvn8mDe3lT78nqPkw=
+k8s.io/kube-openapi v0.0.0-20231214164306-ab13479f8bf8/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
+k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432 h1:+XYBQU3ZKUu60H6fEnkitTTabGoKfIG8zczhZBENu9o=
+k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw=
+k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI=
+k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
+k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4=
+sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/hack/auto-update-version.py b/hack/auto-update-version.py
new file mode 100755
index 00000000..f9fa93f9
--- /dev/null
+++ b/hack/auto-update-version.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+
+# Copyright 2022 The KubeSphere Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import requests
+import re
+import json
+from natsort import natsorted
+import collections
+
+GITHUB_BASE_URL = "https://api.github.com"
+ORG = "kubernetes"
+REPO = "kubernetes"
+PER_PAGE = 15
+
+ARCH_LIST = ["amd64", "arm64"]
+K8S_COMPONENTS = ["kubeadm", "kubelet", "kubectl"]
+
+
+def get_releases(org, repo, per_page=30):
+    # Fetch the most recent releases of the given GitHub repository via the
+    # public REST API. Returns the decoded JSON list of release objects on
+    # success; returns None implicitly if the HTTP request raises.
+    # NOTE(review): the bare `except` swallows every exception (even
+    # KeyboardInterrupt) and the implicit None return makes the caller in
+    # get_new_kubernetes_version() iterate over None -- consider narrowing to
+    # requests.RequestException and returning [] on failure.
+    try:
+        response = requests.get("{}/repos/{}/{}/releases?per_page={}".format(GITHUB_BASE_URL, org, repo, per_page))
+    except:
+        print("fetch {}/{} releases failed".format(org, repo))
+    else:
+        return response.json()
+
+
+def get_new_kubernetes_version(current_version):
+    # Compare upstream kubernetes/kubernetes release tags against the versions
+    # already recorded in current_version and return the tags not yet present.
+    #
+    # current_version: the parsed version/components.json mapping; only the
+    # 'kubeadm'/'amd64' sub-map is consulted, assuming every component/arch
+    # map carries the same set of version keys.
+    new_versions = []
+
+    kubernetes_release = get_releases(org=ORG, repo=REPO, per_page=PER_PAGE)
+
+    for release in kubernetes_release:
+        tag = release['tag_name']
+        # Accept only final releases shaped like vX.Y.Z (skips alpha/beta/rc).
+        # NOTE(review): the unescaped dots match any character; harmless here,
+        # but "^v[0-9]+\.[0-9]+\.[0-9]+$" would be stricter.
+        res = re.search("^v[0-9]+.[0-9]+.[0-9]+$", tag)
+        if res and tag not in current_version['kubeadm']['amd64'].keys():
+            new_versions.append(tag)
+
+    return new_versions
+
+
+def fetch_kubernetes_sha256(versions):
+    # Download the published sha256 checksum for every (component, arch)
+    # combination of each new version from the official release bucket.
+    # Returns a dict keyed "<binary>-<arch>-<version>" -> checksum text.
+    # Versions whose checksum URL does not answer HTTP 200 are silently
+    # skipped (no retry).
+    new_sha256 = {}
+
+    for version in versions:
+        for binary in K8S_COMPONENTS:
+            for arch in ARCH_LIST:
+                response = requests.get(
+                    "https://storage.googleapis.com/kubernetes-release/release/{}/bin/linux/{}/{}.sha256".format(
+                        version, arch, binary))
+                if response.status_code == 200:
+                    # Key is later split on '-' in main(); binary, arch and
+                    # vX.Y.Z tags contain no '-', so the 3-way split is safe.
+                    new_sha256["{}-{}-{}".format(binary, arch, version)] = response.text
+
+    return new_sha256
+
+
+def version_sort(data):
+    # Return an OrderedDict with the same entries as `data`, keys re-ordered
+    # by natural (version-aware) sort so e.g. v1.9.x comes before v1.10.x,
+    # which plain lexicographic sorting would get wrong.
+    version_list = natsorted([*data])
+    sorted_data = collections.OrderedDict()
+
+    for v in version_list:
+        sorted_data[v] = data[v]
+
+    return sorted_data
+
+
+def main():
+    # Entry point: merge newly published Kubernetes versions (and their
+    # per-component sha256 checksums) into version/components.json, then
+    # write the list of new versions to version.tmp for later CI steps.
+    # get current support versions
+    with open("version/components.json", "r") as f:
+        data = json.load(f)
+
+    # get new kubernetes versions
+    new_versions = get_new_kubernetes_version(current_version=data)
+
+    if len(new_versions) > 0:
+        # fetch new kubernetes sha256
+        new_sha256 = fetch_kubernetes_sha256(new_versions)
+
+        if new_sha256:
+            for k, v in new_sha256.items():
+                # Key shape is "<binary>-<arch>-<version>"; none of the three
+                # parts contains '-', so a plain split recovers them.
+                info = k.split('-')
+                data[info[0]][info[1]][info[2]] = v
+
+            # Keep each component/arch version map naturally sorted so the
+            # rewritten components.json stays diff-stable.
+            for binary in K8S_COMPONENTS:
+                for arch in ARCH_LIST:
+                    data[binary][arch] = version_sort(data[binary][arch])
+
+            print(new_versions)
+            # update components.json
+            with open("version/components.json", 'w') as f:
+                json.dump(data, f, indent=4, ensure_ascii=False)
+
+    # set new version to tmp file
+    # NOTE(review): this runs even when no new versions were found, truncating
+    # version.tmp to an empty file -- presumably intentional so CI can detect
+    # "nothing new"; confirm against the workflow that consumes it.
+    with open("version.tmp", 'w') as f:
+        f.write("\n".join(new_versions))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt
new file mode 100644
index 00000000..68fe49d3
--- /dev/null
+++ b/hack/boilerplate.go.txt
@@ -0,0 +1,15 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
diff --git a/hack/ensure-golangci-lint.sh b/hack/ensure-golangci-lint.sh
new file mode 100755
index 00000000..8c2729eb
--- /dev/null
+++ b/hack/ensure-golangci-lint.sh
@@ -0,0 +1,422 @@
+#!/usr/bin/env bash
+
+# Copyright 2021 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# NOTE: This script is copied from https://raw.githubusercontent.com/golangci/golangci-lint/main/install.sh.
+
+set -e
+
+# NOTE(review): this function is truncated in the patch — the heredoc that
+# printed the usage text is missing ("cat </dev/null" is garbled residue).
+usage() {
+ this=$1
+ cat </dev/null
+}
+# Print all arguments to stderr.
+echoerr() {
+ echo "$@" 1>&2
+}
+# Prefix used by the log_* helpers: the script's own name ($0).
+log_prefix() {
+ echo "$0"
+}
+# Current log priority threshold (syslog-style, default 6 = info).
+_logp=6
+log_set_priority() {
+ _logp="$1"
+}
+# With an argument: succeed iff that priority is at or below the threshold.
+# Without: print the current threshold.
+log_priority() {
+ if test -z "$1"; then
+ echo "$_logp"
+ return
+ fi
+ [ "$1" -le "$_logp" ]
+}
+# Map a syslog numeric priority to its textual tag.
+log_tag() {
+ case $1 in
+ 0) echo "emerg" ;;
+ 1) echo "alert" ;;
+ 2) echo "crit" ;;
+ 3) echo "err" ;;
+ 4) echo "warning" ;;
+ 5) echo "notice" ;;
+ 6) echo "info" ;;
+ 7) echo "debug" ;;
+ *) echo "$1" ;;
+ esac
+}
+# Leveled loggers: emit to stderr only when the level passes log_priority.
+log_debug() {
+ log_priority 7 || return 0
+ echoerr "$(log_prefix)" "$(log_tag 7)" "$@"
+}
+log_info() {
+ log_priority 6 || return 0
+ echoerr "$(log_prefix)" "$(log_tag 6)" "$@"
+}
+log_err() {
+ log_priority 3 || return 0
+ echoerr "$(log_prefix)" "$(log_tag 3)" "$@"
+}
+log_crit() {
+ log_priority 2 || return 0
+ echoerr "$(log_prefix)" "$(log_tag 2)" "$@"
+}
+# Print the running OS normalized to a GOOS-style value (windows variants
+# of uname -s are collapsed to "windows").
+uname_os() {
+ os=$(uname -s | tr '[:upper:]' '[:lower:]')
+ case "$os" in
+ cygwin_nt*) os="windows" ;;
+ mingw*) os="windows" ;;
+ msys_nt*) os="windows" ;;
+ esac
+ echo "$os"
+}
+# Print the machine architecture normalized to a GOARCH-style value.
+uname_arch() {
+ arch=$(uname -m)
+ case $arch in
+ x86_64) arch="amd64" ;;
+ x86) arch="386" ;;
+ i686) arch="386" ;;
+ i386) arch="386" ;;
+ aarch64) arch="arm64" ;;
+ armv5*) arch="armv5" ;;
+ armv6*) arch="armv6" ;;
+ armv7*) arch="armv7" ;;
+ esac
+ echo ${arch}
+}
+# Fail (and log) unless the normalized OS is a known GOOS value.
+uname_os_check() {
+ os=$(uname_os)
+ case "$os" in
+ darwin) return 0 ;;
+ dragonfly) return 0 ;;
+ freebsd) return 0 ;;
+ linux) return 0 ;;
+ android) return 0 ;;
+ nacl) return 0 ;;
+ netbsd) return 0 ;;
+ openbsd) return 0 ;;
+ plan9) return 0 ;;
+ solaris) return 0 ;;
+ windows) return 0 ;;
+ esac
+ log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib"
+ return 1
+}
+# Fail (and log) unless the normalized arch is a known GOARCH value.
+uname_arch_check() {
+ arch=$(uname_arch)
+ case "$arch" in
+ 386) return 0 ;;
+ amd64) return 0 ;;
+ arm64) return 0 ;;
+ armv5) return 0 ;;
+ armv6) return 0 ;;
+ armv7) return 0 ;;
+ ppc64) return 0 ;;
+ ppc64le) return 0 ;;
+ mips) return 0 ;;
+ mipsle) return 0 ;;
+ mips64) return 0 ;;
+ mips64le) return 0 ;;
+ s390x) return 0 ;;
+ amd64p32) return 0 ;;
+ esac
+ log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. Please file bug report at https://github.com/client9/shlib"
+ return 1
+}
+# Extract an archive, dispatching on its extension (.tar.gz/.tgz/.tar/.zip).
+untar() {
+ tarball=$1
+ case "${tarball}" in
+ *.tar.gz | *.tgz) tar --no-same-owner -xzf "${tarball}" ;;
+ *.tar) tar --no-same-owner -xf "${tarball}" ;;
+ *.zip) unzip "${tarball}" ;;
+ *)
+ log_err "untar unknown archive format for ${tarball}"
+ return 1
+ ;;
+ esac
+}
+# Download $2 to $1 with curl; optional header in $3.
+# Fails unless the final HTTP status is exactly 200.
+http_download_curl() {
+ local_file=$1
+ source_url=$2
+ header=$3
+ if [ -z "$header" ]; then
+ code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url")
+ else
+ code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url")
+ fi
+ if [ "$code" != "200" ]; then
+ log_debug "http_download_curl received HTTP status $code"
+ return 1
+ fi
+ return 0
+}
+# Download $2 to $1 with wget; optional header in $3.
+http_download_wget() {
+ local_file=$1
+ source_url=$2
+ header=$3
+ if [ -z "$header" ]; then
+ wget -q -O "$local_file" "$source_url"
+ else
+ wget -q --header "$header" -O "$local_file" "$source_url"
+ fi
+}
+# Download using whichever of curl/wget is available.
+http_download() {
+ log_debug "http_download $2"
+ if is_command curl; then
+ http_download_curl "$@"
+ return
+ elif is_command wget; then
+ http_download_wget "$@"
+ return
+ fi
+ log_crit "http_download unable to find wget or curl"
+ return 1
+}
+# Fetch URL $1 (optional header $2) and print the body to stdout.
+http_copy() {
+ tmp=$(mktemp)
+ http_download "${tmp}" "$1" "$2" || return 1
+ body=$(cat "$tmp")
+ rm -f "${tmp}"
+ echo "$body"
+}
+# Resolve a GitHub release for owner/repo ($1) at version $2 (default
+# "latest") and print its tag_name, scraped from the JSON redirect body.
+github_release() {
+ owner_repo=$1
+ version=$2
+ test -z "$version" && version="latest"
+ giturl="https://github.com/${owner_repo}/releases/${version}"
+ json=$(http_copy "$giturl" "Accept:application/json")
+ test -z "$json" && return 1
+ version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//')
+ test -z "$version" && return 1
+ echo "$version"
+}
+hash_sha256() {
+ TARGET=${1:-/dev/stdin}
+ if is_command gsha256sum; then
+ hash=$(gsha256sum "$TARGET") || return 1
+ echo "$hash" | cut -d ' ' -f 1
+ elif is_command sha256sum; then
+ hash=$(sha256sum "$TARGET") || return 1
+ echo "$hash" | cut -d ' ' -f 1
+ elif is_command shasum; then
+ hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1
+ echo "$hash" | cut -d ' ' -f 1
+ elif is_command openssl; then
+ hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1
+ echo "$hash" | cut -d ' ' -f a
+ else
+ log_crit "hash_sha256 unable to find command to compute sha-256 hash"
+ return 1
+ fi
+}
+# Verify that file $1 matches the sha256 listed for its basename in
+# checksums file $2 (sha256sum format: "<hash>  <name>" per line).
+hash_sha256_verify() {
+ TARGET=$1
+ checksums=$2
+ if [ -z "$checksums" ]; then
+ log_err "hash_sha256_verify checksum file not specified in arg2"
+ return 1
+ fi
+ BASENAME=${TARGET##*/}
+ want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1)
+ if [ -z "$want" ]; then
+ log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'"
+ return 1
+ fi
+ got=$(hash_sha256 "$TARGET")
+ if [ "$want" != "$got" ]; then
+ log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got"
+ return 1
+ fi
+}
+# NOTE(review): the line below is garbled residue in this patch — the tail
+# of the script (main execution flow) appears to have been truncated.
+cat /dev/null < "${file}" && echo -e "\n\nThe hash info have saved to file ${file}.\n\n"
diff --git a/hack/gen-repository-iso/dockerfile.almalinux90 b/hack/gen-repository-iso/dockerfile.almalinux90
new file mode 100644
index 00000000..a90c8b70
--- /dev/null
+++ b/hack/gen-repository-iso/dockerfile.almalinux90
@@ -0,0 +1,21 @@
+# Build an offline RPM repository ISO for AlmaLinux 9.0 on ${TARGETARCH}.
+FROM almalinux:9.0 as almalinux90
+ARG TARGETARCH
+ARG BUILD_TOOLS="dnf-plugins-core createrepo mkisofs epel-release"
+ARG DIR=almalinux-9.0-${TARGETARCH}-rpms
+# yq paths selecting the package lists to include from packages.yaml.
+ARG PKGS=.common[],.rpms[],.almalinux[],.almalinux90[]
+
+RUN dnf install -q -y ${BUILD_TOOLS} \
+ && dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \
+ && dnf makecache
+
+WORKDIR package
+COPY packages.yaml .
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+# ceph-common is filtered out here — presumably unavailable/unwanted on
+# AlmaLinux 9; confirm before changing.
+RUN yq eval ${PKGS} packages.yaml | sed '/^ceph-common$/d' > packages.list
+
+# Download every package with all dependencies, build repo metadata, and
+# pack the directory into an ISO image.
+RUN sort -u packages.list | xargs dnf download --resolve --alldeps --downloaddir=${DIR} \
+ && createrepo -d ${DIR} \
+ && mkisofs -r -o ${DIR}.iso ${DIR}
+
+# Final stage exports only the ISO.
+FROM scratch
+COPY --from=almalinux90 /package/*.iso /
diff --git a/hack/gen-repository-iso/dockerfile.centos7 b/hack/gen-repository-iso/dockerfile.centos7
new file mode 100644
index 00000000..e0ed0cfc
--- /dev/null
+++ b/hack/gen-repository-iso/dockerfile.centos7
@@ -0,0 +1,22 @@
+# Build an offline RPM repository ISO for CentOS 7 on ${TARGETARCH}.
+FROM centos:7 as centos7
+ARG TARGETARCH
+ENV OS=centos
+ENV OS_VERSION=7
+ARG BUILD_TOOLS="yum-utils createrepo mkisofs epel-release"
+ARG DIR=${OS}${OS_VERSION}-${TARGETARCH}-rpms
+
+RUN yum install -q -y ${BUILD_TOOLS} \
+ && yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \
+ && yum makecache
+
+WORKDIR package
+COPY packages.yaml .
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+# Select the common, rpm, centos and centos7 package lists from packages.yaml.
+RUN yq eval ".common[],.rpms[],.${OS}[],.${OS}${OS_VERSION}[]" packages.yaml > packages.list
+
+# repotrack downloads each package with its full dependency closure.
+RUN sort -u packages.list | xargs repotrack -p ${DIR} \
+ && createrepo -d ${DIR} \
+ && mkisofs -r -o ${DIR}.iso ${DIR}
+
+# Final stage exports only the ISO.
+FROM scratch
+COPY --from=centos7 /package/*.iso /
diff --git a/hack/gen-repository-iso/dockerfile.debian10 b/hack/gen-repository-iso/dockerfile.debian10
new file mode 100644
index 00000000..635a124d
--- /dev/null
+++ b/hack/gen-repository-iso/dockerfile.debian10
@@ -0,0 +1,38 @@
+# Build an offline DEB repository ISO for Debian 10 (buster) on ${TARGETARCH}.
+FROM debian:10 as debian10
+ARG TARGETARCH
+ARG OS_RELEASE=buster
+ARG OS_VERSION=10
+ARG DIR=debian-10-${TARGETARCH}-debs
+ARG PKGS=.common[],.debs[],.debian[],.debian10[]
+ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage dirmngr"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# dump system package list
+# NOTE(review): this writes packages.list in the default WORKDIR (/), but the
+# later yq step appends to /package/packages.list — so this dump is never
+# merged into the final list; confirm whether that is intended.
+RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
+# Register the GlusterFS (amd64 only) and Docker CE apt repositories.
+RUN ARCH=$(dpkg --print-architecture) \
+ && apt update -qq \
+ && apt install -y --no-install-recommends $BUILD_TOOLS \
+ && if [ "$TARGETARCH" = "amd64" ]; then \
+ curl -fsSL https://download.gluster.org/pub/gluster/glusterfs/7/rsa.pub | apt-key add - ; \
+ echo deb https://download.gluster.org/pub/gluster/glusterfs/7/LATEST/Debian/${OS_VERSION}/amd64/apt ${OS_RELEASE} main > /etc/apt/sources.list.d/gluster.list ; \
+ fi \
+ && curl -fsSL "https://download.docker.com/linux/debian/gpg" | apt-key add -qq - \
+ && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/debian ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list \
+ && apt update -qq
+
+WORKDIR /package
+COPY packages.yaml .
+
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+# Resolve the package selection to the full list of download URLs.
+RUN yq eval "${PKGS}" packages.yaml >> packages.list \
+ && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
+
+# Download all debs, generate the apt Packages index, and build the ISO.
+RUN mkdir -p ${DIR} \
+ && wget -q -x -P ${DIR} -i packages.urls \
+ && cd ${DIR} \
+ && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
+
+RUN genisoimage -r -o ${DIR}.iso ${DIR}
+
+# Final stage exports only the ISO.
+FROM scratch
+COPY --from=debian10 /package/*.iso /
diff --git a/hack/gen-repository-iso/dockerfile.debian11 b/hack/gen-repository-iso/dockerfile.debian11
new file mode 100644
index 00000000..f99dd95f
--- /dev/null
+++ b/hack/gen-repository-iso/dockerfile.debian11
@@ -0,0 +1,41 @@
+# Build an offline DEB repository ISO for Debian 11 (bullseye) on ${TARGETARCH}.
+FROM debian:11.6 as debian11
+ARG TARGETARCH
+ARG OS_RELEASE=bullseye
+ARG OS_VERSION=11
+ARG DIR=debian-11-${TARGETARCH}-debs
+ARG PKGS=.common[],.debs[],.debian[],.debian11[]
+ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage dirmngr"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# dump system package list
+# NOTE(review): written to / while the later yq step appends to
+# /package/packages.list — this dump is never merged; confirm intent.
+RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
+# Register the GlusterFS (amd64 only) and Docker CE apt repositories.
+RUN ARCH=$(dpkg --print-architecture) \
+ && apt update -qq \
+ && apt install -y --no-install-recommends $BUILD_TOOLS \
+ && if [ "$TARGETARCH" = "amd64" ]; then \
+ curl -fsSL https://download.gluster.org/pub/gluster/glusterfs/7/rsa.pub | apt-key add - ; \
+ echo deb https://download.gluster.org/pub/gluster/glusterfs/7/LATEST/Debian/${OS_VERSION}/amd64/apt ${OS_RELEASE} main > /etc/apt/sources.list.d/gluster.list ; \
+ fi \
+ && curl -fsSL "https://download.docker.com/linux/debian/gpg" | apt-key add -qq - \
+ && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/debian ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list \
+ && apt update -qq \
+ && apt upgrade -y -qq
+
+WORKDIR /package
+COPY packages.yaml .
+
+COPY --from=mikefarah/yq:4.30.8 /usr/bin/yq /usr/bin/yq
+# Resolve the package selection to the full list of download URLs.
+RUN yq eval "${PKGS}" packages.yaml >> packages.list \
+ && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
+
+# Debug layer: echoes the resolved URL list into the build log.
+RUN cat packages.urls
+
+# Download all debs, generate the apt Packages index, and build the ISO.
+RUN mkdir -p ${DIR} \
+ && wget -q -x -P ${DIR} -i packages.urls \
+ && cd ${DIR} \
+ && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
+
+RUN genisoimage -r -o ${DIR}.iso ${DIR}
+
+# Final stage exports only the ISO.
+FROM scratch
+COPY --from=debian11 /package/*.iso /
diff --git a/hack/gen-repository-iso/dockerfile.ubuntu1604 b/hack/gen-repository-iso/dockerfile.ubuntu1604
new file mode 100644
index 00000000..71969819
--- /dev/null
+++ b/hack/gen-repository-iso/dockerfile.ubuntu1604
@@ -0,0 +1,33 @@
+# Build an offline DEB repository ISO for Ubuntu 16.04 (xenial) on ${TARGETARCH}.
+FROM ubuntu:16.04 as ubuntu1604
+ARG TARGETARCH
+ARG OS_RELEASE=xenial
+ARG DIR=ubuntu-16.04-${TARGETARCH}-debs
+ARG PKGS=.common[],.debs[],.ubuntu[],.ubuntu1604[]
+ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# dump system package list
+# NOTE(review): written to / while the later yq step appends to
+# /package/packages.list — this dump is never merged; confirm intent.
+RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
+# Register the GlusterFS PPA and the Docker CE apt repository.
+RUN apt update -qq \
+ && apt install -y --no-install-recommends $BUILD_TOOLS \
+ && add-apt-repository ppa:gluster/glusterfs-7 -y \
+ && curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add -qq - \
+ && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/ubuntu ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list\
+ && apt update -qq
+
+WORKDIR /package
+COPY packages.yaml .
+
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+# Resolve the package selection to the full list of download URLs.
+RUN yq eval "${PKGS}" packages.yaml >> packages.list \
+ && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
+
+# Download all debs, generate the apt Packages index, and build the ISO.
+RUN mkdir -p ${DIR} \
+ && wget -q -x -P ${DIR} -i packages.urls \
+ && cd ${DIR} \
+ && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
+
+RUN genisoimage -r -o ${DIR}.iso ${DIR}
+
+# Final stage exports only the ISO.
+FROM scratch
+COPY --from=ubuntu1604 /package/*.iso /
diff --git a/hack/gen-repository-iso/dockerfile.ubuntu1804 b/hack/gen-repository-iso/dockerfile.ubuntu1804
new file mode 100644
index 00000000..f9852d8d
--- /dev/null
+++ b/hack/gen-repository-iso/dockerfile.ubuntu1804
@@ -0,0 +1,34 @@
+# Build an offline DEB repository ISO for Ubuntu 18.04 (bionic) on ${TARGETARCH}.
+FROM ubuntu:18.04 as ubuntu1804
+ARG TARGETARCH
+ARG OS_RELEASE=bionic
+ARG DIR=ubuntu-18.04-${TARGETARCH}-debs
+ARG PKGS=.common[],.debs[],.ubuntu[],.ubuntu1804[]
+ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# dump system package list
+RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
+# Register the GlusterFS PPA and the Docker CE apt repository.
+RUN apt update -qq \
+ && apt install -y --no-install-recommends $BUILD_TOOLS \
+ && add-apt-repository ppa:gluster/glusterfs-7 -y \
+ && curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add -qq - \
+ && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/ubuntu ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list\
+ && apt update -qq
+
+WORKDIR /package
+COPY packages.yaml .
+
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+# Unlike the other Ubuntu variants, this one re-dumps the installed system
+# packages here so they are included in the download list (the earlier dump
+# went to / rather than /package).
+RUN yq eval "${PKGS}" packages.yaml >> packages.list \
+ && dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 >> packages.list \
+ && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
+
+# Download all debs, generate the apt Packages index, and build the ISO.
+RUN mkdir -p ${DIR} \
+ && wget -q -x -P ${DIR} -i packages.urls \
+ && cd ${DIR} \
+ && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
+
+RUN genisoimage -r -o ${DIR}.iso ${DIR}
+
+# Final stage exports only the ISO.
+FROM scratch
+COPY --from=ubuntu1804 /package/*.iso /
diff --git a/hack/gen-repository-iso/dockerfile.ubuntu2004 b/hack/gen-repository-iso/dockerfile.ubuntu2004
new file mode 100644
index 00000000..9cb4f0b2
--- /dev/null
+++ b/hack/gen-repository-iso/dockerfile.ubuntu2004
@@ -0,0 +1,33 @@
+# Build an offline DEB repository ISO for Ubuntu 20.04 (focal) on ${TARGETARCH}.
+FROM ubuntu:20.04 as ubuntu2004
+ARG TARGETARCH
+ARG OS_RELEASE=focal
+ARG DIR=ubuntu-20.04-${TARGETARCH}-debs
+ARG PKGS=.common[],.debs[],.ubuntu[],.ubuntu2004[]
+ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# dump system package list
+# NOTE(review): written to / while the later yq step appends to
+# /package/packages.list — this dump is never merged; confirm intent.
+RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
+# Register the GlusterFS PPA and the Docker CE apt repository.
+RUN apt update -qq \
+ && apt install -y --no-install-recommends $BUILD_TOOLS \
+ && add-apt-repository ppa:gluster/glusterfs-7 -y \
+ && curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add -qq - \
+ && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/ubuntu ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list\
+ && apt update -qq
+
+WORKDIR /package
+COPY packages.yaml .
+
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+# Resolve the package selection to the full list of download URLs.
+RUN yq eval "${PKGS}" packages.yaml >> packages.list \
+ && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
+
+# Download all debs, generate the apt Packages index, and build the ISO.
+RUN mkdir -p ${DIR} \
+ && wget -q -x -P ${DIR} -i packages.urls \
+ && cd ${DIR} \
+ && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
+
+RUN genisoimage -r -o ${DIR}.iso ${DIR}
+
+# Final stage exports only the ISO.
+FROM scratch
+COPY --from=ubuntu2004 /package/*.iso /
diff --git a/hack/gen-repository-iso/dockerfile.ubuntu2204 b/hack/gen-repository-iso/dockerfile.ubuntu2204
new file mode 100644
index 00000000..7a92912a
--- /dev/null
+++ b/hack/gen-repository-iso/dockerfile.ubuntu2204
@@ -0,0 +1,33 @@
+# Build an offline DEB repository ISO for Ubuntu 22.04 (jammy) on ${TARGETARCH}.
+FROM ubuntu:22.04 as ubuntu2204
+ARG TARGETARCH
+ARG OS_RELEASE=jammy
+ARG DIR=ubuntu-22.04-${TARGETARCH}-debs
+ARG PKGS=.common[],.debs[],.ubuntu[],.ubuntu2204[]
+ARG BUILD_TOOLS="apt-transport-https software-properties-common ca-certificates curl wget gnupg dpkg-dev genisoimage"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# dump system package list
+# NOTE(review): written to / while the later yq step appends to
+# /package/packages.list — this dump is never merged; confirm intent.
+RUN dpkg --get-selections | grep -v deinstall | cut -f1 | cut -d ':' -f1 > packages.list
+# Register the Docker CE apt repository (GlusterFS PPA disabled for jammy).
+RUN apt update -qq \
+ && apt install -y --no-install-recommends $BUILD_TOOLS \
+ #&& add-apt-repository ppa:gluster/glusterfs-7 -y \
+ && curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add -qq - \
+ && echo "deb [arch=$TARGETARCH] https://download.docker.com/linux/ubuntu ${OS_RELEASE} stable" > /etc/apt/sources.list.d/docker.list\
+ && apt update -qq
+
+WORKDIR /package
+COPY packages.yaml .
+
+COPY --from=mikefarah/yq:4.11.1 /usr/bin/yq /usr/bin/yq
+# Resolve the package selection to the full list of download URLs.
+RUN yq eval "${PKGS}" packages.yaml >> packages.list \
+ && sort -u packages.list | xargs apt-get install --yes --reinstall --print-uris | awk -F "'" '{print $2}' | grep -v '^$' | sort -u > packages.urls
+
+# Download all debs, generate the apt Packages index, and build the ISO.
+RUN mkdir -p ${DIR} \
+ && wget -q -x -P ${DIR} -i packages.urls \
+ && cd ${DIR} \
+ && dpkg-scanpackages ./ /dev/null | gzip -9c > ./Packages.gz
+
+RUN genisoimage -r -o ${DIR}.iso ${DIR}
+
+# Final stage exports only the ISO.
+FROM scratch
+COPY --from=ubuntu2204 /package/*.iso /
diff --git a/hack/gen-repository-iso/download-pkgs.sh b/hack/gen-repository-iso/download-pkgs.sh
new file mode 100644
index 00000000..ee0afa35
--- /dev/null
+++ b/hack/gen-repository-iso/download-pkgs.sh
@@ -0,0 +1,7 @@
+#! /bin/sh
+
+for p in ${PACKAGES} ; do
+ echo "\n Download $p ... \n"
+ sudo apt-get download $p 2>>errors.txt
+ for i in $(apt-cache depends $p | grep -E 'Depends|Recommends|Suggests' | cut -d ':' -f 2,3 | sed -e s/' '/''/); do sudo apt-get download $i 2>>errors.txt; done
+done
diff --git a/hack/gen-repository-iso/packages.yaml b/hack/gen-repository-iso/packages.yaml
new file mode 100644
index 00000000..65d89a3e
--- /dev/null
+++ b/hack/gen-repository-iso/packages.yaml
@@ -0,0 +1,88 @@
+---
+# Package lists consumed by the gen-repository-iso Dockerfiles via yq.
+# "common" applies to all distros; "rpms"/"debs" to the packaging family;
+# the per-distro sections pin container-runtime versions.
+common:
+ - curl
+ - ceph-common
+ - net-tools
+ - lvm2
+ - telnet
+ - tcpdump
+ - socat
+ - openssl
+ - chrony
+ - conntrack
+ # (duplicate "curl" entry removed — it was listed twice in this section)
+ - ipvsadm
+ - ipset
+ - psmisc
+ - bash-completion
+ - ebtables
+ - haproxy
+ - keepalived
+rpms:
+ - nfs-utils
+ - yum-utils
+ - bind-utils
+ - glusterfs-fuse
+ - lz4
+ - nss
+ - nss-sysinit
+ - nss-tools
+ - conntrack-tools
+debs:
+ - apt-transport-https
+ - ca-certificates
+ - dnsutils
+ - git
+ - glusterfs-client
+ - gnupg-agent
+ - nfs-common
+ - openssh-server
+ - software-properties-common
+ - sudo
+
+centos:
+ - containerd.io
+
+centos7:
+ - libselinux-python
+ - docker-ce-20.10.8
+ - docker-ce-cli-20.10.8
+
+debian:
+ - containerd.io
+
+debian10:
+ - docker-ce=5:20.10.8~3-0~debian-buster
+ - docker-ce-cli=5:20.10.8~3-0~debian-buster
+
+debian11:
+ - docker-ce=5:20.10.8~3-0~debian-bullseye
+ - docker-ce-cli=5:20.10.8~3-0~debian-bullseye
+
+ubuntu:
+ - containerd.io
+
+ubuntu1604:
+ - docker-ce=5:20.10.8~3-0~ubuntu-xenial
+ - docker-ce-cli=5:20.10.8~3-0~ubuntu-xenial
+
+ubuntu1804:
+ - docker-ce=5:20.10.8~3-0~ubuntu-bionic
+ - docker-ce-cli=5:20.10.8~3-0~ubuntu-bionic
+
+ubuntu2004:
+ - docker-ce=5:20.10.8~3-0~ubuntu-focal
+ - docker-ce-cli=5:20.10.8~3-0~ubuntu-focal
+
+# The minimum version of docker-ce on ubuntu 2204 is 20.10.13
+ubuntu2204:
+ - docker-ce=5:20.10.13~3-0~ubuntu-jammy
+ - docker-ce-cli=5:20.10.13~3-0~ubuntu-jammy
+
+almalinux:
+ - containerd.io
+ - docker-compose-plugin
+
+almalinux90:
+ - docker-ce-20.10.17
+ - docker-ce-cli-20.10.17
diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh
new file mode 100755
index 00000000..721c8c14
--- /dev/null
+++ b/hack/lib/golang.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+
+# This is a modified version of Kubernetes
+KUBE_GO_PACKAGE=kubesphere.io/kubesphere
+
+# Ensure the go tool exists and is a viable version.
+# NOTE(review): this function is truncated in the patch — the heredoc passed
+# to kube::log::usage_from_stdin (and the version check body) is missing, and
+# the lines below it are leftover fragments of a commented-out test harness.
+kube::golang::verify_go_version() {
+ if [[ -z "$(command -v go)" ]]; then
+ kube::log::usage_from_stdin <&1)
+# Y=$(kube::readlinkdashf $1 2>&1)
+# if [ "$X" != "$Y" ]; then
+# echo readlinkdashf $1: expected "$X", got "$Y"
+# fi
+# }
+# testone /
+# testone /tmp
+# testone $T
+# testone $T/file
+# testone $T/dir
+# testone $T/linkfile
+# testone $T/linkdir
+# testone $T/nonexistent
+# testone $T/linkdir/file
+# testone $T/linkdir/dir
+# testone $T/linkdir/linkfile
+# testone $T/linkdir/linkdir
+# Emulates "readlink -f", which is not available on macOS: prints the
+# canonical physical path of $1 (resolving the directory chain; a symlink
+# leaf is resolved one level via readlink).
+function kube::readlinkdashf {
+ # run in a subshell for simpler 'cd'
+ (
+ if [[ -d "${1}" ]]; then # This also catch symlinks to dirs.
+ cd "${1}"
+ pwd -P
+ else
+ cd "$(dirname "${1}")"
+ local f
+ f=$(basename "${1}")
+ if [[ -L "${f}" ]]; then
+ readlink "${f}"
+ else
+ echo "$(pwd -P)/${f}"
+ fi
+ fi
+ )
+}
+
+# This emulates "realpath" which is not available on MacOS X
+# Test:
+# T=/tmp/$$.$RANDOM
+# mkdir $T
+# touch $T/file
+# mkdir $T/dir
+# ln -s $T/file $T/linkfile
+# ln -s $T/dir $T/linkdir
+# function testone() {
+# X=$(realpath $1 2>&1)
+# Y=$(kube::realpath $1 2>&1)
+# if [ "$X" != "$Y" ]; then
+# echo realpath $1: expected "$X", got "$Y"
+# fi
+# }
+# testone /
+# testone /tmp
+# testone $T
+# testone $T/file
+# testone $T/dir
+# testone $T/linkfile
+# testone $T/linkdir
+# testone $T/nonexistent
+# testone $T/linkdir/file
+# testone $T/linkdir/dir
+# testone $T/linkdir/linkfile
+# testone $T/linkdir/linkdir
+# Emulates "realpath", which is not available on macOS: canonicalize an
+# existing path via kube::readlinkdashf, or fail with an error message.
+kube::realpath() {
+ local target="${1}"
+ if [[ -e "${target}" ]]; then
+ kube::readlinkdashf "${target}"
+ else
+ echo "${target}: No such file or directory" >&2
+ return 1
+ fi
+}
diff --git a/hack/lib/logging.sh b/hack/lib/logging.sh
new file mode 100755
index 00000000..ac44d0d4
--- /dev/null
+++ b/hack/lib/logging.sh
@@ -0,0 +1,171 @@
+#!/usr/bin/env bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Controls verbosity of the script output and logging.
+KUBE_VERBOSE="${KUBE_VERBOSE:-5}"
+
+# Handler for when we exit automatically on an error.
+# Borrowed from https://gist.github.com/ahendrix/7030300
+kube::log::errexit() {
+ # Capture the failing pipeline's status before any other command runs.
+ local err="${PIPESTATUS[*]}"
+
+ # If the shell we are in doesn't have errexit set (common in subshells) then
+ # don't dump stacks.
+ set +o | grep -qe "-o errexit" || return
+
+ set +o xtrace
+ local code="${1:-1}"
+ # Print out the stack trace described by $function_stack
+ if [ ${#FUNCNAME[@]} -gt 2 ]
+ then
+ kube::log::error "Call tree:"
+ for ((i=1;i<${#FUNCNAME[@]}-1;i++))
+ do
+ kube::log::error " ${i}: ${BASH_SOURCE[${i}+1]}:${BASH_LINENO[${i}]} ${FUNCNAME[${i}]}(...)"
+ done
+ fi
+ kube::log::error_exit "Error in ${BASH_SOURCE[1]}:${BASH_LINENO[0]}. '${BASH_COMMAND}' exited with status ${err}" "${1:-1}" 1
+}
+
+# Install kube::log::errexit as the ERR trap so any nonzero command dumps a
+# stack trace before exiting.
+kube::log::install_errexit() {
+ # trap ERR to provide an error handler whenever a command exits nonzero this
+ # is a more verbose version of set -o errexit
+ trap 'kube::log::errexit' ERR
+
+ # setting errtrace allows our ERR trap handler to be propagated to functions,
+ # expansions and subshells
+ set -o errtrace
+}
+
+# Print out the stack trace
+#
+# Args:
+# $1 The number of stack frames to skip when printing.
+kube::log::stack() {
+ local stack_skip=${1:-0}
+ # Skip this function's own frame as well.
+ stack_skip=$((stack_skip + 1))
+ if [[ ${#FUNCNAME[@]} -gt ${stack_skip} ]]; then
+ echo "Call stack:" >&2
+ local i
+ for ((i=1 ; i <= ${#FUNCNAME[@]} - stack_skip ; i++))
+ do
+ local frame_no=$((i - 1 + stack_skip))
+ local source_file=${BASH_SOURCE[${frame_no}]}
+ local source_lineno=${BASH_LINENO[$((frame_no - 1))]}
+ local funcname=${FUNCNAME[${frame_no}]}
+ echo " ${i}: ${source_file}:${source_lineno} ${funcname}(...)" >&2
+ done
+ fi
+}
+
+# Log an error and exit.
+# Args:
+# $1 Message to log with the error
+# $2 The error code to return
+# $3 The number of stack frames to skip when printing.
+kube::log::error_exit() {
+ local message="${1:-}"
+ local code="${2:-1}"
+ local stack_skip="${3:-0}"
+ stack_skip=$((stack_skip + 1))
+
+ # Only print the diagnostic block at verbosity >= 4; the exit happens
+ # regardless.
+ if [[ ${KUBE_VERBOSE} -ge 4 ]]; then
+ local source_file=${BASH_SOURCE[${stack_skip}]}
+ local source_line=${BASH_LINENO[$((stack_skip - 1))]}
+ echo "!!! Error in ${source_file}:${source_line}" >&2
+ [[ -z ${1-} ]] || {
+ echo " ${1}" >&2
+ }
+
+ kube::log::stack ${stack_skip}
+
+ echo "Exiting with status ${code}" >&2
+ fi
+
+ exit "${code}"
+}
+
+# Log an error but keep going. Don't dump the stack or exit.
+kube::log::error() {
+ timestamp=$(date +"[%m%d %H:%M:%S]")
+ echo "!!! ${timestamp} ${1-}" >&2
+ shift
+ # Any additional arguments become indented continuation lines.
+ for message; do
+ echo " ${message}" >&2
+ done
+}
+
+# Print a usage message to stderr. The arguments are printed directly.
+kube::log::usage() {
+ echo >&2
+ local message
+ for message; do
+ echo "${message}" >&2
+ done
+ echo >&2
+}
+
+# Like kube::log::usage, but reads the message lines from stdin (used with
+# heredocs).
+kube::log::usage_from_stdin() {
+ local messages=()
+ while read -r line; do
+ messages+=("${line}")
+ done
+
+ kube::log::usage "${messages[@]}"
+}
+
+# Print out some info that isn't a top level status line
+kube::log::info() {
+ local V="${V:-0}"
+ # Use numeric comparison: the original "[[ ${KUBE_VERBOSE} < ${V} ]]"
+ # compared lexicographically, which misbehaves once levels reach two
+ # digits (e.g. "10" < "9" as strings).
+ if [[ ${KUBE_VERBOSE} -lt ${V} ]]; then
+ return
+ fi
+
+ for message; do
+ echo "${message}"
+ done
+}
+
+# Just like kube::log::info, but no \n, so you can make a progress bar
+kube::log::progress() {
+ for message; do
+ echo -e -n "${message}"
+ done
+}
+
+# Like kube::log::info, but reads the message lines from stdin (used with
+# heredocs).
+kube::log::info_from_stdin() {
+ local messages=()
+ while read -r line; do
+ messages+=("${line}")
+ done
+
+ kube::log::info "${messages[@]}"
+}
+
+# Print a status line. Formatted to show up in a stream of output.
+kube::log::status() {
+ local V="${V:-0}"
+ # Use numeric comparison: the original "[[ ${KUBE_VERBOSE} < ${V} ]]"
+ # compared lexicographically, which misbehaves once levels reach two
+ # digits (e.g. "10" < "9" as strings).
+ if [[ ${KUBE_VERBOSE} -lt ${V} ]]; then
+ return
+ fi
+
+ timestamp=$(date +"[%m%d %H:%M:%S]")
+ echo "+++ ${timestamp} ${1}"
+ shift
+ for message; do
+ echo " ${message}"
+ done
+}
diff --git a/hack/lib/util.sh b/hack/lib/util.sh
new file mode 100755
index 00000000..2bb1a14b
--- /dev/null
+++ b/hack/lib/util.sh
@@ -0,0 +1,765 @@
+#!/usr/bin/env bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function kube::util::sourced_variable {
+ # Call this function to tell shellcheck that a variable is supposed to
+ # be used from other calling context. This helps quiet an "unused
+ # variable" warning from shellcheck and also document your code.
+ true
+}
+
+# Emit the current time as a sortable YYYYMMDD-HHMMSS stamp.
+kube::util::sortable_date() {
+ date "+%Y%m%d-%H%M%S"
+}
+
+# arguments: target, item1, item2, item3, ...
+# returns 0 if target is in the given items, 1 otherwise.
+kube::util::array_contains() {
+ local needle="$1"
+ shift
+ local candidate
+ for candidate in "$@"; do
+ if [[ "${candidate}" == "${needle}" ]]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+# Poll a URL with curl until it answers or the retry budget is spent.
+# $1 url, $2 log prefix, $3 seconds between tries (default 1),
+# $4 number of tries (default 30), $5 curl --max-time (default 1).
+# Returns 0 on first success, 1 after exhausting all tries.
+kube::util::wait_for_url() {
+ local url=$1
+ local prefix=${2:-}
+ local wait=${3:-1}
+ local times=${4:-30}
+ local maxtime=${5:-1}
+
+ command -v curl >/dev/null || {
+ kube::log::usage "curl must be installed"
+ exit 1
+ }
+
+ local i
+ for i in $(seq 1 "${times}"); do
+ local out
+ # -g: no URL globbing; -k: allow insecure TLS; -f: fail on HTTP errors; -s: silent
+ if out=$(curl --max-time "${maxtime}" -gkfs "${url}" 2>/dev/null); then
+ kube::log::status "On try ${i}, ${prefix}: ${out}"
+ return 0
+ fi
+ sleep "${wait}"
+ done
+ kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each"
+ return 1
+}
+
+# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
+# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
+# Prepends $1 to the existing trap handler (if any) for each signal named
+# in the remaining arguments, so multiple handlers can coexist.
+kube::util::trap_add() {
+ local trap_add_cmd
+ trap_add_cmd=$1
+ shift
+
+ for trap_add_name in "$@"; do
+ local existing_cmd
+ local new_cmd
+
+ # Grab the currently defined trap commands for this trap
+ # (trap -p prints: trap -- 'cmd' NAME; awk extracts the quoted cmd).
+ existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}')
+
+ if [[ -z "${existing_cmd}" ]]; then
+ new_cmd="${trap_add_cmd}"
+ else
+ new_cmd="${trap_add_cmd};${existing_cmd}"
+ fi
+
+ # Assign the test. Disable the shellcheck warning telling that trap
+ # commands should be single quoted to avoid evaluating them at this
+ # point instead evaluating them at run time. The logic of adding new
+ # commands to a single trap requires them to be evaluated right away.
+ # shellcheck disable=SC2064
+ trap "${new_cmd}" "${trap_add_name}"
+ done
+}
+
+# Opposite of kube::util::ensure-temp-dir(): removes the temp tree.
+kube::util::cleanup-temp-dir() {
+ rm -rf "${KUBE_TEMP}"
+}
+
+# Create a temp dir that'll be deleted at the end of this bash session.
+# Idempotent: reuses KUBE_TEMP if it is already set.
+#
+# Vars set:
+# KUBE_TEMP
+kube::util::ensure-temp-dir() {
+ if [[ -z ${KUBE_TEMP-} ]]; then
+ # macOS mktemp requires the -t template; try GNU form first.
+ KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX)
+ kube::util::trap_add kube::util::cleanup-temp-dir EXIT
+ fi
+}
+
+# Echo the host OS as a Go-style identifier (darwin or linux);
+# exits 1 for any other uname -s value.
+kube::util::host_os() {
+ local host_os
+ case "$(uname -s)" in
+ Darwin)
+ host_os=darwin
+ ;;
+ Linux)
+ host_os=linux
+ ;;
+ *)
+ kube::log::error "Unsupported host OS. Must be Linux or Mac OS X."
+ exit 1
+ ;;
+ esac
+ echo "${host_os}"
+}
+
+# Echo the host CPU architecture as a Go-style identifier
+# (amd64, arm64, arm, x86, s390x, ppc64le); exits 1 otherwise.
+kube::util::host_arch() {
+ local host_arch
+ case "$(uname -m)" in
+ x86_64*)
+ host_arch=amd64
+ ;;
+ i?86_64*)
+ host_arch=amd64
+ ;;
+ amd64*)
+ host_arch=amd64
+ ;;
+ aarch64*)
+ host_arch=arm64
+ ;;
+ arm64*)
+ host_arch=arm64
+ ;;
+ arm*)
+ host_arch=arm
+ ;;
+ i?86*)
+ host_arch=x86
+ ;;
+ s390x*)
+ host_arch=s390x
+ ;;
+ ppc64le*)
+ host_arch=ppc64le
+ ;;
+ *)
+ kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le."
+ exit 1
+ ;;
+ esac
+ echo "${host_arch}"
+}
+
+# This figures out the host platform without relying on golang. We need this as
+# we don't want a golang install to be a prerequisite to building yet we need
+# this info to figure out where the final binaries are placed.
+# Output format: "<os>/<arch>", e.g. "linux/amd64".
+kube::util::host_platform() {
+ echo "$(kube::util::host_os)/$(kube::util::host_arch)"
+}
+
+# looks for $1 in well-known output locations for the platform ($2)
+# $KUBE_ROOT must be set
+# Echoes the most recently modified match (no trailing newline); echoes
+# nothing when no candidate exists.
+kube::util::find-binary-for-platform() {
+ local -r lookfor="$1"
+ local -r platform="$2"
+ local locations=(
+ "${KUBE_ROOT}/_output/bin/${lookfor}"
+ "${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}"
+ "${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}"
+ "${KUBE_ROOT}/platforms/${platform}/${lookfor}"
+ )
+ # Also search for binary in bazel build tree.
+ # The bazel go rules place some binaries in subtrees like
+ # "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure
+ # the platform name is matched in the path.
+ while IFS=$'\n' read -r location; do
+ locations+=("$location");
+ done < <(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \
+ \( -path "*/${platform/\//_}*/${lookfor}" -o -path "*/${lookfor}" \) 2>/dev/null || true)
+
+ # List most recently-updated location.
+ local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
+ echo -n "${bin}"
+}
+
+# looks for $1 in well-known output locations for the host platform
+# $KUBE_ROOT must be set
+kube::util::find-binary() {
+ kube::util::find-binary-for-platform "$1" "$(kube::util::host_platform)"
+}
+
+# Run all known doc generators (today gendocs and genman for kubectl)
+# $1 is the directory to put those generated documents
+# Requires the generator binaries to already be built (found via
+# kube::util::find-binary); records everything written in docs/.generated_docs.
+kube::util::gen-docs() {
+ local dest="$1"
+
+ # Find binary
+ gendocs=$(kube::util::find-binary "gendocs")
+ genkubedocs=$(kube::util::find-binary "genkubedocs")
+ genman=$(kube::util::find-binary "genman")
+ genyaml=$(kube::util::find-binary "genyaml")
+ genfeddocs=$(kube::util::find-binary "genfeddocs")
+
+ # TODO: If ${genfeddocs} is not used from anywhere (it isn't used at
+ # least from k/k tree), remove it completely.
+ kube::util::sourced_variable "${genfeddocs}"
+
+ mkdir -p "${dest}/docs/user-guide/kubectl/"
+ "${gendocs}" "${dest}/docs/user-guide/kubectl/"
+ mkdir -p "${dest}/docs/admin/"
+ "${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver"
+ "${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager"
+ "${genkubedocs}" "${dest}/docs/admin/" "kube-proxy"
+ "${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler"
+ "${genkubedocs}" "${dest}/docs/admin/" "kubelet"
+ "${genkubedocs}" "${dest}/docs/admin/" "kubeadm"
+
+ mkdir -p "${dest}/docs/man/man1/"
+ "${genman}" "${dest}/docs/man/man1/" "kube-apiserver"
+ "${genman}" "${dest}/docs/man/man1/" "kube-controller-manager"
+ "${genman}" "${dest}/docs/man/man1/" "kube-proxy"
+ "${genman}" "${dest}/docs/man/man1/" "kube-scheduler"
+ "${genman}" "${dest}/docs/man/man1/" "kubelet"
+ "${genman}" "${dest}/docs/man/man1/" "kubectl"
+ "${genman}" "${dest}/docs/man/man1/" "kubeadm"
+
+ mkdir -p "${dest}/docs/yaml/kubectl/"
+ "${genyaml}" "${dest}/docs/yaml/kubectl/"
+
+ # create the list of generated files
+ pushd "${dest}" > /dev/null || return 1
+ touch docs/.generated_docs
+ find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs
+ popd > /dev/null || return 1
+}
+
+# Removes previously generated docs-- we don't want to check them in. $KUBE_ROOT
+# must be set. Reads the manifest written by kube::util::gen-docs.
+kube::util::remove-gen-docs() {
+ if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then
+ # remove all of the old docs; we don't want to check them in.
+ while read -r file; do
+ rm "${KUBE_ROOT}/${file}" 2>/dev/null || true
+ done <"${KUBE_ROOT}/docs/.generated_docs"
+ # The docs/.generated_docs file lists itself, so we don't need to explicitly
+ # delete it.
+ fi
+}
+
+# Takes a group/version and returns the path to its location on disk, sans
+# "pkg". E.g.:
+# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1
+# * default behavior for only a group: experimental -> apis/experimental
+# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned
+# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1
+# * Very special handling for when both group and version are "": / -> api
+#
+# $KUBE_ROOT must be set.
+kube::util::group-version-to-pkg-path() {
+ local group_version="$1"
+
+ # Published groups (present under staging/src/k8s.io/api) resolve to vendor/.
+ while IFS=$'\n' read -r api; do
+ if [[ "${api}" = "${group_version/.*k8s.io/}" ]]; then
+ echo "vendor/k8s.io/api/${group_version/.*k8s.io/}"
+ return
+ fi
+ done < <(cd "${KUBE_ROOT}/staging/src/k8s.io/api" && find . -name types.go -exec dirname {} \; | sed "s|\./||g" | sort)
+
+ # "v1" is the API GroupVersion
+ if [[ "${group_version}" == "v1" ]]; then
+ echo "vendor/k8s.io/api/core/v1"
+ return
+ fi
+
+ # Special cases first.
+ # TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
+ # moving the results to pkg/apis/api.
+ case "${group_version}" in
+ # both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API.
+ __internal)
+ echo "pkg/apis/core"
+ ;;
+ meta/v1)
+ echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1"
+ ;;
+ meta/v1beta1)
+ echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+ ;;
+ *.k8s.io)
+ echo "pkg/apis/${group_version%.*k8s.io}"
+ ;;
+ *.k8s.io/*)
+ echo "pkg/apis/${group_version/.*k8s.io/}"
+ ;;
+ *)
+ echo "pkg/apis/${group_version%__internal}"
+ ;;
+ esac
+}
+
+# Takes a group/version and returns the swagger-spec file name.
+# default behavior: extensions/v1beta1 -> extensions_v1beta1
+# special case for v1: v1 -> v1
+kube::util::gv-to-swagger-name() {
+ local group_version="$1"
+ case "${group_version}" in
+ v1)
+ echo "v1"
+ ;;
+ *)
+ # group is everything before the first "/", version everything after.
+ echo "${group_version%/*}_${group_version#*/}"
+ ;;
+ esac
+}
+
+# Returns the name of the upstream remote repository name for the local git
+# repo, e.g. "upstream" or "origin". Empty output when no remote matches.
+kube::util::git_upstream_remote_name() {
+ git remote -v | grep fetch |\
+ grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |\
+ head -n 1 | awk '{print $1}'
+}
+
+# Exits script if working directory is dirty. If it's run interactively in the terminal
+# the user can commit changes in a second terminal. This script will wait.
+kube::util::ensure_clean_working_dir() {
+ while ! git diff HEAD --exit-code &>/dev/null; do
+ echo -e "\nUnexpected dirty working directory:\n"
+ if tty -s; then
+ git status -s
+ else
+ git diff -a # be more verbose in log files without tty
+ exit 1
+ fi | sed 's/^/ /'
+ echo -e "\nCommit your changes in another terminal and then continue here by pressing enter."
+ read -r
+ done 1>&2
+}
+
+# Find the base commit using:
+# $PULL_BASE_SHA if set (from Prow)
+# current ref from the remote upstream branch
+# $1 is the upstream branch name; echoes the resolved ref, or exits 1
+# when the upstream branch cannot be described.
+kube::util::base_ref() {
+ local -r git_branch=$1
+
+ if [[ -n ${PULL_BASE_SHA:-} ]]; then
+ echo "${PULL_BASE_SHA}"
+ return
+ fi
+
+ full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}"
+
+ # make sure the branch is valid, otherwise the check will pass erroneously.
+ if ! git describe "${full_branch}" >/dev/null; then
+ # abort!
+ exit 1
+ fi
+
+ echo "${full_branch}"
+}
+
+# Checks whether there are any files matching pattern $2 changed between the
+# current branch and upstream branch named by $1.
+# $3 (optional) is an exclusion regex applied before the match.
+# Returns 1 (false) if there are no changes
+# 0 (true) if there are changes detected.
+kube::util::has_changes() {
+ local -r git_branch=$1
+ local -r pattern=$2
+ local -r not_pattern=${3:-totallyimpossiblepattern}
+
+ local base_ref
+ base_ref=$(kube::util::base_ref "${git_branch}")
+ echo "Checking for '${pattern}' changes against '${base_ref}'"
+
+ # notice this uses ... to find the first shared ancestor
+ if git diff --name-only "${base_ref}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
+ return 0
+ fi
+ # also check for pending changes
+ if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
+ echo "Detected '${pattern}' uncommitted changes."
+ return 0
+ fi
+ echo "No '${pattern}' changes detected."
+ return 1
+}
+
+kube::util::download_file() {
+ local -r url=$1
+ local -r destination_file=$2
+
+ rm "${destination_file}" 2&> /dev/null || true
+
+ for i in $(seq 5)
+ do
+ if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then
+ echo "Downloading ${url} failed. $((5-i)) retries left."
+ sleep 1
+ else
+ echo "Downloading ${url} succeed"
+ return 0
+ fi
+ done
+ return 1
+}
+
+# Test whether openssl is installed; exits 1 if not.
+# Sets:
+# OPENSSL_BIN: The path to the openssl binary to use
+function kube::util::test_openssl_installed {
+ if ! openssl version >& /dev/null; then
+ echo "Failed to run openssl. Please ensure openssl is installed"
+ exit 1
+ fi
+
+ OPENSSL_BIN=$(command -v openssl)
+}
+
+# creates a client CA, args are sudo, dest-dir, ca-id, purpose
+# purpose is dropped in after "key encipherment", you usually want
+# '"client auth"'
+# '"server auth"'
+# '"client auth","server auth"'
+function kube::util::create_signing_certkey {
+ local sudo=$1
+ local dest_dir=$2
+ local id=$3
+ local purpose=$4
+ # Create client ca
+ ${sudo} /usr/bin/env bash -e < "${dest_dir}/${id}-ca-config.json"
+EOF
+}
+
+# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
+# NOTE(review): the heredoc bodies in this function and in
+# write_client_kubeconfig below look truncated/mangled in this diff (bash -e
+# is fed /dev/null and the kubeconfig YAML floats free); verify against the
+# upstream k8s hack/lib/util.sh before relying on them.
+function kube::util::create_client_certkey {
+ local sudo=$1
+ local dest_dir=$2
+ local ca=$3
+ local id=$4
+ local cn=${5:-$4}
+ local groups=""
+ local SEP=""
+ shift 5
+ # Remaining args become cfssl-style {"O": "<group>"} entries.
+ while [ -n "${1:-}" ]; do
+ groups+="${SEP}{\"O\":\"$1\"}"
+ SEP=","
+ shift 1
+ done
+ ${sudo} /usr/bin/env bash -e < /dev/null
+apiVersion: v1
+kind: Config
+clusters:
+ - cluster:
+ certificate-authority: ${ca_file}
+ server: https://${api_host}:${api_port}/
+ name: local-up-cluster
+users:
+ - user:
+ token: ${token}
+ client-certificate: ${dest_dir}/client-${client_id}.crt
+ client-key: ${dest_dir}/client-${client_id}.key
+ name: local-up-cluster
+contexts:
+ - context:
+ cluster: local-up-cluster
+ user: local-up-cluster
+ name: local-up-cluster
+current-context: local-up-cluster
+EOF
+
+ # flatten the kubeconfig files to make them self contained
+ username=$(whoami)
+ ${sudo} /usr/bin/env bash -e < "/tmp/${client_id}.kubeconfig"
+ mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig"
+ chown ${username} "${dest_dir}/${client_id}.kubeconfig"
+EOF
+}
+
+# list_staging_repos outputs a sorted list of repos in staging/src/kubesphere.io
+# each entry will just be the $repo portion of staging/src/kubesphere.io/$repo/...
+# (this fork scans kubesphere.io, not the upstream k8s.io tree)
+# $KUBE_ROOT must be set.
+function kube::util::list_staging_repos() {
+ (
+ cd "${KUBE_ROOT}/staging/src/kubesphere.io" && \
+ find . -mindepth 1 -maxdepth 1 -type d | cut -c 3- | sort
+ )
+}
+
+
+# Determines if docker can be run, failures may simply require that the user be added to the docker group.
+# Honors DOCKER_OPTS (space-separated extra flags). Returns 1 with a
+# troubleshooting message when `docker info` fails.
+function kube::util::ensure_docker_daemon_connectivity {
+ IFS=" " read -ra DOCKER <<< "${DOCKER_OPTS}"
+ # Expand ${DOCKER[@]} only if it's not unset. This is to work around
+ # Bash 3 issue with unbound variable.
+ DOCKER=(docker ${DOCKER[@]:+"${DOCKER[@]}"})
+ if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then
+ cat <<'EOF' >&2
+Can't connect to 'docker' daemon. please fix and retry.
+
+Possible causes:
+ - Docker Daemon not started
+ - Linux: confirm via your init system
+ - macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start `
+ - macOS w/ Docker for Mac: Check the menu bar and start the Docker application
+ - DOCKER_HOST hasn't been set or is set incorrectly
+ - Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
+ - macOS w/ docker-machine: run `eval "$(docker-machine env )"`
+ - macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
+ - Other things to check:
+ - Linux: User isn't in 'docker' group. Add and relogin.
+ - Something like 'sudo usermod -a -G docker ${USER}'
+ - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8
+EOF
+ return 1
+ fi
+}
+
+# Wait for background jobs to finish. Return with
+# an error status if any of the jobs failed.
+# The return value is the count of failed jobs.
+kube::util::wait-for-jobs() {
+ local fail=0
+ local job
+ for job in $(jobs -p); do
+ wait "${job}" || fail=$((fail + 1))
+ done
+ return ${fail}
+}
+
+# kube::util::join
+# Concatenates the list elements with the delimiter passed as first parameter
+#
+# Ex: kube::util::join , a b c
+# -> a,b,c
+function kube::util::join {
+ local IFS="$1"
+ shift
+ echo "$*"
+}
+
+# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH
+#
+# Assumed vars:
+# $1 (cfssl directory) (optional)
+#
+# Sets:
+# CFSSL_BIN: The path of the installed cfssl binary
+# CFSSLJSON_BIN: The path of the installed cfssljson binary
+#
+# Exits 1 on non-amd64 hosts without preinstalled cfssl; exits 2 on
+# unsupported kernels; exits 1 when the download produced no executables.
+function kube::util::ensure-cfssl {
+ if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then
+ CFSSL_BIN=$(command -v cfssl)
+ CFSSLJSON_BIN=$(command -v cfssljson)
+ return 0
+ fi
+
+ host_arch=$(kube::util::host_arch)
+
+ # Upstream only publishes amd64 binaries at pkg.cfssl.org.
+ if [[ "${host_arch}" != "amd64" ]]; then
+ echo "Cannot download cfssl on non-amd64 hosts and cfssl does not appear to be installed."
+ echo "Please install cfssl and cfssljson and verify they are in \$PATH."
+ echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
+ exit 1
+ fi
+
+ # Create a temp dir for cfssl if no directory was given
+ local cfssldir=${1:-}
+ if [[ -z "${cfssldir}" ]]; then
+ kube::util::ensure-temp-dir
+ cfssldir="${KUBE_TEMP}/cfssl"
+ fi
+
+ mkdir -p "${cfssldir}"
+ pushd "${cfssldir}" > /dev/null || return 1
+
+ echo "Unable to successfully run 'cfssl' from ${PATH}; downloading instead..."
+ kernel=$(uname -s)
+ case "${kernel}" in
+ Linux)
+ curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
+ curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
+ ;;
+ Darwin)
+ curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64
+ curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64
+ ;;
+ *)
+ echo "Unknown, unsupported platform: ${kernel}." >&2
+ echo "Supported platforms: Linux, Darwin." >&2
+ exit 2
+ esac
+
+ chmod +x cfssl || true
+ chmod +x cfssljson || true
+
+ CFSSL_BIN="${cfssldir}/cfssl"
+ CFSSLJSON_BIN="${cfssldir}/cfssljson"
+ # The || true above means a failed download surfaces here instead.
+ if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then
+ echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH."
+ echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
+ exit 1
+ fi
+ popd > /dev/null || return 1
+}
+
+# kube::util::ensure_dockerized
+# Confirms that the script is being run inside a kube-build image
+# (detected via the /kube-build-image marker file); exits 1 otherwise.
+function kube::util::ensure_dockerized {
+ if [[ -f /kube-build-image ]]; then
+ return 0
+ else
+ echo "ERROR: This script is designed to be run inside a kube-build container"
+ exit 1
+ fi
+}
+
+# kube::util::ensure-gnu-sed
+# Determines which sed binary is gnu-sed on linux/darwin
+#
+# Sets:
+# SED: The name of the gnu-sed binary
+#
+# Returns 1 when neither `sed` nor `gsed` is GNU sed.
+function kube::util::ensure-gnu-sed {
+ if LANG=C sed --help 2>&1 | grep -q GNU; then
+ SED="sed"
+ elif command -v gsed &>/dev/null; then
+ SED="gsed"
+ else
+ kube::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2
+ return 1
+ fi
+ kube::util::sourced_variable "${SED}"
+}
+
+# kube::util::check-file-in-alphabetical-order
+# Check that the file is in alphabetical order
+# Prints the diff plus a fix-it hint to stderr and returns non-zero
+# when the file is out of order.
+function kube::util::check-file-in-alphabetical-order {
+ local failure_file="$1"
+ if ! diff -u "${failure_file}" <(LC_ALL=C sort "${failure_file}"); then
+ {
+ echo
+ echo "${failure_file} is not in alphabetical order. Please sort it:"
+ echo
+ echo " LC_ALL=C sort -o ${failure_file} ${failure_file}"
+ echo
+ } >&2
+ false
+ fi
+}
+
+# kube::util::require-jq
+# Checks whether jq is installed; returns 1 with a message if not.
+function kube::util::require-jq {
+ if ! command -v jq &>/dev/null; then
+ echo "jq not found. Please install." 1>&2
+ return 1
+ fi
+}
+
+# outputs md5 hash of $1, works on macOS and Linux
+# Uses `command -v` (POSIX builtin) rather than the external `which`,
+# which is not guaranteed to exist on minimal images.
+function kube::util::md5() {
+ if command -v md5 >/dev/null 2>&1; then
+ # macOS: md5 -q prints just the digest
+ md5 -q "$1"
+ else
+ # Linux coreutils: strip the trailing filename
+ md5sum "$1" | awk '{ print $1 }'
+ fi
+}
+
+# kube::util::read-array
+# Reads in stdin and adds it line by line to the array provided. This can be
+# used instead of "mapfile -t", and is bash 3 compatible.
+#
+# Assumed vars:
+# $1 (name of array to create/modify)
+#
+# Example usage:
+# kube::util::read-array files < <(ls -1)
+#
+function kube::util::read-array {
+ local i=0
+ unset -v "$1"
+ # read into "$1[i++]" indirectly assigns each line to the caller's array.
+ while IFS= read -r "$1[i++]"; do :; done
+ eval "[[ \${$1[--i]} ]]" || unset "$1[i]" # ensures last element isn't empty
+}
+
+# Some useful colors.
+# ANSI escape sequences, declared read-only and only once (guarded so
+# re-sourcing this file does not trip over the readonly declarations).
+if [[ -z "${color_start-}" ]]; then
+ declare -r color_start="\033["
+ declare -r color_red="${color_start}0;31m"
+ declare -r color_yellow="${color_start}0;33m"
+ declare -r color_green="${color_start}0;32m"
+ declare -r color_blue="${color_start}1;34m"
+ declare -r color_cyan="${color_start}1;36m"
+ declare -r color_norm="${color_start}0m"
+
+ kube::util::sourced_variable "${color_start}"
+ kube::util::sourced_variable "${color_red}"
+ kube::util::sourced_variable "${color_yellow}"
+ kube::util::sourced_variable "${color_green}"
+ kube::util::sourced_variable "${color_blue}"
+ kube::util::sourced_variable "${color_cyan}"
+ kube::util::sourced_variable "${color_norm}"
+fi
+
+# ex: ts=2 sw=2 et filetype=sh
diff --git a/hack/sync-components.sh b/hack/sync-components.sh
new file mode 100755
index 00000000..d6585e89
--- /dev/null
+++ b/hack/sync-components.sh
@@ -0,0 +1,340 @@
+#!/usr/bin/env bash
+
+# Copyright 2022 The KubeSphere Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#####################################################################
+#
+# Usage:
+# Specify the component version through environment variables.
+#
+# For example:
+#
+# KUBERNETES_VERSION=v1.25.3 bash hack/sync-components.sh
+#
+####################################################################
+
+set -e
+
+# Component versions to sync; every assignment below is a no-op mirror of
+# the environment variable of the same name, kept as in-file documentation
+# of which variables this script consumes. Unset versions skip their section.
+KUBERNETES_VERSION=${KUBERNETES_VERSION}
+NODE_LOCAL_DNS_VERSION=${NODE_LOCAL_DNS_VERSION}
+COREDNS_VERSION=${COREDNS_VERSION}
+CALICO_VERSION=${CALICO_VERSION}
+KUBE_OVN_VERSION=${KUBE_OVN_VERSION}
+CILIUM_VERSION=${CILIUM_VERSION}
+OPENEBS_VERSION=${OPENEBS_VERSION}
+KUBEVIP_VERSION=${KUBEVIP_VERSION}
+HAPROXY_VERSION=${HAPROXY_VERSION}
+HELM_VERSION=${HELM_VERSION}
+CNI_VERSION=${CNI_VERSION}
+ETCD_VERSION=${ETCD_VERSION}
+CRICTL_VERSION=${CRICTL_VERSION}
+K3S_VERSION=${K3S_VERSION}
+CONTAINERD_VERSION=${CONTAINERD_VERSION}
+RUNC_VERSION=${RUNC_VERSION}
+COMPOSE_VERSION=${COMPOSE_VERSION}
+# NOTE(review): CALICO_VERSION is assigned twice (also above).
+CALICO_VERSION=${CALICO_VERSION}
+
+# qsctl
+QSCTL_ACCESS_KEY_ID=${QSCTL_ACCESS_KEY_ID}
+QSCTL_SECRET_ACCESS_KEY=${QSCTL_SECRET_ACCESS_KEY}
+
+# docker.io
+DOCKERHUB_USERNAME=${DOCKERHUB_USERNAME}
+DOCKERHUB_PASSWORD=${DOCKERHUB_PASSWORD}
+
+# registry.cn-beijing.aliyuncs.com
+ALIYUNCS_USERNAME=${ALIYUNCS_USERNAME}
+ALIYUNCS_PASSWORD=${ALIYUNCS_PASSWORD}
+
+DOCKERHUB_NAMESPACE="kubesphere"
+ALIYUNCS_NAMESPACE="kubesphereio"
+
+BINARIES=("kubeadm" "kubelet" "kubectl")
+ARCHS=("amd64" "arm64")
+
+# Generate qsctl config (used by every qsctl cp below; removed at the end)
+if [ $QSCTL_ACCESS_KEY_ID ] && [ $QSCTL_SECRET_ACCESS_KEY ];then
+ echo "access_key_id: $QSCTL_ACCESS_KEY_ID" > qsctl-config.yaml
+ echo "secret_access_key: $QSCTL_SECRET_ACCESS_KEY" >> qsctl-config.yaml
+fi
+
+# Login docker.io
+if [ $DOCKERHUB_USERNAME ] && [ $DOCKERHUB_PASSWORD ];then
+ skopeo login docker.io -u $DOCKERHUB_USERNAME -p $DOCKERHUB_PASSWORD
+fi
+
+# Login registry.cn-beijing.aliyuncs.com
+if [ $ALIYUNCS_USERNAME ] && [ $ALIYUNCS_PASSWORD ];then
+ skopeo login registry.cn-beijing.aliyuncs.com -u $ALIYUNCS_USERNAME -p $ALIYUNCS_PASSWORD
+fi
+
+# Sync Kubernetes Binaries and Images
+# Mirrors kubeadm/kubelet/kubectl to QingStor, then mirrors the control-plane
+# images that `kubeadm config images list` reports to both registries.
+if [ $KUBERNETES_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/kube/$KUBERNETES_VERSION/$arch
+ for binary in ${BINARIES[@]}
+ do
+ echo "Synchronizing $binary-$arch"
+
+ curl -L -o binaries/kube/$KUBERNETES_VERSION/$arch/$binary \
+ https://storage.googleapis.com/kubernetes-release/release/$KUBERNETES_VERSION/bin/linux/$arch/$binary
+
+ qsctl cp binaries/kube/$KUBERNETES_VERSION/$arch/$binary \
+ qs://kubernetes-release/release/$KUBERNETES_VERSION/bin/linux/$arch/$binary \
+ -c qsctl-config.yaml
+ done
+ done
+
+ chmod +x binaries/kube/$KUBERNETES_VERSION/amd64/kubeadm
+ # skopeo sync takes the destination registry/namespace; the previous
+ # "/${image##}" suffix referenced an undefined `image` variable and
+ # produced a broken destination path.
+ binaries/kube/$KUBERNETES_VERSION/amd64/kubeadm config images list | xargs -I {} skopeo sync --src docker --dest docker {} docker.io/$DOCKERHUB_NAMESPACE --all
+ binaries/kube/$KUBERNETES_VERSION/amd64/kubeadm config images list | xargs -I {} skopeo sync --src docker --dest docker {} registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE --all
+
+ rm -rf binaries
+fi
+
+# Sync Helm Binary
+if [ $HELM_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/helm/$HELM_VERSION/$arch
+ echo "Synchronizing helm-$arch"
+
+ curl -L -o binaries/helm/$HELM_VERSION/$arch/helm-$HELM_VERSION-linux-$arch.tar.gz \
+ https://get.helm.sh/helm-$HELM_VERSION-linux-$arch.tar.gz
+
+ tar -zxf binaries/helm/$HELM_VERSION/$arch/helm-$HELM_VERSION-linux-$arch.tar.gz -C binaries/helm/$HELM_VERSION/$arch
+
+ qsctl cp $KUBERNETES_VERSION/$arch/linux-$arch/helm \
+ qs://kubernetes-helm/linux-$arch/$HELM_VERSION/helm \
+ -c qsctl-config.yaml
+
+ qsctl cp binaries/helm/$HELM_VERSION/$arch/helm-$HELM_VERSION-linux-$arch.tar.gz \
+ qs://kubernetes-helm/linux-$arch/$HELM_VERSION/helm-$HELM_VERSION-linux-$arch.tar.gz \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync ETCD Binary
+# Mirrors the upstream release tarball for each arch to QingStor.
+if [ $ETCD_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/etcd/$ETCD_VERSION/$arch
+ echo "Synchronizing etcd-$arch"
+
+ curl -L -o binaries/etcd/$ETCD_VERSION/$arch/etcd-$ETCD_VERSION-linux-$arch.tar.gz \
+ https://github.com/coreos/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-$arch.tar.gz
+
+ qsctl cp binaries/etcd/$ETCD_VERSION/$arch/etcd-$ETCD_VERSION-linux-$arch.tar.gz \
+ qs://kubernetes-release/etcd/release/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-$arch.tar.gz \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync CNI Binary
+# Mirrors the containernetworking plugins tarball for each arch to QingStor.
+if [ $CNI_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/cni/$CNI_VERSION/$arch
+ echo "Synchronizing cni-$arch"
+
+ curl -L -o binaries/cni/$CNI_VERSION/$arch/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
+ https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$arch-$CNI_VERSION.tgz
+
+ qsctl cp binaries/cni/$CNI_VERSION/$arch/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
+ qs://containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$arch-$CNI_VERSION.tgz \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync CALICOCTL Binary
+# Mirrors the calicoctl release binary for each arch to QingStor.
+if [ $CALICO_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/calicoctl/$CALICO_VERSION/$arch
+ echo "Synchronizing calicoctl-$arch"
+
+ curl -L -o binaries/calicoctl/$CALICO_VERSION/$arch/calicoctl-linux-$arch \
+ https://github.com/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$arch
+
+ qsctl cp binaries/calicoctl/$CALICO_VERSION/$arch/calicoctl-linux-$arch \
+ qs://kubernetes-release/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$arch \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync crictl Binary
+if [ $CRICTL_VERSION ]; then
+ echo "access_key_id: $ACCESS_KEY_ID" > qsctl-config.yaml
+ echo "secret_access_key: $SECRET_ACCESS_KEY" >> qsctl-config.yaml
+
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/crictl/$CRICTL_VERSION/$arch
+ echo "Synchronizing crictl-$arch"
+
+ curl -L -o binaries/crictl/$CRICTL_VERSION/$arch/crictl-$CRICTL_VERSION-linux-$arch.tar.gz \
+ https://github.com/kubernetes-sigs/cri-tools/releases/download/$CRICTL_VERSION/crictl-$CRICTL_VERSION-linux-$arch.tar.gz
+
+ qsctl cp binaries/crictl/$CRICTL_VERSION/$arch/crictl-$CRICTL_VERSION-linux-$arch.tar.gz \
+ qs://kubernetes-release/cri-tools/releases/download/$CRICTL_VERSION/crictl-$CRICTL_VERSION-linux-$arch.tar.gz \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync k3s Binary
+# Upstream names the amd64 asset plain "k3s" and suffixes other arches.
+if [ $K3S_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/k3s/$K3S_VERSION/$arch
+ echo "Synchronizing k3s-$arch"
+ if [ $arch != "amd64" ]; then
+ curl -L -o binaries/k3s/$K3S_VERSION/$arch/k3s \
+ https://github.com/rancher/k3s/releases/download/$K3S_VERSION+k3s1/k3s-$arch
+ else
+ curl -L -o binaries/k3s/$K3S_VERSION/$arch/k3s \
+ https://github.com/rancher/k3s/releases/download/$K3S_VERSION+k3s1/k3s
+ fi
+ qsctl cp binaries/k3s/$K3S_VERSION/$arch/k3s \
+ qs://kubernetes-release/k3s/releases/download/$K3S_VERSION+k3s1/linux/$arch/k3s \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync containerd Binary
+# Mirrors the containerd release tarball for each arch to QingStor.
+if [ $CONTAINERD_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/containerd/$CONTAINERD_VERSION/$arch
+ echo "Synchronizing containerd-$arch"
+
+ curl -L -o binaries/containerd/$CONTAINERD_VERSION/$arch/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz \
+ https://github.com/containerd/containerd/releases/download/v$CONTAINERD_VERSION/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz
+
+ # Bucket name fixed: was "kubernetes-releas" (typo), every other section
+ # in this script uploads to "kubernetes-release".
+ qsctl cp binaries/containerd/$CONTAINERD_VERSION/$arch/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz \
+ qs://kubernetes-release/containerd/containerd/releases/download/v$CONTAINERD_VERSION/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync runc Binary
+# Mirrors the per-arch runc release binary to QingStor.
+if [ $RUNC_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/runc/$RUNC_VERSION/$arch
+ echo "Synchronizing runc-$arch"
+
+ curl -L -o binaries/runc/$RUNC_VERSION/$arch/runc.$arch \
+ https://github.com/opencontainers/runc/releases/download/$RUNC_VERSION/runc.$arch
+
+ qsctl cp binaries/runc/$RUNC_VERSION/$arch/runc.$arch \
+ qs://kubernetes-release/opencontainers/runc/releases/download/$RUNC_VERSION/runc.$arch \
+ -c qsctl-config.yaml
+ done
+
+ rm -rf binaries
+fi
+
+# Sync docker-compose Binary
+if [ $RUNC_VERSION ]; then
+ for arch in ${ARCHS[@]}
+ do
+ mkdir -p binaries/compose/$COMPOSE_VERSION/$arch
+ echo "Synchronizing runc-$arch"
+ if [ $arch == "amd64" ]; then
+ curl -L -o binaries/compose/$COMPOSE_VERSION/$arch/docker-compose-linux-x86_64 \
+ https://github.com/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-linux-x86_64
+
+ qsctl cp binaries/compose/$COMPOSE_VERSION/$arch/docker-compose-linux-x86_64 \
+ qs://kubernetes-release/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-linux-x86_64 \
+ -c qsctl-config.yaml
+
+ elif [ $arch == "arm64" ]; then
+ curl -L -o binaries/compose/$COMPOSE_VERSION/$arch/docker-compose-linux-aarch64 \
+ https://github.com/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-linux-aarch64
+
+ qsctl cp binaries/compose/$COMPOSE_VERSION/$arch/docker-compose-linux-aarch64 \
+ qs://kubernetes-release/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-linux-aarch64 \
+ -c qsctl-config.yaml
+
+ fi
+ done
+
+ rm -rf binaries
+fi
+
+rm -rf qsctl-config.yaml
+
+# Sync NodeLocalDns Images
+# (synced to both Docker Hub and Aliyun; the remaining sections below
+# mirror only to Aliyun since their sources already live on Docker Hub)
+if [ $NODE_LOCAL_DNS_VERSION ]; then
+ skopeo sync --src docker --dest docker registry.k8s.io/dns/k8s-dns-node-cache:$NODE_LOCAL_DNS_VERSION docker.io/$DOCKERHUB_NAMESPACE/k8s-dns-node-cache:$NODE_LOCAL_DNS_VERSION --all
+ skopeo sync --src docker --dest docker registry.k8s.io/dns/k8s-dns-node-cache:$NODE_LOCAL_DNS_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/k8s-dns-node-cache:$NODE_LOCAL_DNS_VERSION --all
+fi
+
+# Sync Coredns Images
+if [ $COREDNS_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/coredns/coredns:$COREDNS_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/coredns:$COREDNS_VERSION --all
+fi
+
+# Sync Calico Images
+if [ $CALICO_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/calico/kube-controllers:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/kube-controllers:$CALICO_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/calico/cni:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/cni:$CALICO_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/calico/node:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/node:$CALICO_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/calico/pod2daemon-flexvol:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/pod2daemon-flexvol:$CALICO_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/calico/typha:$CALICO_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/typha:$CALICO_VERSION --all
+fi
+
+# Sync Kube-OVN Images
+if [ $KUBE_OVN_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/kubeovn/kube-ovn:$KUBE_OVN_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/kube-ovn:$KUBE_OVN_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/kubeovn/vpc-nat-gateway:$KUBE_OVN_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/vpc-nat-gateway:$KUBE_OVN_VERSION --all
+fi
+
+# Sync Cilium Images
+if [ $CILIUM_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/cilium/cilium:$CILIUM_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/cilium:$CILIUM_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/cilium/cilium-operator-generic:$CILIUM_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/cilium-operator-generic:$CILIUM_VERSION --all
+fi
+
+# Sync OpenEBS Images
+if [ $OPENEBS_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/openebs/provisioner-localpv:$OPENEBS_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/provisioner-localpv:$OPENEBS_VERSION --all
+ skopeo sync --src docker --dest docker docker.io/openebs/linux-utils:$OPENEBS_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/linux-utils:$OPENEBS_VERSION --all
+fi
+
+# Sync Haproxy Images
+if [ $HAPROXY_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/library/haproxy:$HAPROXY_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/haproxy:$HAPROXY_VERSION --all
+fi
+
+# Sync Kube-vip Images
+if [ $KUBEVIP_VERSION ]; then
+ skopeo sync --src docker --dest docker docker.io/plndr/kubevip:$KUBEVIP_VERSION registry.cn-beijing.aliyuncs.com/$ALIYUNCS_NAMESPACE/kubevip:$KUBEVIP_VERSION --all
+fi
diff --git a/hack/update-goimports.sh b/hack/update-goimports.sh
new file mode 100755
index 00000000..2174e3a1
--- /dev/null
+++ b/hack/update-goimports.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
source "${KUBE_ROOT}/hack/lib/util.sh"

kube::golang::verify_go_version

# Ensure that we find the binaries we build before anything else.
export GOBIN="${KUBE_OUTPUT_BINPATH}"
PATH="${GOBIN}:${PATH}"

# Explicitly opt into go modules, even though we're inside a GOPATH directory
export GO111MODULE=on

# >/dev/null: command -v prints the resolved path on success, which polluted
# the script's output in the original.
if ! command -v goimports >/dev/null; then
  # Install goimports
  echo 'installing goimports'
  pushd "${KUBE_ROOT}/hack/tools" >/dev/null
  GO111MODULE=auto go install -mod=mod golang.org/x/tools/cmd/goimports@v0.7.0
  popd >/dev/null
fi

cd "${KUBE_ROOT}" || exit 1

# NOTE(review): verify-goimports.sh additionally excludes ./pkg/apis/* —
# confirm whether the two scripts' file lists should match.
IFS=$'\n' read -r -d '' -a files < <( find . -type f -name '*.go' -not -path "./vendor/*" -not -path "./pkg/client/*" -not -name "zz_generated.deepcopy.go" && printf '\0' )

# Without this guard an empty file list makes goimports read stdin and hang.
if [ "${#files[@]}" -eq 0 ]; then
  exit 0
fi

goimports -w -local kubesphere.io/kubesphere "${files[@]}"
diff --git a/hack/verify-goimports.sh b/hack/verify-goimports.sh
new file mode 100755
index 00000000..6f8e26da
--- /dev/null
+++ b/hack/verify-goimports.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
source "${KUBE_ROOT}/hack/lib/util.sh"

kube::golang::verify_go_version

# Ensure that we find the binaries we build before anything else.
export GOBIN="${KUBE_OUTPUT_BINPATH}"
PATH="${GOBIN}:${PATH}"

# Explicitly opt into go modules, even though we're inside a GOPATH directory
export GO111MODULE=on

# >/dev/null: command -v prints the resolved path on success, which polluted
# the script's output in the original.
if ! command -v goimports >/dev/null; then
  # Install goimports
  echo 'installing goimports'
  pushd "${KUBE_ROOT}/hack/tools" >/dev/null
  GO111MODULE=auto go install -mod=mod golang.org/x/tools/cmd/goimports@v0.7.0
  popd >/dev/null
fi

cd "${KUBE_ROOT}" || exit 1

IFS=$'\n' read -r -d '' -a files < <( find . -type f -name '*.go' -not -path "./vendor/*" -not -path "./pkg/apis/*" -not -path "./pkg/client/*" -not -name "zz_generated.deepcopy.go" && printf '\0' )

# Without this guard an empty file list makes goimports read stdin and hang.
if [ "${#files[@]}" -eq 0 ]; then
  exit 0
fi

output=$(goimports -local kubesphere.io/kubesphere -l "${files[@]}")

if [ -n "${output}" ]; then
  echo "The following files are not import formatted"
  # output is a newline-separated scalar; the original's "${output[@]}"
  # array-expanded a plain string variable.
  printf '%s\n' "${output}"
  echo "Please run the following command:"
  echo "make goimports"
  exit 1
fi
diff --git a/hack/version.sh b/hack/version.sh
new file mode 100755
index 00000000..aeeb8862
--- /dev/null
+++ b/hack/version.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+# Copyright 2020 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+if [[ "${TRACE-0}" == "1" ]]; then
+ set -o xtrace
+fi
+
# version::get_version_vars populates GIT_COMMIT, GIT_TREE_STATE, GIT_VERSION,
# GIT_MAJOR/GIT_MINOR and GIT_RELEASE_TAG/GIT_RELEASE_COMMIT from the state of
# the working tree. The process exits non-zero if the derived version is not a
# valid semantic version.
version::get_version_vars() {
  # shellcheck disable=SC1083
  GIT_COMMIT="$(git rev-parse HEAD^{commit})"

  # Any uncommitted change — tracked or untracked — marks the tree dirty.
  if git_status=$(git status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
    GIT_TREE_STATE="clean"
  else
    GIT_TREE_STATE="dirty"
  fi

  # stolen from k8s.io/hack/lib/version.sh
  # Use git describe to find the version based on annotated tags.
  # A caller-supplied GIT_VERSION short-circuits the describe.
  if [[ -n ${GIT_VERSION-} ]] || GIT_VERSION=$(git describe --tags --abbrev=14 --match "v[0-9]*" "${GIT_COMMIT}" 2>/dev/null); then
    # This translates the "git describe" to an actual semver.org
    # compatible semantic version that looks something like this:
    #   v1.1.0-alpha.0.6+84c76d1142ea4d
    #
    # TODO: We continue calling this "git version" because so many
    # downstream consumers are expecting it there.
    #
    # Counting the dashes distinguishes the describe output shapes below;
    # the {14}-char hash width must match --abbrev=14 above.
    # shellcheck disable=SC2001
    DASHES_IN_VERSION=$(echo "${GIT_VERSION}" | sed "s/[^-]//g")
    if [[ "${DASHES_IN_VERSION}" == "---" ]] ; then
      # We have distance to subversion (v1.1.0-subversion-1-gCommitHash)
      # shellcheck disable=SC2001
      GIT_VERSION=$(echo "${GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\-\2/")
    elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then
      # We have distance to base tag (v1.1.0-1-gCommitHash)
      # shellcheck disable=SC2001
      GIT_VERSION=$(echo "${GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/-\1/")
    fi
    if [[ "${GIT_TREE_STATE}" == "dirty" ]]; then
      # git describe --dirty only considers changes to existing files, but
      # that is problematic since new untracked .go files affect the build,
      # so use our idea of "dirty" from git status instead.
      GIT_VERSION+="-dirty"
    fi


    # Try to match the "git describe" output to a regex to try to extract
    # the "major" and "minor" versions and whether this is the exact tagged
    # version or whether the tree is between two tagged versions.
    if [[ "${GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then
      GIT_MAJOR=${BASH_REMATCH[1]}
      GIT_MINOR=${BASH_REMATCH[2]}
    fi

    # If GIT_VERSION is not a valid Semantic Version, then refuse to build.
    if ! [[ "${GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then
      echo "GIT_VERSION should be a valid Semantic Version. Current value: ${GIT_VERSION}"
      echo "Please see more details here: https://semver.org"
      exit 1
    fi
  fi

  # Most recent reachable tag and the commit it points at.
  GIT_RELEASE_TAG=$(git describe --abbrev=0 --tags)
  GIT_RELEASE_COMMIT=$(git rev-list -n 1 "${GIT_RELEASE_TAG}")
}
+
# stolen from k8s.io/hack/lib/version.sh and modified
# Prints the value that needs to be passed to the -ldflags parameter of go build.
# Each variable is injected as github.com/kubesphere/kubekey/v4/version.<key>.
version::ldflags() {
  version::get_version_vars

  local -a ldflags
  # add_ldflag appends one "-X key=value" pair targeting the version package.
  function add_ldflag() {
    local key=${1}
    local val=${2}
    ldflags+=(
      "-X 'github.com/kubesphere/kubekey/v4/version.${key}=${val}'"
    )
  }

  # SOURCE_DATE_EPOCH, when set, pins buildDate for reproducible builds;
  # otherwise the current UTC time is used.
  add_ldflag "buildDate" "$(date ${SOURCE_DATE_EPOCH:+"--date=@${SOURCE_DATE_EPOCH}"} -u +'%Y-%m-%dT%H:%M:%SZ')"
  add_ldflag "gitCommit" "${GIT_COMMIT}"
  add_ldflag "gitTreeState" "${GIT_TREE_STATE}"
  add_ldflag "gitMajor" "${GIT_MAJOR}"
  add_ldflag "gitMinor" "${GIT_MINOR}"
  add_ldflag "gitVersion" "${GIT_VERSION}"
  add_ldflag "gitReleaseCommit" "${GIT_RELEASE_COMMIT}"

  # The -ldflags parameter takes a single string, so join the output.
  echo "${ldflags[*]-}"
}

version::ldflags
diff --git a/pipeline/fs.go b/pipeline/fs.go
new file mode 100644
index 00000000..4fb457eb
--- /dev/null
+++ b/pipeline/fs.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
package pipeline

import (
	"embed"
)

// InternalPipeline bundles the playbooks/ and roles/ directories into the
// binary so the built-in pipelines can run without any files on disk.
//
//go:embed playbooks roles
var InternalPipeline embed.FS
diff --git a/pipeline/playbooks/precheck.yaml b/pipeline/playbooks/precheck.yaml
new file mode 100644
index 00000000..0f602d19
--- /dev/null
+++ b/pipeline/playbooks/precheck.yaml
@@ -0,0 +1,7 @@
---
# Run the precheck role on every cluster and etcd node; facts are gathered
# first so the role's assertions can inspect them.
- hosts:
    - k8s_cluster
    - etcd
  gather_facts: true
  roles:
    - {role: precheck}
diff --git a/pipeline/roles/precheck/tasks/main.yaml b/pipeline/roles/precheck/tasks/main.yaml
new file mode 100644
index 00000000..369e551c
--- /dev/null
+++ b/pipeline/roles/precheck/tasks/main.yaml
@@ -0,0 +1,114 @@
---
# Preflight assertions for cluster nodes. Each task either asserts a
# requirement (aborting the run with fail_msg on violation) or probes the
# host (ping). Filter syntax such as `default:false` or `version:'>=,…'` is
# this project's template dialect — presumably not stock Jinja2; confirm
# against the engine's filter implementations.

- name: Stop if either kube_control_plane or kube_node group is empty
  assert:
    that: "'{{ item }}' in groups"
  loop:
    - kube_control_plane
    - kube_node
  run_once: true

- name: Stop if etcd group is empty in external etcd mode
  assert:
    that: "'etcd' in groups"
    fail_msg: "Group 'etcd' cannot be empty in external etcd mode"
  run_once: true
  when:
    - etcd_deployment_type != "kubeadm"

- name: Stop if the os does not support
  assert:
    that: (allow_unsupported_distribution_setup | default:false) or os.release.ID in supported_os_distributions
    fail_msg: "{{ os.release.ID }} is not a known OS"

- name: Stop if unknown network plugin
  vars:
    require_network_plugin: ['calico', 'flannel', 'weave', 'cloud', 'cilium', 'cni', 'kube-ovn', 'kube-router', 'macvlan', 'custom_cni']
  assert:
    that: kube_network_plugin in require_network_plugin
    fail_msg: "{{ kube_network_plugin }} is not supported"
  when:
    - kube_network_plugin | defined

- name: Stop if unsupported version of Kubernetes
  assert:
    that: kube_version | version:'>=,{{kube_version_min_required}}'
    fail_msg: "The current release of Kubespray only support newer version of Kubernetes than {{ kube_version_min_required }} - You are trying to apply {{ kube_version }}"

# etcd quorum requires an odd member count.
- name: Stop if even number of etcd hosts
  assert:
    that: not groups.etcd | length | divisibleby:2
  when:
    - inventory_hostname in groups['etcd']

# MemTotal from /proc/meminfo is in kB; the 'cut' filter strips the unit.
- name: Stop if memory is too small for masters
  assert:
    that: process.memInfo.MemTotal | cut:' kB' >= minimal_master_memory_mb
  when:
    - inventory_hostname in groups['kube_control_plane']

- name: Stop if memory is too small for nodes
  assert:
    that: process.memInfo.MemTotal | cut:' kB' >= minimal_node_memory_mb
  when:
    - inventory_hostname in groups['kube_node']

# This assertion will fail on the safe side: One can indeed schedule more pods
# on a node than the CIDR-range has space for when additional pods use the host
# network namespace. It is impossible to ascertain the number of such pods at
# provisioning time, so to establish a guarantee, we factor these out.
# NOTICE: the check blatantly ignores the inet6-case
- name: Guarantee that enough network address space is available for all pods
  assert:
    that: "(kubelet_max_pods | default_if_none:110 | integer) <= (2 | pow: {{ 32 - kube_network_node_prefix | integer }} - 2)"
    fail_msg: "Do not schedule more pods on a node than inet addresses are available."
  when:
    - inventory_hostname in groups['k8s_cluster']
    - kube_network_node_prefix | defined
    - kube_network_plugin != 'calico'

- name: Stop if access_ip is not pingable
  command: ping -c1 {{ access_ip }}
  when:
    - access_ip | defined
    - ping_access_ip
  changed_when: false

# Cilium requires kernel >= 4.9.17; the split drops any distro suffix.
- name: Stop if kernel version is too low
  assert:
    that: os.kernelVersion | split:'-' | first | version:'>=,4.9.17'
  when:
    - kube_network_plugin == 'cilium' or (cilium_deploy_additionally | default:false)

# RFC 1123 DNS-label style hostnames only.
- name: Stop if bad hostname
  vars:
    regex: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$'
  assert:
    that: inventory_hostname | match:regex
    fail_msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character"
+
# The accepted values are kubekey/external/kubeadm; the original task name and
# fail_msg still said "host, docker or kubeadm" — a stale copy from kubespray.
- name: Stop if etcd deployment type is not kubekey, external or kubeadm
  vars:
    require_etcd_deployment_type: ['kubekey', 'external', 'kubeadm']
  assert:
    that: etcd_deployment_type in require_etcd_deployment_type
    fail_msg: "The etcd deployment type, 'etcd_deployment_type', must be kubekey, external or kubeadm"
  when:
    - inventory_hostname in groups['etcd']
+
# Only one runtime needs to pass, so a single host checks for the cluster.
- name: Stop if container manager is not docker, crio or containerd
  vars:
    require_container_manager: ['docker', 'crio', 'containerd']
  assert:
    that: container_manager in require_container_manager
    fail_msg: "The container manager, 'container_manager', must be docker, crio or containerd"
  run_once: true
+
# Fix: require_containerd_version sat directly on the task, which is not a
# valid task keyword — every sibling task nests such lists under `vars:`.
# Also normalized `run_once: yes` to `true` for consistency with this file.
- name: Ensure minimum containerd version
  vars:
    require_containerd_version: ['latest', 'edge', 'stable']
  assert:
    that: containerd_version | version:'>=,{{containerd_min_version_required}}'
    fail_msg: "containerd_version is too low. Minimum version {{ containerd_min_version_required }}"
  run_once: true
  when:
    - not containerd_version in require_containerd_version
    - container_manager == 'containerd'
diff --git a/pkg/apis/core/v1/base.go b/pkg/apis/core/v1/base.go
new file mode 100644
index 00000000..ca5c1e62
--- /dev/null
+++ b/pkg/apis/core/v1/base.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
// Base collects the keywords shared by plays, roles, blocks and tasks:
// identity, connection/transport settings, variables, execution flags and
// privilege escalation.
type Base struct {
	Name string `yaml:"name,omitempty"`

	// connection/transport
	Connection string `yaml:"connection,omitempty"`
	Port       int    `yaml:"port,omitempty"`
	RemoteUser string `yaml:"remote_user,omitempty"`

	// variables
	Vars map[string]any `yaml:"vars,omitempty"`

	// module default params
	ModuleDefaults []map[string]map[string]any `yaml:"module_defaults,omitempty"`

	// flags and misc. settings
	Environment    []map[string]string `yaml:"environment,omitempty"`
	NoLog          bool                `yaml:"no_log,omitempty"`
	RunOnce        bool                `yaml:"run_once,omitempty"`
	IgnoreErrors   bool                `yaml:"ignore_errors,omitempty"`
	CheckMode      bool                `yaml:"check_mode,omitempty"`
	Diff           bool                `yaml:"diff,omitempty"`
	AnyErrorsFatal bool                `yaml:"any_errors_fatal,omitempty"`
	Throttle       int                 `yaml:"throttle,omitempty"`
	Timeout        int                 `yaml:"timeout,omitempty"`

	// Debugger invoke a debugger on tasks
	Debugger string `yaml:"debugger,omitempty"`

	// privilege escalation
	Become       bool   `yaml:"become,omitempty"`
	BecomeMethod string `yaml:"become_method,omitempty"`
	BecomeUser   string `yaml:"become_user,omitempty"`
	BecomeFlags  string `yaml:"become_flags,omitempty"`
	BecomeExe    string `yaml:"become_exe,omitempty"`
}
diff --git a/pkg/apis/core/v1/block.go b/pkg/apis/core/v1/block.go
new file mode 100644
index 00000000..ef10acdb
--- /dev/null
+++ b/pkg/apis/core/v1/block.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "reflect"
+ "strings"
+
+ "k8s.io/klog/v2"
+)
+
// Block is one entry of a task list. Exactly one shape applies per entry —
// an include_tasks reference, a block/rescue/always container, or a plain
// task; UnmarshalYAML picks the shape from the YAML keys present.
type Block struct {
	BlockBase
	// If has Block, Task should be empty
	Task
	IncludeTasks string `yaml:"include_tasks,omitempty"`

	BlockInfo
}

// BlockBase groups the inline keyword sets shared by blocks and tasks.
type BlockBase struct {
	Base             `yaml:",inline"`
	Conditional      `yaml:",inline"`
	CollectionSearch `yaml:",inline"`
	Taggable         `yaml:",inline"`
	Notifiable       `yaml:",inline"`
	Delegatable      `yaml:",inline"`
}

// BlockInfo is the container form: Block runs first, Rescue runs when Block
// fails, Always runs regardless of the outcome.
type BlockInfo struct {
	Block  []Block `yaml:"block,omitempty"`
	Rescue []Block `yaml:"rescue,omitempty"`
	Always []Block `yaml:"always,omitempty"`
}

// Task carries the task-only keywords: async polling, loops, retries and
// result registration.
type Task struct {
	AsyncVal    int         `yaml:"async,omitempty"`
	ChangedWhen When        `yaml:"changed_when,omitempty"`
	Delay       int         `yaml:"delay,omitempty"`
	FailedWhen  When        `yaml:"failed_when,omitempty"`
	Loop        []any       `yaml:"loop,omitempty"`
	LoopControl LoopControl `yaml:"loop_control,omitempty"`
	Poll        int         `yaml:"poll,omitempty"`
	Register    string      `yaml:"register,omitempty"`
	Retries     int         `yaml:"retries,omitempty"`
	Until       When        `yaml:"until,omitempty"`

	// deprecated, used to be loop and loop_args but loop has been repurposed
	//LoopWith string `yaml:"loop_with"`

	// UnknownFiled receives the YAML keys no struct field consumed —
	// typically the module name and its arguments (see Block.UnmarshalYAML).
	// The name is a typo for "UnknownField" but is exported API, so kept.
	UnknownFiled map[string]any `yaml:"-"`
}
+
// UnmarshalYAML decodes a task-list entry into exactly one of three shapes:
// an include_tasks reference, a block/rescue/always container, or a plain
// task whose unmatched keys are stashed in Task.UnknownFiled.
func (b *Block) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// fill baseInfo
	// A decode error here is deliberately ignored: malformed YAML is
	// reported by the map decode just below.
	var bb BlockBase
	if err := unmarshal(&bb); err == nil {
		b.BlockBase = bb
	}

	// Decode once more into a plain map to inspect which keys are present.
	var m map[string]any
	if err := unmarshal(&m); err != nil {
		klog.Errorf("unmarshal data to map error: %v", err)
		return err
	}

	if v, ok := m["include_tasks"]; ok {
		// NOTE(review): a non-string include_tasks value panics on this
		// assertion — confirm inputs are validated before decoding.
		b.IncludeTasks = v.(string)
	} else if _, ok := m["block"]; ok {
		// render block
		var bi BlockInfo
		err := unmarshal(&bi)
		if err != nil {
			klog.Errorf("unmarshal data to block error: %v", err)
			return err
		}
		b.BlockInfo = bi
	} else {
		// render task
		var t Task
		err := unmarshal(&t)
		if err != nil {
			klog.Errorf("unmarshal data to task error: %v", err)
			return err
		}
		b.Task = t
		// Drop every key a known struct field already consumed, so only
		// genuinely unknown keys (module name, args) remain in m.
		deleteExistField(reflect.TypeOf(Block{}), m)
		// set unknown flied to task.UnknownFiled
		b.UnknownFiled = m
	}

	return nil
}
+
// deleteExistField removes from m every key that corresponds to a field of
// struct type rt, recursing into embedded (anonymous) fields. The keys left
// in m afterwards are the ones no struct field would absorb.
//
// Fixes over the original: in the untagged-field branch the `break`
// erroneously exited the whole field loop (aborting cleanup after one hit),
// and upper-casing the first letter produced a key that can never match the
// yaml package's default lower-cased field keys.
func deleteExistField(rt reflect.Type, m map[string]any) {
	for i := 0; i < rt.NumField(); i++ {
		field := rt.Field(i)
		if field.Anonymous {
			// Embedded struct: its fields are promoted, so match them too.
			deleteExistField(field.Type, m)
			continue
		}
		if yamlTag := field.Tag.Get("yaml"); yamlTag != "" {
			// The key is the first tag element; later elements are options
			// such as "omitempty" or "inline", which never name a map key.
			for _, t := range strings.Split(yamlTag, ",") {
				if _, ok := m[t]; ok {
					delete(m, t)
					break
				}
			}
			continue
		}
		// No yaml tag: the yaml package defaults to the lower-cased field
		// name as the key. delete is a no-op for absent keys.
		delete(m, strings.ToLower(field.Name))
	}
}
diff --git a/pkg/apis/core/v1/collectionsearch.go b/pkg/apis/core/v1/collectionsearch.go
new file mode 100644
index 00000000..eeca5cc9
--- /dev/null
+++ b/pkg/apis/core/v1/collectionsearch.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
// CollectionSearch holds the list of collections searched when resolving
// short module/role names.
type CollectionSearch struct {
	Collections []string `yaml:"collections,omitempty"`
}
diff --git a/pkg/apis/core/v1/conditional.go b/pkg/apis/core/v1/conditional.go
new file mode 100644
index 00000000..bbc1d946
--- /dev/null
+++ b/pkg/apis/core/v1/conditional.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "fmt"
+)
+
// Conditional carries the "when" clause shared by plays, roles, blocks and
// tasks: every expression must hold for execution to proceed.
type Conditional struct {
	When When `yaml:"when,omitempty"`
}

// When is a list of condition expressions. YAML may write it either as a
// single string or as a sequence of strings; both decode into Data.
type When struct {
	Data []string
}

// UnmarshalYAML accepts a scalar string (wrapped into a one-element slice)
// or a sequence of strings. Any other YAML shape is rejected.
// (Fix over the original: error message said "excepted" for "expected".)
func (w *When) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var s string
	if err := unmarshal(&s); err == nil {
		w.Data = []string{s}
		return nil
	}
	var a []string
	if err := unmarshal(&a); err == nil {
		w.Data = a
		return nil
	}
	return fmt.Errorf("unsupported type, expected string or array of strings")
}
diff --git a/pkg/apis/core/v1/delegatable.go b/pkg/apis/core/v1/delegatable.go
new file mode 100644
index 00000000..f6fa7c79
--- /dev/null
+++ b/pkg/apis/core/v1/delegatable.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
// Delegatable routes execution of a task to another host (DelegateTo) and
// optionally assigns the gathered facts to that host (DelegateFacts).
type Delegatable struct {
	DelegateTo    string `yaml:"delegate_to,omitempty"`
	DelegateFacts bool   `yaml:"delegate_facts,omitempty"`
}
diff --git a/pkg/apis/core/v1/docs.go b/pkg/apis/core/v1/docs.go
new file mode 100644
index 00000000..72082c64
--- /dev/null
+++ b/pkg/apis/core/v1/docs.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// Playbook keyword in ansible: https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html#playbook-keywords
+// support list (base on ansible 2.15.5)
+
+/**
+Play
++------+------------------------+------------+
+| Row | Keyword | Support |
++------+------------------------+------------+
+| 1 | any_errors_fatal | ✘ |
+| 2 | become | ✘ |
+| 3 | become_exe | ✘ |
+| 4 | become_flags | ✘ |
+| 5 | become_method | ✘ |
+| 6 | become_user | ✘ |
+| 7 | check_mode | ✘ |
+| 8 | collections | ✘ |
+| 9 | connection | ✔︎ |
+| 10 | debugger | ✘ |
+| 11 | diff | ✘ |
+| 12 | environment | ✘ |
+| 13 | fact_path | ✘ |
+| 14 | force_handlers | ✘ |
+| 15 | gather_facts | ✔︎ |
+| 16 | gather_subset | ✘ |
+| 17 | gather_timeout | ✘ |
+| 18 | handlers | ✘ |
+| 19 | hosts | ✔︎ |
+| 20 | ignore_errors | ✔︎ |
+| 21 | ignore_unreachable | ✘ |
+| 22 | max_fail_percentage | ✘ |
+| 23 | module_defaults | ✘ |
+| 24 | name | ✔︎ |
+| 25 | no_log | ✘ |
+| 26 | order | ✘ |
+| 27 | port | ✘ |
| 28   | post_tasks             | ✔︎          |
+| 29 | pre_tasks | ✔︎ |
+| 30 | remote_user | ✘ |
+| 31 | roles | ✔︎ |
+| 32 | run_once | ✔︎ |
+| 33 | serial | ✔︎ |
+| 34 | strategy | ✘ |
+| 35 | tags | ✔︎ |
+| 36 | tasks | ✔︎ |
+| 37 | throttle | ✘ |
+| 38 | timeout | ✘ |
+| 39 | vars | ✔︎ |
+| 40 | vars_files | ✘ |
+| 41 | vars_prompt | ✘ |
++------+------------------------+------------+
+
+Role
++------+------------------------+------------+
+| Row | Keyword | Support |
++------+------------------------+------------+
+| 1 | any_errors_fatal | ✘ |
+| 2 | become | ✘ |
+| 3 | become_exe | ✘ |
+| 4 | become_flags | ✘ |
+| 5 | become_method | ✘ |
+| 6 | become_user | ✘ |
+| 7 | check_mode | ✘ |
+| 8 | collections | ✘ |
+| 9 | connection | ✘ |
+| 10 | debugger | ✘ |
+| 11 | delegate_facts | ✘ |
+| 12 | delegate_to | ✘ |
+| 13 | diff | ✘ |
+| 14 | environment | ✘ |
+| 15 | ignore_errors | ✔︎ |
+| 16 | ignore_unreachable | ✘ |
+| 17 | max_fail_percentage | ✘ |
+| 18 | module_defaults | ✘ |
+| 19 | name | ✔︎ |
+| 20 | no_log | ✘ |
+| 21 | port | ✘ |
+| 22 | remote_user | ✘ |
+| 23 | run_once | ✔︎ |
+| 24 | tags | ✔︎ |
+| 25 | throttle | ✘ |
+| 26 | timeout | ✘ |
+| 27 | vars | ✔︎ |
+| 28 | when | ✔︎ |
++------+------------------------+------------+
+
+Block
++------+------------------------+------------+
+| Row | Keyword | Support |
++------+------------------------+------------+
+| 1 | always | ✔︎ |
+| 2 | any_errors_fatal | ✘ |
+| 3 | become | ✘ |
+| 4 | become_exe | ✘ |
+| 5 | become_flags | ✘ |
+| 6 | become_method | ✘ |
+| 7 | become_user | ✘ |
+| 8 | block | ✔︎ |
+| 9 | check_mode | ✘ |
+| 10 | collections | ✘ |
+| 11 | debugger | ✘ |
+| 12 | delegate_facts | ✘ |
+| 13 | delegate_to | ✘ |
+| 14 | diff | ✘ |
+| 15 | environment | ✘ |
+| 16 | ignore_errors | ✔︎ |
+| 17 | ignore_unreachable | ✘ |
+| 18 | max_fail_percentage | ✘ |
+| 19 | module_defaults | ✘ |
+| 20 | name | ✔︎ |
+| 21 | no_log | ✘ |
+| 22 | notify | ✘ |
+| 23 | port | ✘ |
+| 24 | remote_user | ✘ |
+| 25 | rescue | ✔︎ |
+| 26 | run_once | ✘ |
+| 27 | tags | ✔︎ |
+| 28 | throttle | ✘ |
+| 29 | timeout | ✘ |
+| 30 | vars | ✔︎ |
+| 31 | when | ✔︎ |
++------+------------------------+------------+
+
+
+Task
++------+------------------------+------------+
+| Row | Keyword | Support |
++------+------------------------+------------+
+| 1 | action | ✔︎ |
+| 2 | any_errors_fatal | ✘ |
+| 3 | args | ✔︎ |
+| 4 | async | ✘ |
+| 5 | become | ✘ |
+| 6 | become_exe | ✘ |
+| 7 | become_flags | ✘ |
+| 8 | become_method | ✘ |
+| 9 | become_user | ✘ |
+| 10 | changed_when | ✘ |
+| 11 | check_mode | ✘ |
+| 12 | collections | ✘ |
+| 13 | debugger | ✘ |
+| 14 | delay | ✘ |
+| 15 | delegate_facts | ✘ |
+| 16 | delegate_to | ✘ |
+| 17 | diff | ✘ |
+| 18 | environment | ✘ |
+| 19 | failed_when | ✔︎ |
+| 20 | ignore_errors | ✔︎ |
+| 21 | ignore_unreachable | ✘ |
+| 22 | local_action | ✘ |
+| 23 | loop | ✔︎ |
+| 24 | loop_control | ✘ |
+| 25 | module_defaults | ✘ |
+| 26 | name | ✔︎ |
+| 27 | no_log | ✘ |
+| 28 | notify | ✘ |
+| 29 | poll | ✘ |
+| 30 | port | ✘ |
+| 31 | register | ✔︎ |
+| 32 | remote_user | ✘ |
+| 33 | retries | ✘ |
+| 34 | run_once | ✘ |
+| 35 | tags | ✔︎ |
+| 36 | throttle | ✘ |
+| 37 | timeout | ✘ |
+| 38 | until | ✘ |
+| 39 | vars | ✔︎ |
+| 40 | when | ✔︎ |
+| 41 | with_ | ✔︎ |
++------+------------------------+------------+
+*/
diff --git a/pkg/apis/core/v1/handler.go b/pkg/apis/core/v1/handler.go
new file mode 100644
index 00000000..c91db2f8
--- /dev/null
+++ b/pkg/apis/core/v1/handler.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
// Handler is a task that runs only when notified; Listen names the topics
// that trigger it. The embedded Task is not wired up yet (kept commented
// out in the original).
type Handler struct {
	//Task

	Listen []string `yaml:"listen,omitempty"`
}
diff --git a/pkg/apis/core/v1/loop.go b/pkg/apis/core/v1/loop.go
new file mode 100644
index 00000000..ba2cff71
--- /dev/null
+++ b/pkg/apis/core/v1/loop.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
// LoopControl tunes loop execution: the per-item variable name, an optional
// index variable, the label printed per item, a pause between items, and
// the extended loop-information toggles.
type LoopControl struct {
	LoopVar          string  `yaml:"loop_var,omitempty"`
	IndexVar         string  `yaml:"index_var,omitempty"`
	Label            string  `yaml:"label,omitempty"`
	Pause            float32 `yaml:"pause,omitempty"`
	Extended         bool    `yaml:"extended,omitempty"`
	ExtendedAllitems bool    `yaml:"extended_allitems,omitempty"`
}
diff --git a/pkg/apis/core/v1/notifiable.go b/pkg/apis/core/v1/notifiable.go
new file mode 100644
index 00000000..46c9ff15
--- /dev/null
+++ b/pkg/apis/core/v1/notifiable.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
// Notifiable names a handler to trigger when the task reports a change.
// NOTE(review): Ansible's "notify" also accepts a list of names; this field
// decodes only a single string — confirm that is intended.
type Notifiable struct {
	Notify string `yaml:"notify,omitempty"`
}
diff --git a/pkg/apis/core/v1/play.go b/pkg/apis/core/v1/play.go
new file mode 100644
index 00000000..5e636484
--- /dev/null
+++ b/pkg/apis/core/v1/play.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import "fmt"
+
// Play is a single play of a playbook: the hosts to target plus the roles
// and task lists to run there. When ImportPlaybook is set the entry pulls
// in another playbook instead of describing a play inline.
type Play struct {
	ImportPlaybook string `yaml:"import_playbook,omitempty"`

	Base             `yaml:",inline"`
	Taggable         `yaml:",inline"`
	CollectionSearch `yaml:",inline"`

	PlayHost PlayHost `yaml:"hosts,omitempty"`

	// Facts
	GatherFacts bool `yaml:"gather_facts,omitempty"`

	// defaults to be deprecated, should be 'None' in future
	//GatherSubset []GatherSubset
	//GatherTimeout int
	//FactPath string

	// Variable Attribute
	VarsFiles  []string `yaml:"vars_files,omitempty"`
	VarsPrompt []string `yaml:"vars_prompt,omitempty"`

	// Role Attributes
	Roles []Role `yaml:"roles,omitempty"`

	// Block (Task) Lists Attributes
	Handlers  []Block `yaml:"handlers,omitempty"`
	PreTasks  []Block `yaml:"pre_tasks,omitempty"`
	PostTasks []Block `yaml:"post_tasks,omitempty"`
	Tasks     []Block `yaml:"tasks,omitempty"`

	// Flag/Setting Attributes
	ForceHandlers bool `yaml:"force_handlers,omitempty"`
	// NOTE(review): yaml key "percent" does not match Ansible's
	// "max_fail_percentage" keyword — confirm which key playbooks use.
	MaxFailPercentage float32    `yaml:"percent,omitempty"`
	Serial            PlaySerial `yaml:"serial,omitempty"`
	Strategy          string     `yaml:"strategy,omitempty"`
	Order             string     `yaml:"order,omitempty"`
}
+
// PlaySerial holds the "serial" batching setting. YAML may give it as a
// single scalar or as a list of scalars; both forms decode into Data.
type PlaySerial struct {
	Data []any
}

// UnmarshalYAML accepts either a sequence (kept as-is) or a lone scalar
// (wrapped into a one-element slice).
// (Fix over the original: error message said "excepted" for "expected".)
func (s *PlaySerial) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var as []any
	if err := unmarshal(&as); err == nil {
		s.Data = as
		return nil
	}
	var a any
	if err := unmarshal(&a); err == nil {
		s.Data = []any{a}
		return nil
	}
	return fmt.Errorf("unsupported type, expected any or array")
}
+
// PlayHost is the "hosts" pattern of a play. YAML may give it as a single
// string or as a list of strings; both forms decode into Hosts.
type PlayHost struct {
	Hosts []string
}

// UnmarshalYAML accepts either a sequence of strings (kept as-is) or a lone
// string (wrapped into a one-element slice).
// (Fix over the original: error message said "excepted" for "expected".)
func (p *PlayHost) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var hs []string
	if err := unmarshal(&hs); err == nil {
		p.Hosts = hs
		return nil
	}
	var h string
	if err := unmarshal(&h); err == nil {
		p.Hosts = []string{h}
		return nil
	}
	return fmt.Errorf("unsupported type, expected string or string array")
}
diff --git a/pkg/apis/core/v1/play_test.go b/pkg/apis/core/v1/play_test.go
new file mode 100644
index 00000000..8e01226b
--- /dev/null
+++ b/pkg/apis/core/v1/play_test.go
@@ -0,0 +1,221 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/yaml.v3"
+)
+
+func TestUnmarshalYaml(t *testing.T) {
+ testcases := []struct {
+ name string
+ data []byte
+ excepted []Play
+ }{
+ {
+ name: "Unmarshal hosts with single value",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+`),
+ excepted: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{[]string{"localhost"}},
+ },
+ },
+ },
+ {
+ name: "Unmarshal hosts with multiple value",
+ data: []byte(`---
+- name: test play
+ hosts: ["control-plane", "worker"]
+`),
+ excepted: []Play{
+ {
+ Base: Base{
+ Name: "test play",
+ },
+ PlayHost: PlayHost{[]string{"control-plane", "worker"}},
+ },
+ },
+ },
+ {
+ name: "Unmarshal role with single value",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+ roles:
+ - test
+`),
+ excepted: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{
+ []string{"localhost"},
+ },
+ Roles: []Role{
+ {
+ RoleInfo{
+ Role: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Unmarshal role with map value",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+ roles:
+ - role: test
+`),
+ excepted: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{
+ []string{"localhost"},
+ },
+ Roles: []Role{
+ {
+ RoleInfo{
+ Role: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Unmarshal when with single value",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+ roles:
+ - role: test
+ when: "true"
+`),
+ excepted: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{
+ []string{"localhost"},
+ },
+ Roles: []Role{
+ {
+ RoleInfo{
+ Conditional: Conditional{When: When{Data: []string{"true"}}},
+ Role: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Unmarshal when with multiple value",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+ roles:
+ - role: test
+ when: ["true","false"]
+`),
+ excepted: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{
+ []string{"localhost"},
+ },
+ Roles: []Role{
+ {
+ RoleInfo{
+ Conditional: Conditional{When: When{Data: []string{"true", "false"}}},
+ Role: "test",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Unmarshal single level block",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+ tasks:
+ - name: test
+ custom-module: abc
+`),
+ excepted: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{Hosts: []string{"localhost"}},
+ Tasks: []Block{
+ {
+ BlockBase: BlockBase{Base: Base{Name: "test"}},
+ Task: Task{UnknownFiled: map[string]any{"custom-module": "abc"}},
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Unmarshal multi level block",
+ data: []byte(`---
+- name: test play
+ hosts: localhost
+ tasks:
+ - name: test
+ block:
+ - name: test | test
+ custom-module: abc
+`),
+ excepted: []Play{
+ {
+ Base: Base{Name: "test play"},
+ PlayHost: PlayHost{Hosts: []string{"localhost"}},
+ Tasks: []Block{
+ {
+ BlockBase: BlockBase{Base: Base{Name: "test"}},
+ BlockInfo: BlockInfo{
+ Block: []Block{{
+ BlockBase: BlockBase{Base: Base{Name: "test | test"}},
+ Task: Task{UnknownFiled: map[string]any{"custom-module": "abc"}},
+ }},
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ var pb []Play
+ err := yaml.Unmarshal(tc.data, &pb)
+ assert.NoError(t, err)
+ assert.Equal(t, tc.excepted, pb)
+ })
+ }
+}
diff --git a/pkg/apis/core/v1/playbook.go b/pkg/apis/core/v1/playbook.go
new file mode 100644
index 00000000..31a3748d
--- /dev/null
+++ b/pkg/apis/core/v1/playbook.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import "fmt"
+
+type Playbook struct {
+ Play []Play
+}
+
+func (p *Playbook) Validate() error {
+ for _, play := range p.Play {
+ if len(play.PlayHost.Hosts) == 0 {
+ return fmt.Errorf("playbook's hosts must not be empty")
+ }
+ }
+
+ return nil
+}
diff --git a/pkg/apis/core/v1/playbook_test.go b/pkg/apis/core/v1/playbook_test.go
new file mode 100644
index 00000000..58c9f984
--- /dev/null
+++ b/pkg/apis/core/v1/playbook_test.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidate(t *testing.T) {
+ testcases := []struct {
+ name string
+ playbook Playbook
+ }{
+ {
+ name: "host is empty",
+ playbook: Playbook{Play: []Play{
+ {
+ Base: Base{
+ Name: "test",
+ },
+ },
+ }},
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ err := tc.playbook.Validate()
+ assert.Error(t, err)
+ })
+ }
+}
diff --git a/pkg/apis/core/v1/role.go b/pkg/apis/core/v1/role.go
new file mode 100644
index 00000000..a7b433cb
--- /dev/null
+++ b/pkg/apis/core/v1/role.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
// Role is a single entry of a play's "roles" list. In YAML it may be
// written either as a bare role name or as a mapping with attributes;
// see Role.UnmarshalYAML.
type Role struct {
	RoleInfo
}
+
// RoleInfo holds the attributes of a role reference: the role name plus
// the common base, conditional, tag and collection-search attributes.
type RoleInfo struct {
	Base             `yaml:",inline"`
	Conditional      `yaml:",inline"`
	Taggable         `yaml:",inline"`
	CollectionSearch `yaml:",inline"`

	// Role is the name of the referenced role.
	Role string `yaml:"role,omitempty"`

	// Block holds the role's task blocks.
	// NOTE(review): no yaml tag — presumably populated by a loader from the
	// role's files rather than decoded from the playbook YAML; confirm.
	Block []Block
}
+
+func (r *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err == nil {
+ r.Role = s
+ return nil
+ }
+ var info RoleInfo
+ if err := unmarshal(&info); err == nil {
+ r.RoleInfo = info
+ return nil
+ }
+
+ return nil
+}
diff --git a/pkg/apis/core/v1/taggable.go b/pkg/apis/core/v1/taggable.go
new file mode 100644
index 00000000..a9285b82
--- /dev/null
+++ b/pkg/apis/core/v1/taggable.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import "k8s.io/utils/strings/slices"
+
// Taggable is embedded by objects (plays, roles, blocks) that can be
// filtered by Ansible-style tags.
type Taggable struct {
	// Tags attached to the block. The special tags "always" and "never"
	// change how the block reacts to tag filters.
	Tags []string `yaml:"tags,omitempty"`
}

// IsEnabled reports whether the block should be executed given the
// run-only tags (onlyTags, i.e. --tags) and the skip tags (skipTags,
// i.e. --skip-tags). It follows the Ansible semantics for the special
// tags "always", "never", "all" and "tagged".
func (t Taggable) IsEnabled(onlyTags []string, skipTags []string) bool {
	shouldRun := true

	if len(onlyTags) > 0 {
		switch {
		case slices.Contains(t.Tags, "always"):
			// "always"-tagged blocks run regardless of --tags selection.
			shouldRun = true
		case slices.Contains(onlyTags, "all") && !slices.Contains(t.Tags, "never"):
			shouldRun = true
		case slices.Contains(onlyTags, "tagged") && len(t.Tags) > 0 && !slices.Contains(t.Tags, "never"):
			// Fix: "tagged" selects blocks that carry at least one tag, so the
			// length check must be on t.Tags (the old `len(onlyTags) > 0` was
			// always true inside this branch).
			shouldRun = true
		case !isdisjoint(onlyTags, t.Tags):
			shouldRun = true
		default:
			shouldRun = false
		}
	}

	if shouldRun && len(skipTags) > 0 {
		if slices.Contains(skipTags, "all") {
			// Fix: skipping "all" still keeps "always"-tagged blocks, unless
			// "always" itself is also skipped. The old condition negated the
			// skipTags check, so always-tagged blocks were wrongly skipped.
			if !slices.Contains(t.Tags, "always") || slices.Contains(skipTags, "always") {
				shouldRun = false
			}
		} else if !isdisjoint(skipTags, t.Tags) {
			shouldRun = false
		} else if slices.Contains(skipTags, "tagged") && len(t.Tags) > 0 {
			// Fix: as above, "tagged" refers to the block's own tags.
			shouldRun = false
		}
	}

	return shouldRun
}

// isdisjoint returns true if a and b have no elements in common.
func isdisjoint(a, b []string) bool {
	for _, s := range a {
		if slices.Contains(b, s) {
			return false
		}
	}
	return true
}
diff --git a/pkg/apis/kubekey/v1/config_types.go b/pkg/apis/kubekey/v1/config_types.go
new file mode 100644
index 00000000..7efda65c
--- /dev/null
+++ b/pkg/apis/kubekey/v1/config_types.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
// Config is the global variable configuration for a playbook run. Spec is
// stored as raw bytes so arbitrary user-defined values pass through the
// API server without a fixed schema.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced
type Config struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec holds the free-form configuration values.
	Spec runtime.RawExtension `json:"spec,omitempty"`
}

// ConfigList contains a list of Config.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ConfigList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Config `json:"items"`
}

// init registers the Config types with the package scheme builder.
func init() {
	SchemeBuilder.Register(&Config{}, &ConfigList{})
}
diff --git a/pkg/apis/kubekey/v1/inventory_types.go b/pkg/apis/kubekey/v1/inventory_types.go
new file mode 100644
index 00000000..bc23bca0
--- /dev/null
+++ b/pkg/apis/kubekey/v1/inventory_types.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
// InventoryHost maps a host name to its host-scoped variables, stored as
// raw bytes so arbitrary values pass through without a fixed schema.
type InventoryHost map[string]runtime.RawExtension

// InventoryGroup is a named set of hosts. A group may also contain nested
// groups and carry group-scoped variables.
type InventoryGroup struct {
	// Groups are the names of nested groups contained in this group.
	Groups []string `json:"groups,omitempty"`
	// Hosts are the names of hosts contained in this group.
	Hosts []string `json:"hosts,omitempty"`
	// Vars are group-scoped variables.
	Vars runtime.RawExtension `json:"vars,omitempty"`
}

// InventorySpec defines the hosts, groups and variables of an Inventory.
type InventorySpec struct {
	// Hosts is all nodes
	Hosts InventoryHost `json:"hosts,omitempty"`
	// Vars for all hosts. The priority for vars is: host vars > group vars > inventory vars
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	Vars runtime.RawExtension `json:"vars,omitempty"`
	// Groups nodes. A group contains repeated nodes
	// +optional
	Groups map[string]InventoryGroup `json:"groups,omitempty"`
}
+
// Inventory is the node configuration for a playbook run: the full host
// list, host/group variables and group memberships.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced
type Inventory struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec InventorySpec `json:"spec,omitempty"`
	//Status InventoryStatus `json:"status,omitempty"`
}

// InventoryList contains a list of Inventory.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type InventoryList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Inventory `json:"items"`
}

// init registers the Inventory types with the package scheme builder.
func init() {
	SchemeBuilder.Register(&Inventory{}, &InventoryList{})
}
diff --git a/pkg/apis/kubekey/v1/pipeline_types.go b/pkg/apis/kubekey/v1/pipeline_types.go
new file mode 100644
index 00000000..6d2a04f6
--- /dev/null
+++ b/pkg/apis/kubekey/v1/pipeline_types.go
@@ -0,0 +1,154 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
// PipelinePhase is the lifecycle phase of a Pipeline.
type PipelinePhase string

const (
	// PipelinePhasePending means the pipeline has been accepted but not started.
	PipelinePhasePending PipelinePhase = "Pending"
	// PipelinePhaseRunning means the pipeline is currently executing.
	PipelinePhaseRunning PipelinePhase = "Running"
	// PipelinePhaseFailed means the pipeline finished with an error.
	PipelinePhaseFailed PipelinePhase = "Failed"
	// PipelinePhaseSucceed means the pipeline finished successfully.
	PipelinePhaseSucceed PipelinePhase = "Succeed"
)

const (
	// BuiltinsProjectAnnotation marks that the pipeline uses the builtins project of KubeKey.
	BuiltinsProjectAnnotation = "kubekey.kubesphere.io/builtins-project"
	// PauseAnnotation pauses the pipeline.
	PauseAnnotation = "kubekey.kubesphere.io/pause"
)
+
// PipelineSpec defines the desired execution of a playbook: where the
// project lives, which playbook to run, the node inventory and global
// variables to use, and how tag selection behaves.
type PipelineSpec struct {
	// Project is storage for executable packages
	// +optional
	Project PipelineProject `json:"project,omitempty"`
	// Playbook which to execute.
	Playbook string `json:"playbook"`
	// InventoryRef is the node configuration for playbook
	// +optional
	InventoryRef *corev1.ObjectReference `json:"inventoryRef,omitempty"`
	// ConfigRef is the global variable configuration for playbook
	// +optional
	ConfigRef *corev1.ObjectReference `json:"configRef,omitempty"`
	// Tags is the tags of playbook which to execute
	// +optional
	Tags []string `json:"tags,omitempty"`
	// SkipTags is the tags of playbook which skip execute
	// +optional
	SkipTags []string `json:"skipTags,omitempty"`
	// Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.
	// +optional
	Debug bool `json:"debug,omitempty"`
}
+
// PipelineProject describes where the executable playbook project is
// fetched from: a Git repository (http/https Addr) or a local path.
type PipelineProject struct {
	// Addr is the storage for executable packages (in Ansible file format).
	// When starting with http or https, it will be obtained from a Git repository.
	// When starting with file path, it will be obtained from the local path.
	// +optional
	Addr string `json:"addr,omitempty"`
	// Name is the project name base project
	// +optional
	Name string `json:"name,omitempty"`
	// Branch is the git branch of the git Addr.
	// +optional
	Branch string `json:"branch,omitempty"`
	// Tag is the git tag of the git Addr.
	// +optional
	Tag string `json:"tag,omitempty"`
	// InsecureSkipTLS skip tls or not when git addr is https.
	// +optional
	InsecureSkipTLS bool `json:"insecureSkipTLS,omitempty"`
	// Token of Authorization for http request
	// +optional
	Token string `json:"token,omitempty"`
}
+
// PipelineStatus is the observed state of a Pipeline.
type PipelineStatus struct {
	// TaskResult total related tasks execute result.
	TaskResult PipelineTaskResult `json:"taskResult,omitempty"`
	// Phase of pipeline.
	Phase PipelinePhase `json:"phase,omitempty"`
	// failed Reason of pipeline.
	Reason string `json:"reason,omitempty"`
	// FailedDetail will record the failed tasks.
	FailedDetail []PipelineFailedDetail `json:"failedDetail,omitempty"`
}

// PipelineTaskResult aggregates the execution counters of all tasks in a
// pipeline run.
type PipelineTaskResult struct {
	// Total number of tasks.
	Total int `json:"total,omitempty"`
	// Success number of tasks.
	Success int `json:"success,omitempty"`
	// Failed number of tasks.
	Failed int `json:"failed,omitempty"`
	// Skipped number of tasks.
	Skipped int `json:"skipped,omitempty"`
	// Ignored number of tasks.
	Ignored int `json:"ignored,omitempty"`
}

// PipelineFailedDetail records one failed task and its per-host results.
type PipelineFailedDetail struct {
	// Task name of failed task.
	Task string `json:"task,omitempty"`
	// failed Hosts Result of failed task.
	Hosts []PipelineFailedDetailHost `json:"hosts,omitempty"`
}

// PipelineFailedDetailHost records the output of a failed task on one host.
type PipelineFailedDetailHost struct {
	// Host name of failed task.
	Host string `json:"host,omitempty"`
	// Stdout of failed task.
	Stdout string `json:"stdout,omitempty"`
	// StdErr of failed task.
	StdErr string `json:"stdErr,omitempty"`
}
+
// Pipeline executes one playbook against an inventory with a given
// configuration, and records the aggregated task results in its status.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Playbook",type="string",JSONPath=".spec.playbook"
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase"
// +kubebuilder:printcolumn:name="Total",type="integer",JSONPath=".status.taskResult.total"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
type Pipeline struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   PipelineSpec   `json:"spec,omitempty"`
	Status PipelineStatus `json:"status,omitempty"`
}

// PipelineList contains a list of Pipeline.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PipelineList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Pipeline `json:"items"`
}

// init registers the Pipeline types with the package scheme builder.
func init() {
	SchemeBuilder.Register(&Pipeline{}, &PipelineList{})
}
diff --git a/pkg/apis/kubekey/v1/register.go b/pkg/apis/kubekey/v1/register.go
new file mode 100644
index 00000000..7eb22a49
--- /dev/null
+++ b/pkg/apis/kubekey/v1/register.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1 contains API Schema definitions for the kubekey v1 API group
+// +k8s:deepcopy-gen=package,register
+// +groupName=kubekey.kubesphere.io
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
var (
	// SchemeGroupVersion is the group version used to register these objects.
	SchemeGroupVersion = schema.GroupVersion{Group: "kubekey.kubesphere.io", Version: "v1"}

	// SchemeBuilder is used to add Go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
diff --git a/pkg/apis/kubekey/v1/zz_generated.deepcopy.go b/pkg/apis/kubekey/v1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..83582458
--- /dev/null
+++ b/pkg/apis/kubekey/v1/zz_generated.deepcopy.go
@@ -0,0 +1,402 @@
+//go:build !ignore_autogenerated
+
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+ if in == nil {
+ return nil
+ }
+ out := new(Config)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Config) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigList) DeepCopyInto(out *ConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Config, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList.
+func (in *ConfigList) DeepCopy() *ConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Inventory) DeepCopyInto(out *Inventory) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Inventory.
+func (in *Inventory) DeepCopy() *Inventory {
+ if in == nil {
+ return nil
+ }
+ out := new(Inventory)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Inventory) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InventoryGroup) DeepCopyInto(out *InventoryGroup) {
+ *out = *in
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Hosts != nil {
+ in, out := &in.Hosts, &out.Hosts
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.Vars.DeepCopyInto(&out.Vars)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryGroup.
+func (in *InventoryGroup) DeepCopy() *InventoryGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(InventoryGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in InventoryHost) DeepCopyInto(out *InventoryHost) {
+ {
+ in := &in
+ *out = make(InventoryHost, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryHost.
+func (in InventoryHost) DeepCopy() InventoryHost {
+ if in == nil {
+ return nil
+ }
+ out := new(InventoryHost)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InventoryList) DeepCopyInto(out *InventoryList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Inventory, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryList.
+func (in *InventoryList) DeepCopy() *InventoryList {
+ if in == nil {
+ return nil
+ }
+ out := new(InventoryList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InventoryList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InventorySpec) DeepCopyInto(out *InventorySpec) {
+ *out = *in
+ if in.Hosts != nil {
+ in, out := &in.Hosts, &out.Hosts
+ *out = make(InventoryHost, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ in.Vars.DeepCopyInto(&out.Vars)
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make(map[string]InventoryGroup, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventorySpec.
+func (in *InventorySpec) DeepCopy() *InventorySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(InventorySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Pipeline) DeepCopyInto(out *Pipeline) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline.
+func (in *Pipeline) DeepCopy() *Pipeline {
+ if in == nil {
+ return nil
+ }
+ out := new(Pipeline)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Pipeline) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineFailedDetail) DeepCopyInto(out *PipelineFailedDetail) {
+ *out = *in
+ if in.Hosts != nil {
+ in, out := &in.Hosts, &out.Hosts
+ *out = make([]PipelineFailedDetailHost, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineFailedDetail.
+func (in *PipelineFailedDetail) DeepCopy() *PipelineFailedDetail {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineFailedDetail)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineFailedDetailHost) DeepCopyInto(out *PipelineFailedDetailHost) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineFailedDetailHost.
+func (in *PipelineFailedDetailHost) DeepCopy() *PipelineFailedDetailHost {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineFailedDetailHost)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineList) DeepCopyInto(out *PipelineList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Pipeline, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineList.
+func (in *PipelineList) DeepCopy() *PipelineList {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PipelineList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineProject) DeepCopyInto(out *PipelineProject) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineProject.
+func (in *PipelineProject) DeepCopy() *PipelineProject {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineProject)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
+ *out = *in
+ out.Project = in.Project
+ if in.InventoryRef != nil {
+ in, out := &in.InventoryRef, &out.InventoryRef
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ if in.ConfigRef != nil {
+ in, out := &in.ConfigRef, &out.ConfigRef
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SkipTags != nil {
+ in, out := &in.SkipTags, &out.SkipTags
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec.
+func (in *PipelineSpec) DeepCopy() *PipelineSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineStatus) DeepCopyInto(out *PipelineStatus) {
+ *out = *in
+ out.TaskResult = in.TaskResult
+ if in.FailedDetail != nil {
+ in, out := &in.FailedDetail, &out.FailedDetail
+ *out = make([]PipelineFailedDetail, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStatus.
+func (in *PipelineStatus) DeepCopy() *PipelineStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PipelineTaskResult) DeepCopyInto(out *PipelineTaskResult) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskResult.
+func (in *PipelineTaskResult) DeepCopy() *PipelineTaskResult {
+ if in == nil {
+ return nil
+ }
+ out := new(PipelineTaskResult)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/apis/kubekey/v1alpha1/register.go b/pkg/apis/kubekey/v1alpha1/register.go
new file mode 100644
index 00000000..0469f018
--- /dev/null
+++ b/pkg/apis/kubekey/v1alpha1/register.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 is the internal version. should not register in kubernetes
+// +k8s:deepcopy-gen=package,register
+// +groupName=kubekey.kubesphere.io
+// +kubebuilder:skip
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+    // SchemeGroupVersion is the group version used to register these objects.
+    SchemeGroupVersion = schema.GroupVersion{Group: "kubekey.kubesphere.io", Version: "v1alpha1"}
+
+    // SchemeBuilder is used to add Go types to the GroupVersionKind scheme.
+    SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+
+    // AddToScheme adds the types in this group-version to the given scheme.
+    AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/pkg/apis/kubekey/v1alpha1/task_types.go b/pkg/apis/kubekey/v1alpha1/task_types.go
new file mode 100644
index 00000000..a4c5a8ca
--- /dev/null
+++ b/pkg/apis/kubekey/v1alpha1/task_types.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// TaskPhase is the lifecycle phase recorded in TaskStatus.Phase.
+type TaskPhase string
+
+const (
+    // TaskPhasePending means the task has not started executing.
+    TaskPhasePending TaskPhase = "Pending"
+    // TaskPhaseRunning means the task is currently executing.
+    TaskPhaseRunning TaskPhase = "Running"
+    // TaskPhaseSuccess means the task completed successfully.
+    TaskPhaseSuccess TaskPhase = "Success"
+    // TaskPhaseFailed means the task execution failed.
+    TaskPhaseFailed TaskPhase = "Failed"
+    // TaskPhaseSkipped means the task was skipped rather than executed.
+    TaskPhaseSkipped TaskPhase = "Skipped"
+    // TaskPhaseIgnored means the task failed but the failure is tolerated;
+    // Task.IsSucceed treats this phase as success.
+    TaskPhaseIgnored TaskPhase = "Ignored"
+)
+
+const (
+    // TaskAnnotationRole is the absolute dir of task in project.
+    TaskAnnotationRole = "kubesphere.io/role"
+)
+
+// KubeKeyTaskSpec describes a single executable task.
+type KubeKeyTaskSpec struct {
+    // Name is the task name.
+    Name string `json:"name,omitempty"`
+    // Hosts lists the hosts the task targets.
+    Hosts []string `json:"hosts,omitempty"`
+    // IgnoreError tolerates execution failure — presumably mapped to the
+    // Ignored phase; confirm against the executor.
+    IgnoreError bool `json:"ignoreError,omitempty"`
+    // Retries caps restarts: IsFailed only reports failure once
+    // Status.RestartCount has reached this value.
+    Retries int `json:"retries,omitempty"`
+
+    // When holds condition expressions — assumed to gate execution; verify
+    // the expression syntax against the executor.
+    When []string `json:"when,omitempty"`
+    // FailedWhen holds expressions that mark the task failed — TODO confirm.
+    FailedWhen []string `json:"failedWhen,omitempty"`
+    // Loop is an opaque loop definition (raw JSON/YAML).
+    Loop runtime.RawExtension `json:"loop,omitempty"`
+
+    // Module identifies the module to run together with its raw arguments.
+    Module Module `json:"module,omitempty"`
+    // Register names the variable the task result is stored under — TODO confirm.
+    Register string `json:"register,omitempty"`
+}
+
+// Module is a named module invocation with opaque arguments.
+type Module struct {
+    Name string `json:"name,omitempty"`
+    Args runtime.RawExtension `json:"args,omitempty"`
+}
+
+// TaskStatus is the observed state of a Task.
+type TaskStatus struct {
+    // RestartCount is how many times the task has been restarted.
+    RestartCount int `json:"restartCount,omitempty"`
+    Phase TaskPhase `json:"phase,omitempty"`
+    Conditions []TaskCondition `json:"conditions,omitempty"`
+    FailedDetail []TaskFailedDetail `json:"failedDetail,omitempty"`
+}
+
+// TaskCondition records one execution attempt with its per-host results.
+type TaskCondition struct {
+    StartTimestamp metav1.Time `json:"startTimestamp,omitempty"`
+    EndTimestamp metav1.Time `json:"endTimestamp,omitempty"`
+    // HostResults of runtime.RawExtension host. the key is host name. value is host result
+    HostResults []TaskHostResult `json:"hostResults,omitempty"`
+}
+
+// TaskFailedDetail captures the output of a failed host run.
+type TaskFailedDetail struct {
+    Host string `json:"host,omitempty"`
+    Stdout string `json:"stdout,omitempty"`
+    StdErr string `json:"stdErr,omitempty"`
+}
+
+// TaskHostResult captures the output of a host run.
+type TaskHostResult struct {
+    Host string `json:"host,omitempty"`
+    Stdout string `json:"stdout,omitempty"`
+    StdErr string `json:"stdErr,omitempty"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:scope=Namespaced
+
+// Task is the schema for one executable unit of a pipeline. It is an
+// internal version and is not registered with Kubernetes (see the package
+// comment in register.go).
+type Task struct {
+    metav1.TypeMeta `json:",inline"`
+    metav1.ObjectMeta `json:"metadata,omitempty"`
+
+    Spec KubeKeyTaskSpec `json:"spec,omitempty"`
+    Status TaskStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TaskList is a list of Task objects.
+type TaskList struct {
+    metav1.TypeMeta `json:",inline"`
+    metav1.ListMeta `json:"metadata,omitempty"`
+    Items []Task `json:"items"`
+}
+
+// IsComplete reports whether the task reached a terminal state: succeeded
+// (including tolerated failures), definitively failed, or skipped.
+func (t Task) IsComplete() bool {
+    return t.IsSucceed() || t.IsFailed() || t.IsSkipped()
+}
+
+// IsSkipped reports whether the task was skipped.
+func (t Task) IsSkipped() bool {
+    return t.Status.Phase == TaskPhaseSkipped
+}
+
+// IsSucceed reports whether the task succeeded; a failure in the Ignored
+// phase counts as success.
+func (t Task) IsSucceed() bool {
+    return t.Status.Phase == TaskPhaseSuccess || t.Status.Phase == TaskPhaseIgnored
+}
+// IsFailed reports whether the task failed AND its retries are exhausted
+// (RestartCount has reached Spec.Retries); a failed-but-retriable task is
+// neither failed nor complete.
+func (t Task) IsFailed() bool {
+    return t.Status.Phase == TaskPhaseFailed && t.Spec.Retries <= t.Status.RestartCount
+}
diff --git a/pkg/apis/kubekey/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/kubekey/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..32e717b3
--- /dev/null
+++ b/pkg/apis/kubekey/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,211 @@
+//go:build !ignore_autogenerated
+
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// NOTE(review): this file is controller-gen output ("DO NOT EDIT") —
+// regenerate with controller-gen rather than hand-editing.
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeKeyTaskSpec) DeepCopyInto(out *KubeKeyTaskSpec) {
+    *out = *in
+    if in.Hosts != nil {
+        in, out := &in.Hosts, &out.Hosts
+        *out = make([]string, len(*in))
+        copy(*out, *in)
+    }
+    if in.When != nil {
+        in, out := &in.When, &out.When
+        *out = make([]string, len(*in))
+        copy(*out, *in)
+    }
+    if in.FailedWhen != nil {
+        in, out := &in.FailedWhen, &out.FailedWhen
+        *out = make([]string, len(*in))
+        copy(*out, *in)
+    }
+    in.Loop.DeepCopyInto(&out.Loop)
+    in.Module.DeepCopyInto(&out.Module)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeKeyTaskSpec.
+func (in *KubeKeyTaskSpec) DeepCopy() *KubeKeyTaskSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeKeyTaskSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Module) DeepCopyInto(out *Module) {
+ *out = *in
+ in.Args.DeepCopyInto(&out.Args)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Module.
+func (in *Module) DeepCopy() *Module {
+ if in == nil {
+ return nil
+ }
+ out := new(Module)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Task) DeepCopyInto(out *Task) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Task.
+func (in *Task) DeepCopy() *Task {
+ if in == nil {
+ return nil
+ }
+ out := new(Task)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Task) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TaskCondition) DeepCopyInto(out *TaskCondition) {
+ *out = *in
+ in.StartTimestamp.DeepCopyInto(&out.StartTimestamp)
+ in.EndTimestamp.DeepCopyInto(&out.EndTimestamp)
+ if in.HostResults != nil {
+ in, out := &in.HostResults, &out.HostResults
+ *out = make([]TaskHostResult, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskCondition.
+func (in *TaskCondition) DeepCopy() *TaskCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(TaskCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TaskFailedDetail) DeepCopyInto(out *TaskFailedDetail) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskFailedDetail.
+func (in *TaskFailedDetail) DeepCopy() *TaskFailedDetail {
+ if in == nil {
+ return nil
+ }
+ out := new(TaskFailedDetail)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TaskHostResult) DeepCopyInto(out *TaskHostResult) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskHostResult.
+func (in *TaskHostResult) DeepCopy() *TaskHostResult {
+ if in == nil {
+ return nil
+ }
+ out := new(TaskHostResult)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TaskList) DeepCopyInto(out *TaskList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Task, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskList.
+func (in *TaskList) DeepCopy() *TaskList {
+ if in == nil {
+ return nil
+ }
+ out := new(TaskList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TaskList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TaskStatus) DeepCopyInto(out *TaskStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]TaskCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.FailedDetail != nil {
+ in, out := &in.FailedDetail, &out.FailedDetail
+ *out = make([]TaskFailedDetail, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskStatus.
+func (in *TaskStatus) DeepCopy() *TaskStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(TaskStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go
new file mode 100644
index 00000000..68e25daa
--- /dev/null
+++ b/pkg/cache/cache.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "sync"
+)
+
+// Cache is a named key/value store with explicit release and full cleanup.
+type Cache interface {
+    // Name returns the name of the cache pool.
+    Name() string
+    // Put stores the cached value for the given key.
+    Put(key string, value any)
+    // Get returns the cached value for the given key and whether it exists.
+    Get(key string) (any, bool)
+    // Release removes the cached value for the given id.
+    Release(id string)
+    // Clean removes every cached value.
+    Clean()
+}
+
+// local is an in-memory, mutex-guarded Cache implementation.
+type local struct {
+    name string
+    cache map[string]any
+
+    sync.Mutex
+}
+
+// Name returns the cache pool name.
+func (p *local) Name() string {
+    return p.name
+}
+
+// Put stores value under key, replacing any existing entry.
+func (p *local) Put(key string, value any) {
+    p.Lock()
+    defer p.Unlock()
+    p.cache[key] = value
+}
+
+// Get returns the cached value for key and whether it was present.
+// It takes the mutex: the map is concurrently mutated by Put, Release and
+// Clean (which all lock), so the original unguarded read was a data race.
+func (p *local) Get(key string) (any, bool) {
+    p.Lock()
+    defer p.Unlock()
+
+    v, ok := p.cache[key]
+    return v, ok
+}
+
+// Release removes the cached value for the given id, if any.
+func (p *local) Release(id string) {
+    p.Lock()
+    defer p.Unlock()
+    delete(p.cache, id)
+}
+
+// Clean drops every cached value by swapping in a fresh map.
+func (p *local) Clean() {
+    p.Lock()
+    defer p.Unlock()
+    p.cache = make(map[string]any)
+}
+
+// NewLocalCache returns an in-memory Cache with the given pool name.
+func NewLocalCache(name string) Cache {
+    c := &local{
+        name: name,
+        cache: make(map[string]any),
+    }
+    return c
+}
+
+var (
+    // LocalVariable is a local cache for variable.Variable
+    LocalVariable = NewLocalCache("variable")
+)
diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go
new file mode 100644
index 00000000..031ef1d4
--- /dev/null
+++ b/pkg/cache/cache_test.go
@@ -0,0 +1,31 @@
+package cache
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestCache exercises the full local Cache contract: Name, miss, Put/Get,
+// overwrite, Release, and Clean (the original test never covered overwrite
+// or Clean).
+func TestCache(t *testing.T) {
+    testCache := NewLocalCache("test")
+    assert.Equal(t, "test", testCache.Name())
+
+    // should not be able to get the key
+    _, ok := testCache.Get("foo")
+    assert.False(t, ok)
+
+    // put a key, then get it back
+    testCache.Put("foo", "bar")
+    v, ok := testCache.Get("foo")
+    assert.True(t, ok)
+    assert.Equal(t, "bar", v)
+
+    // overwriting an existing key keeps the latest value
+    testCache.Put("foo", "baz")
+    v, ok = testCache.Get("foo")
+    assert.True(t, ok)
+    assert.Equal(t, "baz", v)
+
+    // release the key; it should be gone
+    testCache.Release("foo")
+    _, ok = testCache.Get("foo")
+    assert.False(t, ok)
+
+    // Clean removes every remaining entry
+    testCache.Put("a", 1)
+    testCache.Put("b", 2)
+    testCache.Clean()
+    _, ok = testCache.Get("a")
+    assert.False(t, ok)
+    _, ok = testCache.Get("b")
+    assert.False(t, ok)
+}
diff --git a/pkg/cache/runtime_client.go b/pkg/cache/runtime_client.go
new file mode 100644
index 00000000..64d1866f
--- /dev/null
+++ b/pkg/cache/runtime_client.go
@@ -0,0 +1,386 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "context"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ jsonpatch "github.com/evanphx/json-patch"
+ apimeta "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/strategicpatch"
+ "k8s.io/klog/v2"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+ "sigs.k8s.io/yaml"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+// delegatingClient implements ctrlclient.Client. Most resources are
+// forwarded to the wrapped client; when no client is configured, or the
+// resource is a pipeline Task, it falls back to a YAML file store under the
+// work dir.
+type delegatingClient struct {
+    client ctrlclient.Client
+    scheme *runtime.Scheme
+}
+
+// NewDelegatingClient wraps client (which may be nil for pure file-based
+// operation) and builds a scheme with the kubekey v1 types registered.
+func NewDelegatingClient(client ctrlclient.Client) ctrlclient.Client {
+    scheme := runtime.NewScheme()
+    if err := kubekeyv1.AddToScheme(scheme); err != nil {
+        klog.Errorf("failed to add scheme: %v", err)
+    }
+    // NOTE(review): the v1alpha1 Task types are registered through the v1
+    // SchemeBuilder, i.e. under the v1 GroupVersion — confirm this is
+    // intentional rather than kubekeyv1alpha1.SchemeBuilder.
+    kubekeyv1.SchemeBuilder.Register(&kubekeyv1alpha1.Task{}, &kubekeyv1alpha1.TaskList{})
+    return &delegatingClient{
+        client: client,
+        scheme: scheme,
+    }
+}
+
+// Get delegates to the wrapped client when possible; otherwise it reads the
+// object from <workdir>/<runtime>/<namespace>/<resource>/<name>/<name>.yaml.
+func (d delegatingClient) Get(ctx context.Context, key ctrlclient.ObjectKey, obj ctrlclient.Object, opts ...ctrlclient.GetOption) error {
+    resource := _const.ResourceFromObject(obj)
+    if d.client != nil && resource != _const.RuntimePipelineTaskDir {
+        return d.client.Get(ctx, key, obj, opts...)
+    }
+    // an empty resource means ResourceFromObject did not recognize the type
+    if resource == "" {
+        return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
+    }
+
+    path := filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, key.Namespace, resource, key.Name, key.Name+".yaml")
+    data, err := os.ReadFile(path)
+    if err != nil {
+        klog.Errorf("failed to read yaml file: %v", err)
+        return err
+    }
+    if err := yaml.Unmarshal(data, obj); err != nil {
+        klog.Errorf("unmarshal file %s error %v", path, err)
+        return err
+    }
+    return nil
+}
+
+// List delegates to the wrapped client when possible. Otherwise it walks
+// every namespace directory under the runtime dir and loads each
+// <resource>/<name>/<name>.yaml it finds into the list.
+// NOTE(review): of the ListOptions only Namespace is honored here; label and
+// field selectors are silently ignored by the file-based path.
+func (d delegatingClient) List(ctx context.Context, list ctrlclient.ObjectList, opts ...ctrlclient.ListOption) error {
+    resource := _const.ResourceFromObject(list)
+    if d.client != nil && resource != _const.RuntimePipelineTaskDir {
+        return d.client.List(ctx, list, opts...)
+    }
+    if resource == "" {
+        return fmt.Errorf("unsupported object type: %s", list.GetObjectKind().GroupVersionKind().String())
+    }
+    // read all runtime.Object
+    var objects []runtime.Object
+    runtimeDirEntries, err := os.ReadDir(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir))
+    if err != nil && !os.IsNotExist(err) {
+        klog.Errorf("readDir %s error %v", filepath.Join(_const.GetWorkDir(), _const.RuntimeDir), err)
+        return err
+    }
+    for _, re := range runtimeDirEntries {
+        if re.IsDir() {
+            resourceDir := filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, re.Name(), resource)
+            entries, err := os.ReadDir(resourceDir)
+            if err != nil {
+                // a namespace with no objects of this resource is not an error
+                if os.IsNotExist(err) {
+                    continue
+                }
+                klog.Errorf("readDir %s error %v", resourceDir, err)
+                return err
+            }
+            for _, e := range entries {
+                if !e.IsDir() {
+                    continue
+                }
+                resourceFile := filepath.Join(resourceDir, e.Name(), e.Name()+".yaml")
+                data, err := os.ReadFile(resourceFile)
+                if err != nil {
+                    if os.IsNotExist(err) {
+                        continue
+                    }
+                    klog.Errorf("read file %s error: %v", resourceFile, err)
+                    return err
+                }
+                // instantiate the concrete type for this resource before decoding
+                var obj runtime.Object
+                switch resource {
+                case _const.RuntimePipelineDir:
+                    obj = &kubekeyv1.Pipeline{}
+                case _const.RuntimeInventoryDir:
+                    obj = &kubekeyv1.Inventory{}
+                case _const.RuntimeConfigDir:
+                    obj = &kubekeyv1.Config{}
+                case _const.RuntimePipelineTaskDir:
+                    obj = &kubekeyv1alpha1.Task{}
+                }
+                if err := yaml.Unmarshal(data, &obj); err != nil {
+                    klog.Errorf("unmarshal file %s error: %v", resourceFile, err)
+                    return err
+                }
+                objects = append(objects, obj)
+            }
+        }
+    }
+
+    o := ctrlclient.ListOptions{}
+    o.ApplyOptions(opts)
+
+    // filter by namespace when requested; iterate from the tail so the
+    // in-place deletes do not skip elements
+    switch {
+    case o.Namespace != "":
+        for i := len(objects) - 1; i >= 0; i-- {
+            if objects[i].(metav1.Object).GetNamespace() != o.Namespace {
+                objects = append(objects[:i], objects[i+1:]...)
+            }
+        }
+    }
+
+    if err := apimeta.SetList(list, objects); err != nil {
+        return err
+    }
+    return nil
+}
+
+// Create persists obj: via the wrapped client when available, otherwise as a
+// YAML file in the file store (creating the object's directory first).
+func (d delegatingClient) Create(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.CreateOption) error {
+    resource := _const.ResourceFromObject(obj)
+    if d.client != nil && resource != _const.RuntimePipelineTaskDir {
+        return d.client.Create(ctx, obj, opts...)
+    }
+    if resource == "" {
+        return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
+    }
+
+    data, err := yaml.Marshal(obj)
+    if err != nil {
+        klog.Errorf("failed to marshal object: %v", err)
+        return err
+    }
+    // NOTE(review): fs.ModePerm (0777) is used for the file as well as the
+    // directories — confirm world-writable files are intended here.
+    if err := os.MkdirAll(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName()), fs.ModePerm); err != nil {
+        klog.Errorf("create dir %s error: %v", filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName()), err)
+        return err
+    }
+    return os.WriteFile(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName(), obj.GetName()+".yaml"), data, fs.ModePerm)
+}
+
+// Delete removes the object: through the wrapped client when available,
+// otherwise by removing its whole directory from the file store.
+func (d delegatingClient) Delete(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.DeleteOption) error {
+    resource := _const.ResourceFromObject(obj)
+    if d.client != nil && resource != _const.RuntimePipelineTaskDir {
+        return d.client.Delete(ctx, obj, opts...)
+    }
+    if resource == "" {
+        return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
+    }
+
+    objDir := filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName())
+    return os.RemoveAll(objDir)
+}
+
+// Update rewrites the object's YAML file (or delegates to the wrapped
+// client). NOTE(review): unlike Create it does not MkdirAll, so the object
+// must already have been created or WriteFile fails.
+func (d delegatingClient) Update(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.UpdateOption) error {
+    resource := _const.ResourceFromObject(obj)
+    if d.client != nil && resource != _const.RuntimePipelineTaskDir {
+        return d.client.Update(ctx, obj, opts...)
+    }
+    if resource == "" {
+        return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
+    }
+
+    data, err := yaml.Marshal(obj)
+    if err != nil {
+        klog.Errorf("failed to marshal object: %v", err)
+        return err
+    }
+    return os.WriteFile(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName(), obj.GetName()+".yaml"), data, fs.ModePerm)
+}
+
+// Patch delegates to the wrapped client when possible. In file-based mode it
+// serializes the in-memory obj wholesale.
+// NOTE(review): patchData is only used as an emptiness check — the computed
+// patch is never applied to the stored copy (see the unused getPatchedJSON
+// helper below). This assumes callers always pass the fully patched object;
+// confirm that assumption.
+func (d delegatingClient) Patch(ctx context.Context, obj ctrlclient.Object, patch ctrlclient.Patch, opts ...ctrlclient.PatchOption) error {
+    resource := _const.ResourceFromObject(obj)
+    if d.client != nil && resource != _const.RuntimePipelineTaskDir {
+        return d.client.Patch(ctx, obj, patch, opts...)
+    }
+    if resource == "" {
+        return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
+    }
+
+    patchData, err := patch.Data(obj)
+    if err != nil {
+        klog.Errorf("failed to get patch data: %v", err)
+        return err
+    }
+    if len(patchData) == 0 {
+        klog.V(4).Infof("nothing to patch, skip")
+        return nil
+    }
+    data, err := yaml.Marshal(obj)
+    if err != nil {
+        klog.Errorf("failed to marshal object: %v", err)
+        return err
+    }
+    return os.WriteFile(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName(), obj.GetName()+".yaml"), data, fs.ModePerm)
+}
+
+// DeleteAllOf delegates to the wrapped client when possible.
+// NOTE(review): the file-based path deletes only the given object and
+// ignores the DeleteAllOf options entirely, so it does not actually delete
+// "all of" a type — confirm whether that is acceptable for Tasks.
+func (d delegatingClient) DeleteAllOf(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.DeleteAllOfOption) error {
+    resource := _const.ResourceFromObject(obj)
+    if d.client != nil && resource != _const.RuntimePipelineTaskDir {
+        return d.client.DeleteAllOf(ctx, obj, opts...)
+    }
+    if resource == "" {
+        return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
+    }
+    return d.Delete(ctx, obj)
+}
+
+// Status returns the wrapped client's status writer when available, else the
+// file-backed subresource writer (its client field is nil on this path).
+func (d delegatingClient) Status() ctrlclient.SubResourceWriter {
+    if d.client != nil {
+        return d.client.Status()
+    }
+    return &delegatingSubResourceWriter{client: d.client}
+}
+
+// SubResource delegates to the wrapped client.
+// NOTE(review): returns nil in file-only mode — callers must guard for that.
+func (d delegatingClient) SubResource(subResource string) ctrlclient.SubResourceClient {
+    if d.client != nil {
+        return d.client.SubResource(subResource)
+    }
+    return nil
+}
+
+// Scheme returns the wrapped client's scheme, or the locally built one.
+func (d delegatingClient) Scheme() *runtime.Scheme {
+    if d.client != nil {
+        return d.client.Scheme()
+    }
+    return d.scheme
+}
+
+// RESTMapper delegates to the wrapped client; nil in file-only mode.
+func (d delegatingClient) RESTMapper() apimeta.RESTMapper {
+    if d.client != nil {
+        return d.client.RESTMapper()
+    }
+    return nil
+}
+
+// GroupVersionKindFor resolves obj's GVK via the wrapped client or the
+// local scheme.
+func (d delegatingClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) {
+    if d.client != nil {
+        return d.client.GroupVersionKindFor(obj)
+    }
+    return apiutil.GVKForObject(obj, d.scheme)
+}
+
+// IsObjectNamespaced delegates when possible; the file-only mode treats
+// every object as namespaced.
+func (d delegatingClient) IsObjectNamespaced(obj runtime.Object) (bool, error) {
+    if d.client != nil {
+        return d.client.IsObjectNamespaced(obj)
+    }
+    return true, nil
+}
+
+// delegatingSubResourceWriter writes status subresources either through the
+// wrapped client or into the YAML file store.
+type delegatingSubResourceWriter struct {
+    client ctrlclient.Client
+}
+
+// Create persists obj's status. NOTE(review): in file-based mode the
+// subResource argument is ignored and the whole object is rewritten.
+func (d delegatingSubResourceWriter) Create(ctx context.Context, obj ctrlclient.Object, subResource ctrlclient.Object, opts ...ctrlclient.SubResourceCreateOption) error {
+    resource := _const.ResourceFromObject(obj)
+    if d.client != nil && resource != _const.RuntimePipelineTaskDir {
+        return d.client.Status().Create(ctx, obj, subResource, opts...)
+    }
+    if resource == "" {
+        return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
+    }
+
+    data, err := yaml.Marshal(obj)
+    if err != nil {
+        klog.Errorf("failed to marshal object: %v", err)
+        return err
+    }
+    return os.WriteFile(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName(), obj.GetName()+".yaml"), data, fs.ModePerm)
+
+}
+
+// Update rewrites the whole object file with the current in-memory state
+// (or delegates status Update to the wrapped client).
+func (d delegatingSubResourceWriter) Update(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.SubResourceUpdateOption) error {
+    resource := _const.ResourceFromObject(obj)
+    if d.client != nil && resource != _const.RuntimePipelineTaskDir {
+        return d.client.Status().Update(ctx, obj, opts...)
+    }
+    if resource == "" {
+        return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
+    }
+
+    data, err := yaml.Marshal(obj)
+    if err != nil {
+        klog.Errorf("failed to marshal object: %v", err)
+        return err
+    }
+    return os.WriteFile(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName(), obj.GetName()+".yaml"), data, fs.ModePerm)
+}
+
+// Patch delegates status Patch when possible; the file-based path mirrors
+// delegatingClient.Patch.
+// NOTE(review): patchData is only an emptiness check — the patch itself is
+// never applied; the already mutated obj is serialized wholesale.
+func (d delegatingSubResourceWriter) Patch(ctx context.Context, obj ctrlclient.Object, patch ctrlclient.Patch, opts ...ctrlclient.SubResourcePatchOption) error {
+    resource := _const.ResourceFromObject(obj)
+    if d.client != nil && resource != _const.RuntimePipelineTaskDir {
+        return d.client.Status().Patch(ctx, obj, patch, opts...)
+    }
+    if resource == "" {
+        return fmt.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
+    }
+
+    patchData, err := patch.Data(obj)
+    if err != nil {
+        klog.Errorf("failed to get patch data: %v", err)
+        return err
+    }
+    if len(patchData) == 0 {
+        klog.V(4).Infof("nothing to patch, skip")
+        return nil
+    }
+    data, err := yaml.Marshal(obj)
+    if err != nil {
+        klog.Errorf("failed to marshal object: %v", err)
+        return err
+    }
+    return os.WriteFile(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir, obj.GetNamespace(), resource, obj.GetName(), obj.GetName()+".yaml"), data, fs.ModePerm)
+}
+
+// getPatchedJSON applies patchJS (a JSON patch, merge patch, or strategic
+// merge patch, selected by patchType) to originalJS and returns the patched
+// document. creater instantiates a typed object for strategic merge patches.
+// Fixes over the original: the result variable no longer shadows the stdlib
+// "bytes" package name, the error string follows Go conventions (lowercase),
+// and the "key:" slice is guarded so a changed upstream message cannot
+// cause an out-of-range slice.
+func getPatchedJSON(patchType types.PatchType, originalJS, patchJS []byte, gvk schema.GroupVersionKind, creater runtime.ObjectCreater) ([]byte, error) {
+    switch patchType {
+    case types.JSONPatchType:
+        patchObj, err := jsonpatch.DecodePatch(patchJS)
+        if err != nil {
+            return nil, err
+        }
+        patched, err := patchObj.Apply(originalJS)
+        // TODO: This is pretty hacky, we need a better structured error from the json-patch
+        if err != nil && strings.Contains(err.Error(), "doc is missing key") {
+            msg := err.Error()
+            if ix := strings.Index(msg, "key:"); ix >= 0 {
+                return patched, fmt.Errorf("object to be patched is missing field (%s)", msg[ix+5:])
+            }
+        }
+        return patched, err
+
+    case types.MergePatchType:
+        return jsonpatch.MergePatch(originalJS, patchJS)
+
+    case types.StrategicMergePatchType:
+        // get a typed object for this GVK if we need to apply a strategic merge patch
+        obj, err := creater.New(gvk)
+        if err != nil {
+            return nil, fmt.Errorf("cannot apply strategic merge patch for %s locally, try --type merge", gvk.String())
+        }
+        return strategicpatch.StrategicMergePatch(originalJS, patchJS, obj)
+
+    default:
+        // only here as a safety net - go-restful filters content-type
+        return nil, fmt.Errorf("unknown Content-Type header for patch: %v", patchType)
+    }
+}
diff --git a/pkg/connector/connector.go b/pkg/connector/connector.go
new file mode 100644
index 00000000..e8f5cd39
--- /dev/null
+++ b/pkg/connector/connector.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package connector
+
+import (
+ "context"
+ "io"
+ "io/fs"
+ "os"
+
+ "k8s.io/utils/exec"
+
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// Connector is the interface for connecting to a (possibly remote) host.
+type Connector interface {
+    // Init initializes the connection
+    Init(ctx context.Context) error
+    // Close closes the connection
+    Close(ctx context.Context) error
+    // CopyFile writes the given content to remoteFile on the target host
+    // with the given file mode.
+    CopyFile(ctx context.Context, local []byte, remoteFile string, mode fs.FileMode) error
+    // FetchFile streams remoteFile from the target host into local.
+    FetchFile(ctx context.Context, remoteFile string, local io.Writer) error
+    // ExecuteCommand executes a command on the target host and returns its
+    // output.
+    ExecuteCommand(ctx context.Context, cmd string) ([]byte, error)
+}
+
+// NewConnector creates a connector for host based on the "connector" var:
+// "local" forces a local connector, "ssh" forces SSH, and anything else
+// falls back to local when host names this machine, SSH otherwise.
+// The duplicated sshConnector construction of the original (and its double
+// lookup of ssh_host) is factored into newSSHConnector.
+func NewConnector(host string, vars variable.VariableData) Connector {
+    switch vars["connector"] {
+    case "local":
+        return &localConnector{Cmd: exec.New()}
+    case "ssh":
+        return newSSHConnector(host, vars)
+    default:
+        // no explicit connector: run locally when the host is this machine
+        localHost, _ := os.Hostname()
+        if localHost == host {
+            return &localConnector{Cmd: exec.New()}
+        }
+        return newSSHConnector(host, vars)
+    }
+}
+
+// newSSHConnector builds an sshConnector from the ssh_* vars, preferring
+// ssh_host over the inventory host name when it is set.
+func newSSHConnector(host string, vars variable.VariableData) *sshConnector {
+    if h := variable.StringVar(vars, "ssh_host"); h != nil {
+        host = *h
+    }
+    return &sshConnector{
+        Host: host,
+        Port: variable.IntVar(vars, "ssh_port"),
+        User: variable.StringVar(vars, "ssh_user"),
+        Password: variable.StringVar(vars, "ssh_password"),
+    }
+}
diff --git a/pkg/connector/local_connector.go b/pkg/connector/local_connector.go
new file mode 100644
index 00000000..63be5ddd
--- /dev/null
+++ b/pkg/connector/local_connector.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package connector
+
+import (
+ "context"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "k8s.io/klog/v2"
+ "k8s.io/utils/exec"
+)
+
+// localConnector runs everything on the local machine via the injected exec
+// interface (fakeable in tests).
+type localConnector struct {
+    Cmd exec.Interface
+}
+
+// Init is a no-op: there is no connection to establish locally.
+func (c *localConnector) Init(ctx context.Context) error {
+    return nil
+}
+
+// Close is a no-op: there is no connection to tear down locally.
+func (c *localConnector) Close(ctx context.Context) error {
+    return nil
+}
+
+// CopyFile writes local content to remoteFile (creating the parent
+// directory if needed) and sets its mode.
+// Fix: the original never closed the file returned by os.Create, leaking a
+// descriptor on every call.
+func (c *localConnector) CopyFile(ctx context.Context, local []byte, remoteFile string, mode fs.FileMode) error {
+    // create remote file
+    if _, err := os.Stat(filepath.Dir(remoteFile)); err != nil {
+        klog.Warningf("Failed to stat dir %s: %v create it", filepath.Dir(remoteFile), err)
+        if err := os.MkdirAll(filepath.Dir(remoteFile), mode); err != nil {
+            klog.Errorf("Failed to create dir %s: %v", filepath.Dir(remoteFile), err)
+            return err
+        }
+    }
+    rf, err := os.Create(remoteFile)
+    if err != nil {
+        klog.Errorf("Failed to create file %s: %v", remoteFile, err)
+        return err
+    }
+    defer rf.Close()
+    if _, err := rf.Write(local); err != nil {
+        klog.Errorf("Failed to write file %s: %v", remoteFile, err)
+        return err
+    }
+    return rf.Chmod(mode)
+}
+
+func (c *localConnector) FetchFile(ctx context.Context, remoteFile string, local io.Writer) error {
+ var err error
+ file, err := os.Open(remoteFile)
+ if err != nil {
+ klog.Errorf("Failed to read file %s: %v", remoteFile, err)
+ return err
+ }
+ if _, err := io.Copy(local, file); err != nil {
+ klog.Errorf("Failed to copy file %s: %v", remoteFile, err)
+ return err
+ }
+ return nil
+}
+
+// ExecuteCommand runs cmd locally and returns its combined stdout/stderr.
+// NOTE(review): the whole cmd string is passed as the program name with no
+// separate arguments — it is not run through a shell. Confirm callers pass
+// a bare binary path, or that the exec implementation handles this.
+func (c *localConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) {
+    return c.Cmd.CommandContext(ctx, cmd).CombinedOutput()
+}
+
+// copyFile duplicates the file at sourcePath into destinationPath.
+func (c *localConnector) copyFile(sourcePath, destinationPath string) error {
+    src, err := os.Open(sourcePath)
+    if err != nil {
+        return err
+    }
+    defer src.Close()
+
+    dst, err := os.Create(destinationPath)
+    if err != nil {
+        return err
+    }
+    defer dst.Close()
+
+    _, err = io.Copy(dst, src)
+    return err
+}
diff --git a/pkg/connector/local_connector_test.go b/pkg/connector/local_connector_test.go
new file mode 100644
index 00000000..da0b3fc3
--- /dev/null
+++ b/pkg/connector/local_connector_test.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package connector
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/utils/exec"
+ testingexec "k8s.io/utils/exec/testing"
+)
+
+// newFakeLocalConnector builds a localConnector whose exec interface is
+// faked: invoking exactly runCmd yields output, any other command fails
+// with "error command".
+func newFakeLocalConnector(runCmd string, output string) *localConnector {
+    fakeCmd := func(action testingexec.FakeAction) exec.Cmd {
+        return &testingexec.FakeCmd{CombinedOutputScript: []testingexec.FakeAction{action}}
+    }
+    script := func(cmd string, args ...string) exec.Cmd {
+        full := strings.TrimSpace(fmt.Sprintf("%s %s", cmd, strings.Join(args, " ")))
+        if full == runCmd {
+            return fakeCmd(func() ([]byte, []byte, error) {
+                return []byte(output), nil, nil
+            })
+        }
+        return fakeCmd(func() ([]byte, []byte, error) {
+            return nil, nil, fmt.Errorf("error command")
+        })
+    }
+    return &localConnector{
+        Cmd: &testingexec.FakeExec{CommandScript: []testingexec.FakeCommandAction{script}},
+    }
+}
+
+// TestLocalConnector_ExecuteCommand verifies ExecuteCommand against the fake
+// exec: the scripted command succeeds, anything else returns an error.
+// (Renamed from TestSshConnector_ExecuteCommand: it exercises the local
+// connector, not the SSH one; also fixes the "exceptedErr" typo.)
+func TestLocalConnector_ExecuteCommand(t *testing.T) {
+    testcases := []struct {
+        name        string
+        cmd         string
+        expectedErr error
+    }{
+        {
+            name:        "execute command succeed",
+            cmd:         "echo 'hello'",
+            expectedErr: nil,
+        },
+        {
+            name:        "execute command failed",
+            cmd:         "echo 'hello1'",
+            expectedErr: fmt.Errorf("error command"),
+        },
+    }
+
+    for _, tc := range testcases {
+        t.Run(tc.name, func(t *testing.T) {
+            ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+            defer cancel()
+            lc := newFakeLocalConnector("echo 'hello'", "hello")
+            _, err := lc.ExecuteCommand(ctx, tc.cmd)
+            assert.Equal(t, tc.expectedErr, err)
+        })
+    }
+}
diff --git a/pkg/connector/ssh_connector.go b/pkg/connector/ssh_connector.go
new file mode 100644
index 00000000..417ad2ff
--- /dev/null
+++ b/pkg/connector/ssh_connector.go
@@ -0,0 +1,131 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package connector
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/fs"
+ "path/filepath"
+ "strconv"
+
+ "github.com/pkg/sftp"
+ "golang.org/x/crypto/ssh"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/pointer"
+)
+
+// sshConnector connects to a remote host over SSH. Host is required; Port
+// defaults to 22 and User/Password are optional (see Init). client is
+// populated by Init and used by the other methods.
+type sshConnector struct {
+    Host     string
+    Port     *int
+    User     *string
+    Password *string
+    client   *ssh.Client
+}
+
+// Init validates the connection parameters, defaults the port to 22, and
+// dials the SSH server, storing the resulting client on the connector.
+// Password auth is used only when a password is set; User defaults to "".
+func (c *sshConnector) Init(ctx context.Context) error {
+    if c.Host == "" {
+        return fmt.Errorf("host is not set")
+    }
+    if c.Port == nil {
+        c.Port = pointer.Int(22)
+    }
+    var auth []ssh.AuthMethod
+    if c.Password != nil {
+        auth = []ssh.AuthMethod{
+            ssh.Password(*c.Password),
+        }
+    }
+    // NOTE(review): InsecureIgnoreHostKey skips host-key verification, which
+    // permits man-in-the-middle attacks — confirm this is acceptable for the
+    // deployment environments this tool targets.
+    sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", c.Host, strconv.Itoa(*c.Port)), &ssh.ClientConfig{
+        User:            pointer.StringDeref(c.User, ""),
+        Auth:            auth,
+        HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+    })
+    if err != nil {
+        return err
+    }
+    c.client = sshClient
+
+    return nil
+}
+
+// Close tears down the SSH connection. It is a no-op when Init never
+// succeeded (guards against a nil-pointer panic on c.client).
+func (c *sshConnector) Close(ctx context.Context) error {
+    if c.client == nil {
+        return nil
+    }
+    return c.client.Close()
+}
+
+// CopyFile writes src to remoteFile on the remote host over SFTP, creating
+// the parent directory if needed and applying mode to the created file.
+func (c *sshConnector) CopyFile(ctx context.Context, src []byte, remoteFile string, mode fs.FileMode) error {
+    // create sftp client on the existing SSH connection
+    sftpClient, err := sftp.NewClient(c.client)
+    if err != nil {
+        klog.Errorf("Failed to create sftp client: %v", err)
+        return err
+    }
+    defer sftpClient.Close()
+    // ensure the remote parent directory exists, then create the file
+    if _, err := sftpClient.Stat(filepath.Dir(remoteFile)); err != nil {
+        klog.Warningf("Failed to stat dir %s: %v create it", filepath.Dir(remoteFile), err)
+        if err := sftpClient.MkdirAll(filepath.Dir(remoteFile)); err != nil {
+            klog.Errorf("Failed to create dir %s: %v", filepath.Dir(remoteFile), err)
+            return err
+        }
+    }
+    rf, err := sftpClient.Create(remoteFile)
+    if err != nil {
+        klog.Errorf("Failed to create file %s: %v", remoteFile, err)
+        return err
+    }
+    defer rf.Close()
+
+    if _, err = rf.Write(src); err != nil {
+        klog.Errorf("Failed to write file %s: %v", remoteFile, err)
+        return err
+    }
+    // apply the requested permissions as the final step
+    return rf.Chmod(mode)
+}
+
+// FetchFile streams remoteFile from the remote host into local over SFTP.
+func (c *sshConnector) FetchFile(ctx context.Context, remoteFile string, local io.Writer) error {
+    // open an SFTP session on the existing SSH connection
+    sftpClient, err := sftp.NewClient(c.client)
+    if err != nil {
+        klog.Errorf("Failed to create sftp client: %v", err)
+        return err
+    }
+    defer sftpClient.Close()
+
+    src, err := sftpClient.Open(remoteFile)
+    if err != nil {
+        klog.Errorf("Failed to open file %s: %v", remoteFile, err)
+        return err
+    }
+    defer src.Close()
+
+    if _, err := io.Copy(local, src); err != nil {
+        klog.Errorf("Failed to copy file %s: %v", remoteFile, err)
+        return err
+    }
+    return nil
+}
+
+// ExecuteCommand runs cmd in a fresh SSH session and returns its combined
+// stdout/stderr output. The session is closed when the command completes.
+func (c *sshConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) {
+    // create ssh session
+    session, err := c.client.NewSession()
+    if err != nil {
+        return nil, err
+    }
+    defer session.Close()
+
+    return session.CombinedOutput(cmd)
+}
diff --git a/pkg/const/context.go b/pkg/const/context.go
new file mode 100644
index 00000000..e06246c3
--- /dev/null
+++ b/pkg/const/context.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package _const
+
+// Context keys used while marshalling playbook.Block.
+const (
+    // CtxBlockHosts is the context key carrying the block's hosts.
+    CtxBlockHosts = "block-hosts"
+    // CtxBlockRole is the context key carrying the block's role.
+    CtxBlockRole = "block-role"
+    // CtxBlockWhen is the context key carrying the block's when-conditions.
+    CtxBlockWhen = "block-when"
+    // CtxBlockTaskUID is the context key carrying the block's task UID.
+    CtxBlockTaskUID = "block-task-uid"
+)
diff --git a/pkg/const/helper.go b/pkg/const/helper.go
new file mode 100644
index 00000000..8496ff1a
--- /dev/null
+++ b/pkg/const/helper.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package _const
+
+import (
+ "path/filepath"
+ "sync"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/klog/v2"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
+)
+
+// workDirOnce ensures workDir is assigned at most once per process.
+var workDirOnce = &sync.Once{}
+
+// SetWorkDir sets the package-level workdir. Only the first call takes
+// effect; subsequent calls are silently ignored.
+func SetWorkDir(wd string) {
+    workDirOnce.Do(func() {
+        workDir = wd
+    })
+}
+
+// GetWorkDir returns the workdir set by SetWorkDir (empty if never set).
+func GetWorkDir() string {
+    return workDir
+}
+
+// ResourceFromObject maps a typed kubekey API object (or its list type) to
+// the runtime sub-directory its resources are stored under. Unknown types
+// yield the empty string.
+func ResourceFromObject(obj runtime.Object) string {
+    switch obj.(type) {
+    case *kubekeyv1.Pipeline, *kubekeyv1.PipelineList:
+        return RuntimePipelineDir
+    case *kubekeyv1.Config, *kubekeyv1.ConfigList:
+        return RuntimeConfigDir
+    case *kubekeyv1.Inventory, *kubekeyv1.InventoryList:
+        return RuntimeInventoryDir
+    case *kubekeyv1alpha1.Task, *kubekeyv1alpha1.TaskList:
+        return RuntimePipelineTaskDir
+    default:
+        return ""
+    }
+}
+
+// RuntimeDirFromObject returns the on-disk runtime directory for obj,
+// laid out as <workDir>/runtime/<namespace>/<resource>/<name>.
+// It returns "" (after logging) for unsupported object types or objects
+// that do not implement metav1.Object.
+func RuntimeDirFromObject(obj runtime.Object) string {
+    resource := ResourceFromObject(obj)
+    if resource == "" {
+        klog.Errorf("unsupported object type: %s", obj.GetObjectKind().GroupVersionKind().String())
+        return ""
+    }
+    mo, ok := obj.(metav1.Object)
+
+    if !ok {
+        klog.Errorf("failed convert to metav1.Object: %s", obj.GetObjectKind().GroupVersionKind().String())
+        return ""
+    }
+    return filepath.Join(workDir, RuntimeDir, mo.GetNamespace(), resource, mo.GetName())
+}
diff --git a/pkg/const/helper_test.go b/pkg/const/helper_test.go
new file mode 100644
index 00000000..7e2a94e3
--- /dev/null
+++ b/pkg/const/helper_test.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package _const
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
+)
+
+// TestWorkDir checks the set-once semantics of SetWorkDir/GetWorkDir.
+// NOTE(review): this test mutates package-level state guarded by sync.Once,
+// so it depends on running before any other test that calls SetWorkDir —
+// confirm test ordering assumptions hold for this package.
+func TestWorkDir(t *testing.T) {
+    // should not get workdir before set
+    assert.Empty(t, GetWorkDir())
+    // set workdir
+    SetWorkDir("/tmp")
+    assert.Equal(t, "/tmp", GetWorkDir())
+    // should not set workdir again
+    SetWorkDir("/tmp2")
+    assert.Equal(t, "/tmp", GetWorkDir())
+}
+
+// TestResourceFromObject checks the type-to-directory mapping for every
+// supported kubekey resource and the empty-string fallback for unknown types.
+func TestResourceFromObject(t *testing.T) {
+    assert.Equal(t, RuntimePipelineDir, ResourceFromObject(&kubekeyv1.Pipeline{}))
+    assert.Equal(t, RuntimePipelineDir, ResourceFromObject(&kubekeyv1.PipelineList{}))
+    assert.Equal(t, RuntimeConfigDir, ResourceFromObject(&kubekeyv1.Config{}))
+    assert.Equal(t, RuntimeConfigDir, ResourceFromObject(&kubekeyv1.ConfigList{}))
+    assert.Equal(t, RuntimeInventoryDir, ResourceFromObject(&kubekeyv1.Inventory{}))
+    assert.Equal(t, RuntimeInventoryDir, ResourceFromObject(&kubekeyv1.InventoryList{}))
+    assert.Equal(t, RuntimePipelineTaskDir, ResourceFromObject(&kubekeyv1alpha1.Task{}))
+    assert.Equal(t, RuntimePipelineTaskDir, ResourceFromObject(&kubekeyv1alpha1.TaskList{}))
+    assert.Equal(t, "", ResourceFromObject(&unstructured.Unstructured{}))
+}
diff --git a/pkg/const/workdir.go b/pkg/const/workdir.go
new file mode 100644
index 00000000..3d3a3b97
--- /dev/null
+++ b/pkg/const/workdir.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package _const
+
+/** a kubekey workdir like that:
+workdir/
+|-- projects/
+| |-- ansible-project1/
+| | |-- playbooks/
+| | |-- roles/
+| | | |-- roleName/
+| | | | |-- tasks/
+| | | | | |-- main.yml
+| | | | |-- defaults/
+| | | | | |-- main.yml
+| | | | |-- templates/
+| | | | |-- files/
+| |
+| |-- ansible-project2/
+| |
+|
+|-- runtime/
+| |-- namespace/
+| | |-- pipelines/
+| | | |-- pipelineName/
+| | | | |-- pipeline.yaml
+| | | | |-- variable/
+| | | | | |-- location.json
+| | | | | |-- hostname.json
+| | |-- tasks/
+| | | |-- taskName/
+| | | | |-- task.yaml
+| | |-- configs/
+| | | |-- configName/
+| | | | |-- config.yaml
+| | |-- inventories/
+| | | |-- inventoryName/
+| | | | |-- inventory.yaml
+*/
+
+// workDir is the user-specified working directory. By default, it is the same as the directory where the kubekey command is executed.
+var workDir string
+
+// ProjectDir is a fixed directory name under workdir, used to store the Ansible project.
+const ProjectDir = "projects"
+
+// ansible-project is the name of different Ansible projects
+
+// ProjectPlaybooksDir is a fixed directory name under ansible-project. used to store executable playbook files.
+const ProjectPlaybooksDir = "playbooks"
+
+// ProjectRolesDir is a fixed directory name under ansible-project. used to store roles which playbook need.
+const ProjectRolesDir = "roles"
+
+// roleName is the name of different roles
+
+// ProjectRolesTasksDir is a fixed directory name under roleName. used to store task which role need.
+const ProjectRolesTasksDir = "tasks"
+
+// ProjectRolesTasksMainFile is a fixed file under tasks. it must run if the role run. support *.yaml or *.yml
+const ProjectRolesTasksMainFile = "main"
+
+// ProjectRolesDefaultsDir is a fixed directory name under roleName. it set default variables to role.
+const ProjectRolesDefaultsDir = "defaults"
+
+// ProjectRolesDefaultsMainFile is a fixed file under defaults. support *.yaml or *.yml
+const ProjectRolesDefaultsMainFile = "main"
+
+// ProjectRolesTemplateDir is a fixed directory name under roleName. used to store template which task need.
+const ProjectRolesTemplateDir = "templates"
+
+// ProjectRolesFilesDir is a fixed directory name under roleName. used to store files which task need.
+const ProjectRolesFilesDir = "files"
+
+// RuntimeDir is a fixed directory name under workdir, used to store the runtime data of the current task execution.
+const RuntimeDir = "runtime"
+
+// namespace is the namespace for resource of Pipeline,Task,Config,Inventory.
+
+// RuntimePipelineDir store Pipeline resources
+const RuntimePipelineDir = "pipelines"
+
+// pipelineName is the name of Pipeline resource
+
+// pipeline.yaml is the data of Pipeline resource
+
+// RuntimePipelineVariableDir is a fixed directory name under runtime, used to store the task execution parameters.
+const RuntimePipelineVariableDir = "variable"
+
+// RuntimePipelineVariableLocationFile is a location variable file under RuntimePipelineVariableDir
+const RuntimePipelineVariableLocationFile = "location.json"
+
+// hostname.json is host variable file under RuntimePipelineVariableDir. Each host has a separate file.
+
+// RuntimePipelineTaskDir is a fixed directory name under runtime, used to store the task execution status.
+const RuntimePipelineTaskDir = "tasks"
+
+// taskName is the name of Task resource
+
+// task.yaml is the data of Task resource
+
+// RuntimeConfigDir store Config resources
+const RuntimeConfigDir = "configs"
+
+// configName is the name of Config resource
+
+// config.yaml is the data of Config resource
+
+// RuntimeInventoryDir store Inventory resources
+const RuntimeInventoryDir = "inventories"
+
+// inventoryName is the name of Inventory resource
+
+// inventory.yaml is the data of Inventory resource
diff --git a/pkg/controllers/options.go b/pkg/controllers/options.go
new file mode 100644
index 00000000..dd92f33d
--- /dev/null
+++ b/pkg/controllers/options.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
+
+// Options configures controller registration: ControllerGates selects which
+// controllers are enabled (see IsControllerEnabled) and the embedded
+// controller-runtime Options are passed through to each controller.
+type Options struct {
+    ControllerGates []string
+
+    ctrlcontroller.Options
+}
+
+// IsControllerEnabled reports whether the named controller is enabled.
+// The first exact match wins: "name" enables, "-name" disables; a "*"
+// entry enables every controller not explicitly matched.
+func (o Options) IsControllerEnabled(name string) bool {
+    enabledByStar := false
+    for _, gate := range o.ControllerGates {
+        switch gate {
+        case name:
+            return true
+        case "-" + name:
+            return false
+        case "*":
+            enabledByStar = true
+        }
+    }
+
+    return enabledByStar
+}
diff --git a/pkg/controllers/pipeline_controller.go b/pkg/controllers/pipeline_controller.go
new file mode 100644
index 00000000..0ac4d040
--- /dev/null
+++ b/pkg/controllers/pipeline_controller.go
@@ -0,0 +1,138 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/klog/v2"
+ ctrl "sigs.k8s.io/controller-runtime"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/task"
+)
+
+// PipelineReconciler reconciles Pipeline resources, advancing them through
+// their phases and handing runnable pipelines off to the TaskController.
+type PipelineReconciler struct {
+    ctrlclient.Client
+    record.EventRecorder
+
+    TaskController task.Controller
+}
+
+// Reconcile drives a Pipeline through its phase state machine:
+// "" -> Pending -> Running; terminal phases (Failed/Succeed) trigger cleanup.
+// Deleted or missing pipelines are ignored.
+func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+    klog.Infof("[Pipeline %s] begin reconcile", req.NamespacedName.String())
+    defer func() {
+        klog.Infof("[Pipeline %s] end reconcile", req.NamespacedName.String())
+    }()
+
+    pipeline := &kubekeyv1.Pipeline{}
+    if err := r.Client.Get(ctx, req.NamespacedName, pipeline); err != nil {
+        if errors.IsNotFound(err) {
+            klog.V(5).Infof("[Pipeline %s] pipeline not found", req.NamespacedName.String())
+            return ctrl.Result{}, nil
+        }
+        return ctrl.Result{}, err
+    }
+
+    if pipeline.DeletionTimestamp != nil {
+        klog.V(5).Infof("[Pipeline %s] pipeline is deleting", req.NamespacedName.String())
+        return ctrl.Result{}, nil
+    }
+
+    switch pipeline.Status.Phase {
+    case "":
+        // a brand-new pipeline moves to Pending
+        base := pipeline.DeepCopy()
+        pipeline.Status.Phase = kubekeyv1.PipelinePhasePending
+        if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(base)); err != nil {
+            klog.Errorf("[Pipeline %s] update pipeline error: %v", ctrlclient.ObjectKeyFromObject(pipeline), err)
+            return ctrl.Result{}, err
+        }
+    case kubekeyv1.PipelinePhasePending:
+        // a pending pipeline moves to Running
+        base := pipeline.DeepCopy()
+        pipeline.Status.Phase = kubekeyv1.PipelinePhaseRunning
+        if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(base)); err != nil {
+            klog.Errorf("[Pipeline %s] update pipeline error: %v", ctrlclient.ObjectKeyFromObject(pipeline), err)
+            return ctrl.Result{}, err
+        }
+    case kubekeyv1.PipelinePhaseRunning:
+        return r.dealRunningPipeline(ctx, pipeline)
+    case kubekeyv1.PipelinePhaseFailed, kubekeyv1.PipelinePhaseSucceed:
+        // terminal phases share the same cleanup path
+        r.clean(ctx, pipeline)
+    }
+    return ctrl.Result{}, nil
+}
+
+// dealRunningPipeline hands a Running pipeline's tasks to the TaskController.
+// Paused pipelines (PauseAnnotation present) are skipped. Any status change
+// made here is patched back via the deferred update.
+func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *kubekeyv1.Pipeline) (ctrl.Result, error) {
+    if _, ok := pipeline.Annotations[kubekeyv1.PauseAnnotation]; ok {
+        // if pipeline is paused, do nothing
+        klog.V(5).Infof("[Pipeline %s] pipeline is paused", ctrlclient.ObjectKeyFromObject(pipeline))
+        return ctrl.Result{}, nil
+    }
+
+    cp := pipeline.DeepCopy()
+    defer func() {
+        // update pipeline status
+        if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(cp)); err != nil {
+            klog.Errorf("[Pipeline %s] update pipeline error: %v", ctrlclient.ObjectKeyFromObject(pipeline), err)
+        }
+    }()
+
+    // on failure, mark the pipeline Failed (patched by the defer) and requeue
+    if err := r.TaskController.AddTasks(ctx, task.AddTaskOptions{
+        Pipeline: pipeline,
+    }); err != nil {
+        klog.Errorf("[Pipeline %s] add task error: %v", ctrlclient.ObjectKeyFromObject(pipeline), err)
+        pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed
+        pipeline.Status.Reason = fmt.Sprintf("add task to controller failed: %v", err)
+        return ctrl.Result{}, err
+    }
+
+    return ctrl.Result{}, nil
+}
+
+// clean removes the runtime directory once a pipeline finishes, but only on
+// success and only when debug mode is off (Failed pipelines keep their data).
+// NOTE(review): this deletes <workDir>/runtime entirely, i.e. runtime data
+// for ALL pipelines, not just this one — confirm that is intended when
+// multiple pipelines can run concurrently.
+func (r *PipelineReconciler) clean(ctx context.Context, pipeline *kubekeyv1.Pipeline) {
+    if !pipeline.Spec.Debug && pipeline.Status.Phase == kubekeyv1.PipelinePhaseSucceed {
+        klog.Infof("[Pipeline %s] clean runtimeDir", ctrlclient.ObjectKeyFromObject(pipeline))
+        // clean runtime directory
+        if err := os.RemoveAll(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir)); err != nil {
+            klog.Errorf("clean runtime directory %s error: %v", filepath.Join(_const.GetWorkDir(), _const.RuntimeDir), err)
+        }
+    }
+}
+
+// SetupWithManager registers the reconciler with the manager, unless the
+// "pipeline" controller gate is disabled (in which case it is a no-op).
+func (r *PipelineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options Options) error {
+    if !options.IsControllerEnabled("pipeline") {
+        klog.V(5).Infof("pipeline controller is disabled")
+        return nil
+    }
+
+    return ctrl.NewControllerManagedBy(mgr).
+        WithOptions(options.Options).
+        For(&kubekeyv1.Pipeline{}).
+        Complete(r)
+}
diff --git a/pkg/controllers/task_controller.go b/pkg/controllers/task_controller.go
new file mode 100644
index 00000000..5d94e482
--- /dev/null
+++ b/pkg/controllers/task_controller.go
@@ -0,0 +1,412 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/strings/slices"
+ ctrl "sigs.k8s.io/controller-runtime"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
+ "github.com/kubesphere/kubekey/v4/pkg/cache"
+ "github.com/kubesphere/kubekey/v4/pkg/converter"
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+ "github.com/kubesphere/kubekey/v4/pkg/modules"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// TaskReconciler reconciles Task resources, executing their modules on the
+// target hosts and rolling results up into the owning Pipeline's status.
+type TaskReconciler struct {
+    // Client to resources
+    ctrlclient.Client
+    // VariableCache to store variable
+    VariableCache cache.Cache
+}
+
+// taskReconcileOptions bundles the task, its owning pipeline, and the
+// pipeline's variable store for the deal*Task helpers.
+type taskReconcileOptions struct {
+    *kubekeyv1.Pipeline
+    *kubekeyv1alpha1.Task
+    variable.Variable
+}
+
+// Reconcile processes one Task: it resolves the owning Pipeline, obtains (or
+// creates) the pipeline's variable store, dispatches by task phase
+// (Failed -> retry, Pending -> dependency check, Running -> execute), and on
+// exit recomputes the pipeline status from all of its tasks.
+func (r *TaskReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
+    klog.V(5).Infof("[Task %s] start reconcile", request.String())
+    defer klog.V(5).Infof("[Task %s] finish reconcile", request.String())
+    // get task
+    var task = &kubekeyv1alpha1.Task{}
+    if err := r.Client.Get(ctx, request.NamespacedName, task); err != nil {
+        // NOTE(review): the error is swallowed (nil returned), so transient
+        // Get failures are not retried — confirm this is intentional.
+        klog.Errorf("get task %s error %v", request, err)
+        return ctrl.Result{}, nil
+    }
+
+    // if task is deleted, skip
+    if task.DeletionTimestamp != nil {
+        klog.V(5).Infof("[Task %s] task is deleted, skip", request.String())
+        return ctrl.Result{}, nil
+    }
+
+    // get pipeline from the task's owner reference
+    var pipeline = &kubekeyv1.Pipeline{}
+    for _, ref := range task.OwnerReferences {
+        if ref.Kind == "Pipeline" {
+            if err := r.Client.Get(ctx, types.NamespacedName{Namespace: task.Namespace, Name: ref.Name}, pipeline); err != nil {
+                klog.Errorf("[Task %s] get pipeline %s error %v", request.String(), types.NamespacedName{Namespace: task.Namespace, Name: ref.Name}.String(), err)
+                if errors.IsNotFound(err) {
+                    klog.V(4).Infof("[Task %s] pipeline is deleted, skip", request.String())
+                    return ctrl.Result{}, nil
+                }
+                return ctrl.Result{}, err
+            }
+            break
+        }
+    }
+
+    // a paused pipeline suspends processing of its tasks
+    if _, ok := pipeline.Annotations[kubekeyv1.PauseAnnotation]; ok {
+        klog.V(5).Infof("[Task %s] pipeline is paused, skip", request.String())
+        return ctrl.Result{}, nil
+    }
+
+    // get (or lazily create) the pipeline-scoped variable store
+    var v variable.Variable
+    if vc, ok := r.VariableCache.Get(string(pipeline.UID)); !ok {
+        // create new variable
+        nv, err := variable.New(variable.Options{
+            Ctx:      ctx,
+            Client:   r.Client,
+            Pipeline: *pipeline,
+        })
+        if err != nil {
+            return ctrl.Result{}, err
+        }
+        r.VariableCache.Put(string(pipeline.UID), nv)
+        v = nv
+    } else {
+        v = vc.(variable.Variable)
+    }
+
+    // on exit, recompute the pipeline status from all tasks owned by it
+    defer func() {
+        var nsTasks = &kubekeyv1alpha1.TaskList{}
+        klog.V(5).Infof("[Task %s] update pipeline %s status", ctrlclient.ObjectKeyFromObject(task).String(), ctrlclient.ObjectKeyFromObject(pipeline).String())
+        if err := r.Client.List(ctx, nsTasks, ctrlclient.InNamespace(task.Namespace)); err != nil {
+            klog.Errorf("[Task %s] list task error %v", ctrlclient.ObjectKeyFromObject(task).String(), err)
+            return
+        }
+        // filter by ownerReference: drop tasks not owned by this pipeline
+        for i := len(nsTasks.Items) - 1; i >= 0; i-- {
+            var hasOwner bool
+            for _, ref := range nsTasks.Items[i].OwnerReferences {
+                if ref.UID == pipeline.UID && ref.Kind == "Pipeline" {
+                    hasOwner = true
+                }
+            }
+
+            if !hasOwner {
+                nsTasks.Items = append(nsTasks.Items[:i], nsTasks.Items[i+1:]...)
+            }
+        }
+        cp := pipeline.DeepCopy()
+        converter.CalculatePipelineStatus(nsTasks, pipeline)
+        if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(cp)); err != nil {
+            klog.Errorf("[Task %s] update pipeline %s status error %v", ctrlclient.ObjectKeyFromObject(task).String(), pipeline.Name, err)
+        }
+    }()
+
+    switch task.Status.Phase {
+    case kubekeyv1alpha1.TaskPhaseFailed:
+        // retry a failed task while it still has retries budget left
+        if task.Spec.Retries > task.Status.RestartCount {
+            task.Status.Phase = kubekeyv1alpha1.TaskPhasePending
+            task.Status.RestartCount++
+            if err := r.Client.Update(ctx, task); err != nil {
+                klog.Errorf("update task %s error %v", task.Name, err)
+                return ctrl.Result{}, err
+            }
+        }
+        return ctrl.Result{}, nil
+    case kubekeyv1alpha1.TaskPhasePending:
+        // deal pending task
+        return r.dealPendingTask(ctx, taskReconcileOptions{
+            Pipeline: pipeline,
+            Task:     task,
+            Variable: v,
+        })
+    case kubekeyv1alpha1.TaskPhaseRunning:
+        // deal running task
+        return r.dealRunningTask(ctx, taskReconcileOptions{
+            Pipeline: pipeline,
+            Task:     task,
+            Variable: v,
+        })
+    default:
+        return ctrl.Result{}, nil
+    }
+}
+
+// dealPendingTask decides the next phase of a Pending task from the state of
+// the tasks it depends on: requeue while they are still pending, promote to
+// Running when they allow it, or mark the task Skipped.
+func (r *TaskReconciler) dealPendingTask(ctx context.Context, options taskReconcileOptions) (ctrl.Result, error) {
+    // find dependency tasks
+    dl, err := options.Variable.Get(variable.DependencyTasks{
+        LocationUID: string(options.Task.UID),
+    })
+    if err != nil {
+        klog.Errorf("[Task %s] find dependency error %v", ctrlclient.ObjectKeyFromObject(options.Task).String(), err)
+        return ctrl.Result{}, err
+    }
+    dt, ok := dl.(variable.DependencyTask)
+    if !ok {
+        klog.Errorf("[Task %s] failed to convert dependency", ctrlclient.ObjectKeyFromObject(options.Task).String())
+        return ctrl.Result{}, fmt.Errorf("[Task %s] failed to convert dependency", ctrlclient.ObjectKeyFromObject(options.Task).String())
+    }
+
+    var nsTasks = &kubekeyv1alpha1.TaskList{}
+    if err := r.Client.List(ctx, nsTasks, ctrlclient.InNamespace(options.Task.Namespace)); err != nil {
+        klog.Errorf("[Task %s] list task error %v", ctrlclient.ObjectKeyFromObject(options.Task).String(), err)
+        return ctrl.Result{}, err
+    }
+    // filter by ownerReference: keep only tasks owned by this pipeline
+    for i := len(nsTasks.Items) - 1; i >= 0; i-- {
+        var hasOwner bool
+        for _, ref := range nsTasks.Items[i].OwnerReferences {
+            if ref.UID == options.Pipeline.UID && ref.Kind == "Pipeline" {
+                hasOwner = true
+            }
+        }
+
+        if !hasOwner {
+            nsTasks.Items = append(nsTasks.Items[:i], nsTasks.Items[i+1:]...)
+        }
+    }
+    // collect the tasks this one depends on
+    var dts []kubekeyv1alpha1.Task
+    for _, t := range nsTasks.Items {
+        if slices.Contains(dt.Tasks, string(t.UID)) {
+            dts = append(dts, t)
+        }
+    }
+    // Based on the results of the executed tasks dependent on, infer the next phase of the current task.
+    switch dt.Strategy(dts) {
+    case kubekeyv1alpha1.TaskPhasePending:
+        return ctrl.Result{Requeue: true}, nil
+    case kubekeyv1alpha1.TaskPhaseRunning:
+        // update task phase to running
+        options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseRunning
+        if err := r.Client.Update(ctx, options.Task); err != nil {
+            klog.Errorf("[Task %s] update task to Running error %v", ctrlclient.ObjectKeyFromObject(options.Task), err)
+        }
+        return ctrl.Result{Requeue: true}, nil
+    case kubekeyv1alpha1.TaskPhaseSkipped:
+        options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseSkipped
+        if err := r.Client.Update(ctx, options.Task); err != nil {
+            klog.Errorf("[Task %s] update task to Skipped error %v", ctrlclient.ObjectKeyFromObject(options.Task), err)
+        }
+        return ctrl.Result{}, nil
+    default:
+        // fixed typo: "TependencyTask" -> "DependencyTask"
+        return ctrl.Result{}, fmt.Errorf("unknown DependencyTask.Strategy result. only support: Pending, Running, Skipped")
+    }
+}
+
+// dealRunningTask executes a Running task's module via executeTask.
+// NOTE(review): execution errors are logged and then dropped (nil is
+// returned either way), so a failed executeTask is not requeued here —
+// confirm the phase set inside executeTask is the intended retry trigger.
+func (r *TaskReconciler) dealRunningTask(ctx context.Context, options taskReconcileOptions) (ctrl.Result, error) {
+    // find task in location
+    klog.Infof("[Task %s] dealRunningTask begin", ctrlclient.ObjectKeyFromObject(options.Task))
+    defer func() {
+        klog.Infof("[Task %s] dealRunningTask end, task phase: %s", ctrlclient.ObjectKeyFromObject(options.Task), options.Task.Status.Phase)
+    }()
+
+    if err := r.executeTask(ctx, options); err != nil {
+        klog.Errorf("[Task %s] execute task error %v", ctrlclient.ObjectKeyFromObject(options.Task), err)
+        return ctrl.Result{}, nil
+    }
+    return ctrl.Result{}, nil
+}
+
+// executeTask runs the task's module on every target host concurrently,
+// collects per-host results into a new TaskCondition, and derives the final
+// task phase (Success/Failed/Ignored) from those results.
+//
+// Fixes: the per-host goroutine captured the loop variable "h" in two places
+// instead of the per-iteration copy "host" (a data race / wrong-host bug on
+// Go < 1.22), and the loop-item Merge error was silently ignored.
+func (r *TaskReconciler) executeTask(ctx context.Context, options taskReconcileOptions) error {
+    cd := kubekeyv1alpha1.TaskCondition{
+        StartTimestamp: metav1.Now(),
+    }
+    // always record the condition and persist the task status on exit
+    defer func() {
+        cd.EndTimestamp = metav1.Now()
+        options.Task.Status.Conditions = append(options.Task.Status.Conditions, cd)
+        if err := r.Client.Update(ctx, options.Task); err != nil {
+            klog.Errorf("[Task %s] update task status error %v", ctrlclient.ObjectKeyFromObject(options.Task), err)
+        }
+    }()
+
+    // run the module on each host in its own goroutine
+    wg := &wait.Group{}
+    dataChan := make(chan kubekeyv1alpha1.TaskHostResult, len(options.Task.Spec.Hosts))
+    for _, h := range options.Task.Spec.Hosts {
+        host := h // per-iteration copy; the closure must only use this
+        wg.StartWithContext(ctx, func(ctx context.Context) {
+            var stdout, stderr string
+            // publish the result and, if requested, register it to the parent location
+            defer func() {
+                if stderr != "" {
+                    klog.Errorf("[Task %s] run failed: %s", ctrlclient.ObjectKeyFromObject(options.Task), stderr)
+                }
+
+                dataChan <- kubekeyv1alpha1.TaskHostResult{
+                    Host:   host,
+                    Stdout: stdout,
+                    StdErr: stderr,
+                }
+                if options.Task.Spec.Register != "" {
+                    puid, err := options.Variable.Get(variable.ParentLocation{LocationUID: string(options.Task.UID)})
+                    if err != nil {
+                        klog.Errorf("[Task %s] get location error %v", ctrlclient.ObjectKeyFromObject(options.Task), err)
+                        return
+                    }
+                    // set variable to parent location (was "h": loop-capture bug)
+                    if err := options.Variable.Merge(variable.HostMerge{
+                        HostNames:   []string{host},
+                        LocationUID: puid.(string),
+                        Data: variable.VariableData{
+                            options.Task.Spec.Register: map[string]string{
+                                "stdout": stdout,
+                                "stderr": stderr,
+                            },
+                        },
+                    }); err != nil {
+                        klog.Errorf("[Task %s] register error %v", ctrlclient.ObjectKeyFromObject(options.Task), err)
+                        return
+                    }
+                }
+            }()
+
+            lg, err := options.Variable.Get(variable.LocationVars{
+                HostName:    host,
+                LocationUID: string(options.Task.UID),
+            })
+            if err != nil {
+                stderr = err.Error()
+                return
+            }
+            // check when condition
+            if len(options.Task.Spec.When) > 0 {
+                ok, err := tmpl.ParseBool(lg.(variable.VariableData), options.Task.Spec.When)
+                if err != nil {
+                    stderr = err.Error()
+                    return
+                }
+                if !ok {
+                    stdout = "skip by when"
+                    return
+                }
+            }
+
+            data := variable.Extension2Slice(options.Task.Spec.Loop)
+            if len(data) == 0 {
+                // no loop: execute the module once
+                stdout, stderr = r.executeModule(ctx, options.Task, modules.ExecOptions{
+                    Args:     options.Task.Spec.Module.Args,
+                    Host:     host,
+                    Variable: options.Variable,
+                    Task:     *options.Task,
+                    Pipeline: *options.Pipeline,
+                })
+            } else {
+                // loop: render each item, publish it as "item", then execute
+                for _, item := range data {
+                    switch item.(type) {
+                    case string:
+                        item, err = tmpl.ParseString(lg.(variable.VariableData), item.(string))
+                        if err != nil {
+                            stderr = err.Error()
+                            return
+                        }
+                    case variable.VariableData:
+                        for k, v := range item.(variable.VariableData) {
+                            sv, err := tmpl.ParseString(lg.(variable.VariableData), v.(string))
+                            if err != nil {
+                                stderr = err.Error()
+                                return
+                            }
+                            item.(map[string]any)[k] = sv
+                        }
+                    default:
+                        stderr = "unknown loop vars, only support string or map[string]string"
+                        return
+                    }
+                    // set item to runtime variable (was "h": loop-capture bug;
+                    // the Merge error was previously ignored)
+                    if err := options.Variable.Merge(variable.HostMerge{
+                        HostNames:   []string{host},
+                        LocationUID: string(options.Task.UID),
+                        Data: variable.VariableData{
+                            "item": item,
+                        },
+                    }); err != nil {
+                        stderr = err.Error()
+                        return
+                    }
+                    stdout, stderr = r.executeModule(ctx, options.Task, modules.ExecOptions{
+                        Args:     options.Task.Spec.Module.Args,
+                        Host:     host,
+                        Variable: options.Variable,
+                        Task:     *options.Task,
+                        Pipeline: *options.Pipeline,
+                    })
+                }
+            }
+        })
+    }
+    // close the channel once every host goroutine has finished
+    go func() {
+        wg.Wait()
+        close(dataChan)
+    }()
+
+    // fold per-host results into the final task phase
+    options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseSuccess
+    for data := range dataChan {
+        if data.StdErr != "" {
+            if options.Task.Spec.IgnoreError {
+                options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseIgnored
+            } else {
+                options.Task.Status.Phase = kubekeyv1alpha1.TaskPhaseFailed
+                options.Task.Status.FailedDetail = append(options.Task.Status.FailedDetail, kubekeyv1alpha1.TaskFailedDetail{
+                    Host:   data.Host,
+                    Stdout: data.Stdout,
+                    StdErr: data.StdErr,
+                })
+            }
+        }
+        cd.HostResults = append(cd.HostResults, data)
+    }
+
+    return nil
+}
+
+// executeModule evaluates the task's failedWhen condition for the given host
+// and, if it does not trip, dispatches to the named module, returning the
+// module's (stdout, stderr) pair. Errors are reported via the stderr string.
+func (r *TaskReconciler) executeModule(ctx context.Context, task *kubekeyv1alpha1.Task, opts modules.ExecOptions) (string, string) {
+    lg, err := opts.Variable.Get(variable.LocationVars{
+        HostName:    opts.Host,
+        LocationUID: string(task.UID),
+    })
+    if err != nil {
+        klog.Errorf("[Task %s] get location variable error %v", ctrlclient.ObjectKeyFromObject(task), err)
+        return "", err.Error()
+    }
+
+    // check failed when condition
+    if len(task.Spec.FailedWhen) > 0 {
+        ok, err := tmpl.ParseBool(lg.(variable.VariableData), task.Spec.FailedWhen)
+        if err != nil {
+            klog.Errorf("[Task %s] validate FailedWhen condition error %v", ctrlclient.ObjectKeyFromObject(task), err)
+            return "", err.Error()
+        }
+        if ok {
+            return "", "failed by failedWhen"
+        }
+    }
+
+    return modules.FindModule(task.Spec.Module.Name)(ctx, opts)
+}
diff --git a/pkg/converter/converter.go b/pkg/converter/converter.go
new file mode 100644
index 00000000..27d517d7
--- /dev/null
+++ b/pkg/converter/converter.go
@@ -0,0 +1,432 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package converter
+
+import (
+ "context"
+ "fmt"
+ "io/fs"
+ "math"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "gopkg.in/yaml.v3"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/apimachinery/pkg/util/rand"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/pointer"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/project"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// MarshalPlaybook loads the playbook file at pbPath from baseFS and converts
+// it — together with imported playbooks, referenced roles and included task
+// files — into a single flattened kkcorev1.Playbook, validating the result
+// before returning it.
+func MarshalPlaybook(baseFS fs.FS, pbPath string) (*kkcorev1.Playbook, error) {
+	// convert playbook to kkcorev1.Playbook
+	pb := &kkcorev1.Playbook{}
+	if err := loadPlaybook(baseFS, pbPath, pb); err != nil {
+		klog.Errorf(" load playbook with include %s failed: %v", pbPath, err)
+		return nil, err
+	}
+
+	// Resolve role references into their task blocks and default variables.
+	if err := convertRoles(baseFS, pbPath, pb); err != nil {
+		klog.Errorf("convertRoles error %v", err)
+		return nil, err
+	}
+
+	// Expand include_tasks references into inline blocks.
+	if err := convertIncludeTasks(baseFS, pbPath, pb); err != nil {
+		klog.Errorf("convertIncludeTasks error %v", err)
+		return nil, err
+	}
+
+	if err := pb.Validate(); err != nil {
+		klog.Errorf("validate playbook %s failed: %v", pbPath, err)
+		return nil, err
+	}
+	return pb, nil
+}
+
+// loadPlaybook reads the playbook at pbPath and appends its plays to pb,
+// recursively inlining any import_playbook references so the result is one
+// flat playbook. For each play it also loads the referenced roles'
+// tasks/main.yaml into the role's Block list.
+//
+// NOTE(review): there is no cycle detection, so playbooks that import each
+// other would recurse forever — confirm inputs are trusted.
+// NOTE(review): convertRoles later reloads the same role blocks; the role
+// loading here looks redundant — confirm before removing either side.
+func loadPlaybook(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
+	// baseDir is the local ansible project dir which playbook belong to
+	pbData, err := fs.ReadFile(baseFS, pbPath)
+	if err != nil {
+		klog.Errorf("read playbook %s failed: %v", pbPath, err)
+		return err
+	}
+	var plays []kkcorev1.Play
+	if err := yaml.Unmarshal(pbData, &plays); err != nil {
+		klog.Errorf("unmarshal playbook %s failed: %v", pbPath, err)
+		return err
+	}
+
+	for _, p := range plays {
+		if p.ImportPlaybook != "" {
+			importPlaybook := project.GetPlaybookBaseFromPlaybook(baseFS, pbPath, p.ImportPlaybook)
+			if importPlaybook == "" {
+				return fmt.Errorf("cannot found import playbook %s", importPlaybook)
+			}
+			// Inline the imported playbook's plays ahead of this one.
+			if err := loadPlaybook(baseFS, importPlaybook, pb); err != nil {
+				return err
+			}
+		}
+
+		// fill block in roles
+		for i, r := range p.Roles {
+			roleBase := project.GetRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
+			if roleBase == "" {
+				return fmt.Errorf("cannot found role %s", r.Role)
+			}
+			mainTask := project.GetYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
+			if mainTask == "" {
+				return fmt.Errorf("cannot found main task for role %s", r.Role)
+			}
+
+			rdata, err := fs.ReadFile(baseFS, mainTask)
+			if err != nil {
+				klog.Errorf("read role %s failed: %v", mainTask, err)
+				return err
+			}
+			var blocks []kkcorev1.Block
+			if err := yaml.Unmarshal(rdata, &blocks); err != nil {
+				klog.Errorf("unmarshal role %s failed: %v", r.Role, err)
+				return err
+			}
+			p.Roles[i].Block = blocks
+		}
+		// Append the play itself (after any plays pulled in by its imports).
+		pb.Play = append(pb.Play, p)
+	}
+
+	return nil
+}
+
+// convertRoles convert roleName to block
+func convertRoles(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
+ for i, p := range pb.Play {
+ for i, r := range p.Roles {
+ roleBase := project.GetRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
+ if roleBase == "" {
+ return fmt.Errorf("cannot found role %s", r.Role)
+ }
+
+ // load block
+ mainTask := project.GetYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
+ if mainTask == "" {
+ return fmt.Errorf("cannot found main task for role %s", r.Role)
+ }
+
+ rdata, err := fs.ReadFile(baseFS, mainTask)
+ if err != nil {
+ klog.Errorf("read role %s failed: %v", mainTask, err)
+ return err
+ }
+ var blocks []kkcorev1.Block
+ if err := yaml.Unmarshal(rdata, &blocks); err != nil {
+ klog.Errorf("unmarshal role %s failed: %v", r.Role, err)
+ return err
+ }
+ p.Roles[i].Block = blocks
+
+ // load defaults (optional)
+ mainDefault := project.GetYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesDefaultsDir, _const.ProjectRolesDefaultsMainFile))
+ if mainDefault != "" {
+ mainData, err := fs.ReadFile(baseFS, mainDefault)
+ if err != nil {
+ klog.Errorf("read defaults variable for role %s error: %v", r.Role, err)
+ return err
+ }
+ var vars variable.VariableData
+ if err := yaml.Unmarshal(mainData, &vars); err != nil {
+ klog.Errorf("unmarshal defaults variable for role %s error: %v", r.Role, err)
+ return err
+ }
+ p.Roles[i].Vars = vars
+ }
+ }
+ pb.Play[i] = p
+ }
+ return nil
+}
+
+// convertIncludeTasks from file to blocks
+func convertIncludeTasks(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
+ var pbBase = filepath.Dir(filepath.Dir(pbPath))
+ for _, play := range pb.Play {
+ if err := fileToBlock(baseFS, pbBase, play.PreTasks); err != nil {
+ return err
+ }
+ if err := fileToBlock(baseFS, pbBase, play.Tasks); err != nil {
+ return err
+ }
+ if err := fileToBlock(baseFS, pbBase, play.PostTasks); err != nil {
+ return err
+ }
+
+ for _, r := range play.Roles {
+ roleBase := project.GetRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
+ if err := fileToBlock(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir), r.Block); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// fileToBlock recursively expands include_tasks references inside blocks:
+// any block naming an include file has that file (resolved relative to
+// baseDir) parsed and attached as its child Block list. The expansion then
+// recurses into Block, Rescue and Always children so nested includes are
+// resolved as well. Blocks are mutated in place.
+func fileToBlock(baseFS fs.FS, baseDir string, blocks []kkcorev1.Block) error {
+	for i, b := range blocks {
+		if b.IncludeTasks != "" {
+			data, err := fs.ReadFile(baseFS, filepath.Join(baseDir, b.IncludeTasks))
+			if err != nil {
+				klog.Errorf("readFile %s error %v", filepath.Join(baseDir, b.IncludeTasks), err)
+				return err
+			}
+			var bs []kkcorev1.Block
+			if err := yaml.Unmarshal(data, &bs); err != nil {
+				klog.Errorf("unmarshal data %s to []Block error %v", filepath.Join(baseDir, b.IncludeTasks), err)
+				return err
+			}
+			// Attach the included tasks as children and write the updated
+			// copy back into the slice (b is a copy of the element).
+			b.Block = bs
+			blocks[i] = b
+		}
+		// Recurse into children; b reflects any expansion done above.
+		if err := fileToBlock(baseFS, baseDir, b.Block); err != nil {
+			return err
+		}
+		if err := fileToBlock(baseFS, baseDir, b.Rescue); err != nil {
+			return err
+		}
+		if err := fileToBlock(baseFS, baseDir, b.Always); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// MarshalBlock marshal block to task
+func MarshalBlock(ctx context.Context, block kkcorev1.Block, owner ctrlclient.Object) *kubekeyv1alpha1.Task {
+ var role string
+ if v := ctx.Value(_const.CtxBlockRole); v != nil {
+ role = v.(string)
+ }
+ hosts := ctx.Value(_const.CtxBlockHosts).([]string)
+ if block.RunOnce { // if run_once. execute on the first task
+ hosts = hosts[:1]
+ }
+ var uid string
+ if v := ctx.Value(_const.CtxBlockTaskUID); v != nil {
+ uid = v.(string)
+ }
+ var when []string
+ if v := ctx.Value(_const.CtxBlockWhen); v != nil {
+ when = v.([]string)
+ }
+
+ task := &kubekeyv1alpha1.Task{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Task",
+ APIVersion: "kubekey.kubesphere.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: fmt.Sprintf("%s-%s", owner.GetName(), rand.String(12)),
+ Namespace: owner.GetNamespace(),
+ UID: types.UID(uid),
+ CreationTimestamp: metav1.Now(),
+ Annotations: map[string]string{
+ kubekeyv1alpha1.TaskAnnotationRole: role,
+ },
+ OwnerReferences: []metav1.OwnerReference{
+ {
+ APIVersion: owner.GetObjectKind().GroupVersionKind().GroupVersion().String(),
+ Kind: owner.GetObjectKind().GroupVersionKind().Kind,
+ Name: owner.GetName(),
+ UID: owner.GetUID(),
+ Controller: pointer.Bool(true),
+ BlockOwnerDeletion: pointer.Bool(true),
+ },
+ },
+ },
+ Spec: kubekeyv1alpha1.KubeKeyTaskSpec{
+ Name: block.Name,
+ Hosts: hosts,
+ IgnoreError: block.IgnoreErrors,
+ Retries: block.Retries,
+ //Loop: block.Loop,
+ When: when,
+ FailedWhen: block.FailedWhen.Data,
+ Register: block.Register,
+ },
+ Status: kubekeyv1alpha1.TaskStatus{
+ Phase: kubekeyv1alpha1.TaskPhasePending,
+ },
+ }
+ if len(block.Loop) != 0 {
+ data, err := json.Marshal(block.Loop)
+ if err != nil {
+ klog.Errorf("marshal loop %v error: %v", block.Loop, err)
+ }
+ task.Spec.Loop = runtime.RawExtension{Raw: data}
+ }
+
+ return task
+}
+
// GroupHostBySerial splits hosts into consecutive batches according to an
// Ansible-style "serial" specification. Each serial entry may be an int
// (batch size), a numeric string ("2"), or a percentage string ("40%",
// rounded up against the total host count). When serial is exhausted before
// all hosts are grouped, the last entry is reused. An empty serial yields a
// single batch containing every host.
//
// Unlike the previous version, a serial entry that evaluates to a
// non-positive batch size is rejected with an error; it could never advance
// and previously caused an infinite loop. Errors are returned (not logged
// here) so the caller decides how to report them.
func GroupHostBySerial(hosts []string, serial []any) ([][]string, error) {
	if len(serial) == 0 {
		return [][]string{hosts}, nil
	}

	result := make([][]string, 0, len(serial))
	sp := 0
	for i := 0; sp < len(hosts); i++ {
		// Reuse the last serial entry once the list is exhausted.
		entry := serial[len(serial)-1]
		if i < len(serial) {
			entry = serial[i]
		}
		n, err := batchSize(entry, len(hosts))
		if err != nil {
			return nil, err
		}
		if n <= 0 {
			return nil, fmt.Errorf("invalid serial batch size %v", entry)
		}
		if sp+n >= len(hosts) {
			// The remaining hosts fit into this batch.
			result = append(result, hosts[sp:])
			return result, nil
		}
		result = append(result, hosts[sp:sp+n])
		sp += n
	}
	return result, nil
}

// batchSize converts one serial entry into a host count. Percentages are
// computed against total and rounded up, so a non-zero percentage always
// maps to at least one host when total > 0.
func batchSize(entry any, total int) (int, error) {
	switch v := entry.(type) {
	case int:
		return v, nil
	case string:
		if strings.HasSuffix(v, "%") {
			p, err := strconv.Atoi(strings.TrimSuffix(v, "%"))
			if err != nil {
				return 0, fmt.Errorf("convert serial %q to int: %w", v, err)
			}
			return int(math.Ceil(float64(total*p) / 100.0)), nil
		}
		n, err := strconv.Atoi(v)
		if err != nil {
			return 0, fmt.Errorf("convert serial %q to int: %w", v, err)
		}
		return n, nil
	default:
		return 0, fmt.Errorf("unknown serial type. only support int or percent")
	}
}
+
+// CalculatePipelineStatus calculate pipeline status from tasks
+func CalculatePipelineStatus(nsTasks *kubekeyv1alpha1.TaskList, pipeline *kubekeyv1.Pipeline) {
+ if pipeline.Status.Phase != kubekeyv1.PipelinePhaseRunning {
+ // only deal running pipeline
+ return
+ }
+ pipeline.Status.TaskResult = kubekeyv1.PipelineTaskResult{
+ Total: len(nsTasks.Items),
+ }
+ var failedDetail []kubekeyv1.PipelineFailedDetail
+ for _, t := range nsTasks.Items {
+ switch t.Status.Phase {
+ case kubekeyv1alpha1.TaskPhaseSuccess:
+ pipeline.Status.TaskResult.Success++
+ case kubekeyv1alpha1.TaskPhaseIgnored:
+ pipeline.Status.TaskResult.Ignored++
+ case kubekeyv1alpha1.TaskPhaseSkipped:
+ pipeline.Status.TaskResult.Skipped++
+ }
+ if t.Status.Phase == kubekeyv1alpha1.TaskPhaseFailed && t.Spec.Retries <= t.Status.RestartCount {
+ var hostReason []kubekeyv1.PipelineFailedDetailHost
+ for _, tr := range t.Status.FailedDetail {
+ hostReason = append(hostReason, kubekeyv1.PipelineFailedDetailHost{
+ Host: tr.Host,
+ Stdout: tr.Stdout,
+ StdErr: tr.StdErr,
+ })
+ }
+ failedDetail = append(failedDetail, kubekeyv1.PipelineFailedDetail{
+ Task: t.Name,
+ Hosts: hostReason,
+ })
+ pipeline.Status.TaskResult.Failed++
+ }
+ }
+
+ if pipeline.Status.TaskResult.Failed != 0 {
+ pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed
+ pipeline.Status.Reason = "task failed"
+ pipeline.Status.FailedDetail = failedDetail
+ } else if pipeline.Status.TaskResult.Total == pipeline.Status.TaskResult.Success+pipeline.Status.TaskResult.Ignored+pipeline.Status.TaskResult.Skipped {
+ pipeline.Status.Phase = kubekeyv1.PipelinePhaseSucceed
+ }
+
+}
diff --git a/pkg/converter/converter_test.go b/pkg/converter/converter_test.go
new file mode 100644
index 00000000..b6ccfcda
--- /dev/null
+++ b/pkg/converter/converter_test.go
@@ -0,0 +1,214 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package converter
+
+import (
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+)
+
+// TestMarshalPlaybook runs MarshalPlaybook against the fixtures under
+// testdata/ and checks that plays, roles, pre/post tasks and nested blocks
+// are flattened into the expected Playbook structure.
+// NOTE(review): the field name "except" presumably means "expected".
+func TestMarshalPlaybook(t *testing.T) {
+	testcases := []struct {
+		name   string
+		file   string
+		except *kkcorev1.Playbook
+	}{
+		{
+			name: "marshal playbook",
+			file: "playbooks/playbook1.yaml",
+			except: &kkcorev1.Playbook{[]kkcorev1.Play{
+				{
+					Base:     kkcorev1.Base{Name: "play1"},
+					PlayHost: kkcorev1.PlayHost{Hosts: []string{"localhost"}},
+					Roles: []kkcorev1.Role{
+						{kkcorev1.RoleInfo{
+							Role: "role1",
+							Block: []kkcorev1.Block{
+								{
+									BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "role1 | block1"}},
+									Task: kkcorev1.Task{UnknownFiled: map[string]any{
+										"debug": map[string]any{
+											"msg": "echo \"hello world\"",
+										},
+									}},
+								},
+							},
+						}},
+					},
+					Handlers: nil,
+					PreTasks: []kkcorev1.Block{
+						{
+							BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | pre_block1"}},
+							Task: kkcorev1.Task{UnknownFiled: map[string]any{
+								"debug": map[string]any{
+									"msg": "echo \"hello world\"",
+								},
+							}},
+						},
+					},
+					PostTasks: []kkcorev1.Block{
+						{
+							BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | post_block1"}},
+							Task: kkcorev1.Task{UnknownFiled: map[string]any{
+								"debug": map[string]any{
+									"msg": "echo \"hello world\"",
+								},
+							}},
+						},
+					},
+					Tasks: []kkcorev1.Block{
+						{
+							BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block1"}},
+							BlockInfo: kkcorev1.BlockInfo{Block: []kkcorev1.Block{
+								{
+									BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block1 | block1"}},
+									Task: kkcorev1.Task{UnknownFiled: map[string]any{
+										"debug": map[string]any{
+											"msg": "echo \"hello world\"",
+										},
+									}},
+								},
+								{
+									BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block1 | block2"}},
+									Task: kkcorev1.Task{UnknownFiled: map[string]any{
+										"debug": map[string]any{
+											"msg": "echo \"hello world\"",
+										},
+									}},
+								},
+							}},
+						},
+						{
+							BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play1 | block2"}},
+							Task: kkcorev1.Task{UnknownFiled: map[string]any{
+								"debug": map[string]any{
+									"msg": "echo \"hello world\"",
+								},
+							}},
+						},
+					},
+				},
+				{
+					Base:     kkcorev1.Base{Name: "play2"},
+					PlayHost: kkcorev1.PlayHost{Hosts: []string{"localhost"}},
+					Tasks: []kkcorev1.Block{
+						{
+							BlockBase: kkcorev1.BlockBase{Base: kkcorev1.Base{Name: "play2 | block1"}},
+							Task: kkcorev1.Task{UnknownFiled: map[string]any{
+								"debug": map[string]any{
+									"msg": "echo \"hello world\"",
+								},
+							}},
+						},
+					},
+				},
+			}},
+		},
+	}
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			pb, err := MarshalPlaybook(os.DirFS("testdata"), tc.file)
+			assert.NoError(t, err)
+			assert.Equal(t, tc.except, pb)
+		})
+	}
+}
+
+// TestGroupHostBySerial covers fixed-size batches, mixed int/string/percent
+// serial entries, and the error paths for unsupported entry types.
+// NOTE(review): "except" in the field names presumably means "expected".
+func TestGroupHostBySerial(t *testing.T) {
+	hosts := []string{"h1", "h2", "h3", "h4", "h5", "h6", "h7"}
+	testcases := []struct {
+		name         string
+		serial       []any
+		exceptResult [][]string
+		exceptErr    bool
+	}{
+		{
+			name:   "group host by 1",
+			serial: []any{1},
+			exceptResult: [][]string{
+				{"h1"},
+				{"h2"},
+				{"h3"},
+				{"h4"},
+				{"h5"},
+				{"h6"},
+				{"h7"},
+			},
+			exceptErr: false,
+		},
+		{
+			name:   "group host by serial 2",
+			serial: []any{2},
+			exceptResult: [][]string{
+				{"h1", "h2"},
+				{"h3", "h4"},
+				{"h5", "h6"},
+				{"h7"},
+			},
+			exceptErr: false,
+		},
+		{
+			name:   "group host by serial 1 and 2",
+			serial: []any{1, 2},
+			exceptResult: [][]string{
+				{"h1"},
+				{"h2", "h3"},
+				{"h4", "h5"},
+				{"h6", "h7"},
+			},
+			exceptErr: false,
+		},
+		{
+			name:   "group host by serial 1 and 40%",
+			serial: []any{"1", "40%"},
+			exceptResult: [][]string{
+				{"h1"},
+				{"h2", "h3", "h4"},
+				{"h5", "h6", "h7"},
+			},
+			exceptErr: false,
+		},
+		{
+			name:         "group host by unSupport serial type",
+			serial:       []any{1.1},
+			exceptResult: nil,
+			exceptErr:    true,
+		},
+		{
+			name:         "group host by unSupport serial value",
+			serial:       []any{"%1.1%"},
+			exceptResult: nil,
+			exceptErr:    true,
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			result, err := GroupHostBySerial(hosts, tc.serial)
+			if tc.exceptErr {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tc.exceptResult, result)
+			}
+		})
+	}
+}
diff --git a/pkg/converter/testdata/playbooks/playbook1.yaml b/pkg/converter/testdata/playbooks/playbook1.yaml
new file mode 100644
index 00000000..08e2e5d0
--- /dev/null
+++ b/pkg/converter/testdata/playbooks/playbook1.yaml
@@ -0,0 +1,30 @@
+- name: play1
+ hosts: localhost
+ pre_tasks:
+ - name: play1 | pre_block1
+ debug:
+ msg: echo "hello world"
+ tasks:
+ - name: play1 | block1
+ block:
+ - name: play1 | block1 | block1
+ debug:
+ msg: echo "hello world"
+ - name: play1 | block1 | block2
+ debug:
+ msg: echo "hello world"
+ - name: play1 | block2
+ debug:
+ msg: echo "hello world"
+ post_tasks:
+ - name: play1 | post_block1
+ debug:
+ msg: echo "hello world"
+ roles:
+ - role1
+- name: play2
+ hosts: localhost
+ tasks:
+ - name: play2 | block1
+ debug:
+ msg: echo "hello world"
diff --git a/pkg/converter/testdata/roles/role1/tasks/main.yaml b/pkg/converter/testdata/roles/role1/tasks/main.yaml
new file mode 100644
index 00000000..0a50611a
--- /dev/null
+++ b/pkg/converter/testdata/roles/role1/tasks/main.yaml
@@ -0,0 +1,3 @@
+- name: role1 | block1
+ debug:
+ msg: echo "hello world"
diff --git a/pkg/converter/tmpl/filter_extension.go b/pkg/converter/tmpl/filter_extension.go
new file mode 100644
index 00000000..1644c73b
--- /dev/null
+++ b/pkg/converter/tmpl/filter_extension.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tmpl
+
+import (
+ "fmt"
+ "math"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/flosch/pongo2/v6"
+ "k8s.io/apimachinery/pkg/util/version"
+)
+
+// init registers this package's custom filters with pongo2 so every template
+// compiled by the tmpl package can use them.
+func init() {
+	pongo2.RegisterFilter("defined", filterDefined)
+	pongo2.RegisterFilter("version", filterVersion)
+	pongo2.RegisterFilter("pow", filterPow)
+	pongo2.RegisterFilter("match", filterMatch)
+	pongo2.RegisterFilter("basename", filterBasename)
+}
+
+func filterDefined(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
+ if in.IsNil() {
+ return pongo2.AsValue(false), nil
+ }
+ return pongo2.AsValue(true), nil
+}
+
+func filterVersion(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
+ inVersion, err := version.ParseGeneric(in.String())
+ if err != nil {
+ return pongo2.AsValue(nil), &pongo2.Error{
+ Sender: "filter:version in",
+ OrigError: err,
+ }
+ }
+ paramString := param.String()
+ customChoices := strings.Split(paramString, ",")
+ if len(customChoices) != 2 {
+ return nil, &pongo2.Error{
+ Sender: "filter:version",
+ OrigError: fmt.Errorf("'version'-filter need 2 arguments(as: verion:'xxx,xxx') but got'%s'", paramString),
+ }
+ }
+ ci, err := inVersion.Compare(customChoices[1])
+ if err != nil {
+ return pongo2.AsValue(nil), &pongo2.Error{
+ Sender: "filter:version",
+ OrigError: fmt.Errorf("converter second param error: %v", err),
+ }
+ }
+ switch customChoices[0] {
+ case ">":
+ return pongo2.AsValue(ci == 1), nil
+ case "=":
+ return pongo2.AsValue(ci == 0), nil
+ case "<":
+ return pongo2.AsValue(ci == -1), nil
+ case ">=":
+ return pongo2.AsValue(ci >= 0), nil
+ case "<=":
+ return pongo2.AsValue(ci <= 0), nil
+ default:
+ return pongo2.AsValue(nil), &pongo2.Error{
+ Sender: "filter:version",
+ OrigError: fmt.Errorf("converter first param error: %v", err),
+ }
+ }
+}
+
+func filterPow(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
+ return pongo2.AsValue(math.Pow(in.Float(), param.Float())), nil
+}
+
+func filterMatch(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
+ match, err := regexp.Match(param.String(), []byte(in.String()))
+ if err != nil {
+ return pongo2.AsValue(nil), &pongo2.Error{Sender: "filter:match", OrigError: err}
+ }
+ return pongo2.AsValue(match), nil
+}
+
+func filterBasename(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
+ return pongo2.AsValue(filepath.Base(in.String())), nil
+}
diff --git a/pkg/converter/tmpl/filter_extension_test.go b/pkg/converter/tmpl/filter_extension_test.go
new file mode 100644
index 00000000..5cdb1ae3
--- /dev/null
+++ b/pkg/converter/tmpl/filter_extension_test.go
@@ -0,0 +1,124 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tmpl
+
+import (
+ "testing"
+
+ "github.com/flosch/pongo2/v6"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestFilter renders one template per built-in or custom filter and checks
+// the string output (pongo2 prints booleans as "True"/"False").
+// NOTE(review): "except" in the field names presumably means "expected"; in
+// the "power" case test2 is the non-numeric "3s", which pongo2 coerces so
+// the expression still renders True — confirm that is intentional.
+func TestFilter(t *testing.T) {
+	testcases := []struct {
+		name   string
+		input  string
+		ctx    pongo2.Context
+		except string
+	}{
+		{
+			name:  "default",
+			input: "{{ os.release.Name | default_if_none:false }}",
+			ctx: map[string]any{
+				"os": map[string]any{
+					"release": map[string]any{
+						"ID": "a",
+					},
+				},
+			},
+			except: "False",
+		},
+		{
+			name:  "default_if_none",
+			input: "{{ os.release.Name | default_if_none:'b' }}",
+			ctx: map[string]any{
+				"os": map[string]any{
+					"release": map[string]any{
+						"ID": "a",
+					},
+				},
+			},
+			except: "b",
+		},
+		{
+			name:  "defined",
+			input: "{{ test | defined }}",
+			ctx: map[string]any{
+				"test": "aaa",
+			},
+			except: "True",
+		},
+		{
+			name:  "version_greater",
+			input: "{{ test | version:'>=,v1.19.0' }}",
+			ctx: map[string]any{
+				"test": "v1.23.10",
+			},
+			except: "True",
+		},
+		{
+			name:  "divisibleby",
+			input: "{{ not test['a'] | length | divisibleby:2 }}",
+			ctx: map[string]any{
+				"test": map[string]any{
+					"a": "1",
+				},
+			},
+			except: "True",
+		},
+		{
+			name:  "power",
+			input: "{{ (test | integer) >= (2 | pow: test2 | integer ) }}",
+			ctx: map[string]any{
+				"test":  "12",
+				"test2": "3s",
+			},
+			except: "True",
+		},
+		{
+			name:  "split",
+			input: "{{ kernelVersion | split:'-' | first }}",
+			ctx: map[string]any{
+				"kernelVersion": "5.15.0-89-generic",
+			},
+			except: "5.15.0",
+		},
+		{
+			name:  "match",
+			input: "{{ test | match:regex }}",
+			ctx: map[string]any{
+				"test":  "abc",
+				"regex": "[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$",
+			},
+			except: "True",
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run("filter: "+tc.name, func(t *testing.T) {
+			tql, err := pongo2.FromString(tc.input)
+			if err != nil {
+				t.Fatal(err)
+			}
+			result, err := tql.Execute(tc.ctx)
+			if err != nil {
+				t.Fatal(err)
+			}
+			assert.Equal(t, tc.except, result)
+		})
+	}
+}
diff --git a/pkg/converter/tmpl/template.go b/pkg/converter/tmpl/template.go
new file mode 100644
index 00000000..aff74fd0
--- /dev/null
+++ b/pkg/converter/tmpl/template.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tmpl
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/flosch/pongo2/v6"
+ "k8s.io/klog/v2"
+
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// ParseBool evaluates each condition in inputs against the variables v and
+// reports whether every condition renders to boolean true.
+//
+// Conditions are written without surrounding "{{ }}" (Ansible "when" style).
+// Each condition is rendered twice: a first pass substitutes variable
+// references in the raw string; the result is then wrapped in "{{ }}" and
+// rendered again so it is evaluated as an expression. A condition holds only
+// when that final render is exactly "True"; the first non-true condition
+// short-circuits with (false, nil). An empty inputs slice yields true.
+func ParseBool(v variable.VariableData, inputs []string) (bool, error) {
+	for _, input := range inputs {
+		// First pass: substitute variables referenced by the raw condition.
+		intql, err := pongo2.FromString(input)
+		if err != nil {
+			klog.Errorf("failed to get string: %v", err)
+			return false, err
+		}
+		inres, err := intql.Execute(pongo2.Context(v))
+		if err != nil {
+			klog.Errorf("failed to execute string: %v", err)
+			return false, err
+		}
+
+		// Trim the trailing line break, then wrap so the second pass
+		// evaluates the rendered text as a boolean expression.
+		inres = strings.TrimSuffix(inres, "\n")
+		inres = fmt.Sprintf("{{ %s }}", inres)
+
+		// Second pass: evaluate the wrapped expression.
+		tql, err := pongo2.FromString(inres)
+		if err != nil {
+			klog.Errorf("failed to get string: %v", err)
+			return false, err
+		}
+		result, err := tql.Execute(pongo2.Context(v))
+		if err != nil {
+			klog.Errorf("failed to execute string: %v", err)
+			return false, err
+		}
+		klog.V(4).Infof("the template parse result: %s", result)
+		// pongo2 renders boolean true as the string "True".
+		if result != "True" {
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
+// ParseString renders input — which may contain "{{ ... }}" expressions —
+// with the variables in v and returns the rendered text. On a template
+// compile or execution error the original input is returned alongside the
+// error so callers can fall back to the raw string.
+func ParseString(v variable.VariableData, input string) (string, error) {
+	tql, err := pongo2.FromString(input)
+	if err != nil {
+		klog.Errorf("failed to get string: %v", err)
+		return input, err
+	}
+	result, err := tql.Execute(pongo2.Context(v))
+	if err != nil {
+		klog.Errorf("failed to execute string: %v", err)
+		return input, err
+	}
+	klog.V(4).Infof("the template parse result: %s", result)
+	return result, nil
+}
+
+// ParseFile renders an entire file's contents as a pongo2 template with the
+// variables in v and returns the result. Unlike ParseString, it returns an
+// empty string (not the input) when compilation or execution fails.
+func ParseFile(v variable.VariableData, file []byte) (string, error) {
+	tql, err := pongo2.FromBytes(file)
+	if err != nil {
+		klog.Errorf("transfer file to pongo2 template error %v", err)
+		return "", err
+	}
+	result, err := tql.Execute(pongo2.Context(v))
+	if err != nil {
+		klog.Errorf("exec pongo2 template error %v", err)
+		return "", err
+	}
+	klog.V(4).Infof("the template file: %s", result)
+	return result, nil
+}
diff --git a/pkg/converter/tmpl/template_test.go b/pkg/converter/tmpl/template_test.go
new file mode 100644
index 00000000..1f8411a5
--- /dev/null
+++ b/pkg/converter/tmpl/template_test.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tmpl
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// TestParseBool checks condition evaluation: equality against a variable and
+// the "in" membership operator. Errors are intentionally discarded — only
+// the boolean result is asserted.
+func TestParseBool(t *testing.T) {
+	testcases := []struct {
+		name      string
+		condition []string
+		variable  variable.VariableData
+		excepted  bool
+	}{
+		{
+			name:      "parse success",
+			condition: []string{"foo == \"bar\""},
+			variable: variable.VariableData{
+				"foo": "bar",
+			},
+			excepted: true,
+		},
+		{
+			name:      "in",
+			condition: []string{"test in inArr"},
+			variable: variable.VariableData{
+				"test":  "a",
+				"inArr": []string{"a", "b"},
+			},
+			excepted: true,
+		},
+	}
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			b, _ := ParseBool(tc.variable, tc.condition)
+			assert.Equal(t, tc.excepted, b)
+		})
+	}
+}
+
+// TestParseString checks simple "{{ var }}" substitution; the error return
+// is intentionally discarded.
+func TestParseString(t *testing.T) {
+	testcases := []struct {
+		name     string
+		input    string
+		variable variable.VariableData
+		excepted string
+	}{
+		{
+			name:  "parse success",
+			input: "{{foo}}",
+			variable: map[string]any{
+				"foo": "bar",
+			},
+			excepted: "bar",
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			output, _ := ParseString(tc.variable, tc.input)
+			assert.Equal(t, tc.excepted, output)
+		})
+	}
+}
+
+// TestParseFile renders a small inline file body with one variable; the
+// error return is intentionally discarded.
+func TestParseFile(t *testing.T) {
+	testcases := []struct {
+		name     string
+		variable variable.VariableData
+		excepted string
+	}{
+		{
+			name: "parse success",
+			variable: map[string]any{
+				"foo": "bar",
+			},
+			excepted: "foo: bar",
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			output, _ := ParseFile(tc.variable, []byte("foo: {{foo}}"))
+			assert.Equal(t, tc.excepted, output)
+		})
+	}
+}
diff --git a/pkg/manager/command_manager.go b/pkg/manager/command_manager.go
new file mode 100644
index 00000000..461a0c99
--- /dev/null
+++ b/pkg/manager/command_manager.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package manager
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+ "time"
+
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/klog/v2"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ "github.com/kubesphere/kubekey/v4/pkg/cache"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/controllers"
+ "github.com/kubesphere/kubekey/v4/pkg/task"
+)
+
+// commandManager executes a single pipeline from the command line: it stores
+// the pipeline resources through the embedded client, runs a local task
+// controller, and waits for the pipeline to reach a terminal phase.
+type commandManager struct {
+	*kubekeyv1.Pipeline
+	*kubekeyv1.Config
+	*kubekeyv1.Inventory
+
+	// Client persists and reads back the resources above.
+	ctrlclient.Client
+}
+
+// Run creates the config, inventory and pipeline objects, starts a local task
+// controller that executes the pipeline's tasks, and blocks until the
+// pipeline phase becomes Succeed or Failed (or ctx is cancelled). On exit it
+// persists the final pipeline status and, for successful non-debug runs,
+// removes the runtime directory.
+func (m *commandManager) Run(ctx context.Context) error {
+	// create config, inventory and pipeline
+	klog.Infof("[Pipeline %s] start", ctrlclient.ObjectKeyFromObject(m.Pipeline))
+	if err := m.Client.Create(ctx, m.Config); err != nil {
+		klog.Errorf("[Pipeline %s] create config error: %v", ctrlclient.ObjectKeyFromObject(m.Pipeline), err)
+		return err
+	}
+	if err := m.Client.Create(ctx, m.Inventory); err != nil {
+		klog.Errorf("[Pipeline %s] create inventory error: %v", ctrlclient.ObjectKeyFromObject(m.Pipeline), err)
+		return err
+	}
+	if err := m.Client.Create(ctx, m.Pipeline); err != nil {
+		klog.Errorf("[Pipeline %s] create pipeline error: %v", ctrlclient.ObjectKeyFromObject(m.Pipeline), err)
+		return err
+	}
+
+	defer func() {
+		// update pipeline status
+		if err := m.Client.Update(ctx, m.Pipeline); err != nil {
+			klog.Errorf("[Pipeline %s] update pipeline error: %v", ctrlclient.ObjectKeyFromObject(m.Pipeline), err)
+		}
+
+		klog.Infof("[Pipeline %s] finish", ctrlclient.ObjectKeyFromObject(m.Pipeline))
+		// keep the runtime directory when debugging or when the run failed
+		if !m.Pipeline.Spec.Debug && m.Pipeline.Status.Phase == kubekeyv1.PipelinePhaseSucceed {
+			klog.Infof("[Pipeline %s] clean runtime directory", ctrlclient.ObjectKeyFromObject(m.Pipeline))
+			// clean runtime directory
+			if err := os.RemoveAll(filepath.Join(_const.GetWorkDir(), _const.RuntimeDir)); err != nil {
+				klog.Errorf("clean runtime directory %s error: %v", filepath.Join(_const.GetWorkDir(), _const.RuntimeDir), err)
+			}
+		}
+	}()
+
+	klog.Infof("[Pipeline %s] start task controller", ctrlclient.ObjectKeyFromObject(m.Pipeline))
+	kd, err := task.NewController(task.ControllerOptions{
+		Client: m.Client,
+		TaskReconciler: &controllers.TaskReconciler{
+			Client:        m.Client,
+			VariableCache: cache.LocalVariable,
+		},
+	})
+	if err != nil {
+		klog.Errorf("[Pipeline %s] create task controller error: %v", ctrlclient.ObjectKeyFromObject(m.Pipeline), err)
+		m.Pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed
+		m.Pipeline.Status.Reason = fmt.Sprintf("create task controller failed: %v", err)
+		return err
+	}
+	// init pipeline status
+	m.Pipeline.Status.Phase = kubekeyv1.PipelinePhaseRunning
+	if err := kd.AddTasks(ctx, task.AddTaskOptions{
+		Pipeline: m.Pipeline,
+	}); err != nil {
+		klog.Errorf("[Pipeline %s] add task error: %v", ctrlclient.ObjectKeyFromObject(m.Pipeline), err)
+		m.Pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed
+		m.Pipeline.Status.Reason = fmt.Sprintf("add task to controller failed: %v", err)
+		return err
+	}
+	// update pipeline status
+	if err := m.Client.Update(ctx, m.Pipeline); err != nil {
+		klog.Errorf("[Pipeline %s] update pipeline error: %v", ctrlclient.ObjectKeyFromObject(m.Pipeline), err)
+	}
+	// NOTE(review): Start's return value is discarded here — confirm that
+	// controller failures surface through the pipeline status below.
+	go kd.Start(ctx)
+
+	// Poll the pipeline until it reaches a terminal phase; the poll error is
+	// deliberately ignored (cancellation falls through to the SIGTERM below).
+	_ = wait.PollUntilContextCancel(ctx, time.Millisecond*100, false, func(ctx context.Context) (done bool, err error) {
+		if err := m.Client.Get(ctx, ctrlclient.ObjectKeyFromObject(m.Pipeline), m.Pipeline); err != nil {
+			klog.Errorf("[Pipeline %s] get pipeline error: %v", ctrlclient.ObjectKeyFromObject(m.Pipeline), err)
+		}
+		if m.Pipeline.Status.Phase == kubekeyv1.PipelinePhaseFailed || m.Pipeline.Status.Phase == kubekeyv1.PipelinePhaseSucceed {
+			return true, nil
+		}
+		return false, nil
+	})
+	// kill by signal: SIGTERM our own process — presumably a signal handler
+	// in the entry point turns this into a graceful shutdown; confirm.
+	if err := syscall.Kill(os.Getpid(), syscall.SIGTERM); err != nil {
+		klog.Errorf("[Pipeline %s] manager terminated error: %v", ctrlclient.ObjectKeyFromObject(m.Pipeline), err)
+		return err
+	}
+	klog.Infof("[Pipeline %s] task finish", ctrlclient.ObjectKeyFromObject(m.Pipeline))
+
+	return nil
+}
diff --git a/pkg/manager/controller_manager.go b/pkg/manager/controller_manager.go
new file mode 100644
index 00000000..57553dfa
--- /dev/null
+++ b/pkg/manager/controller_manager.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package manager
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/klog/v2"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/config"
+ ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
+ ctrlmanager "sigs.k8s.io/controller-runtime/pkg/manager"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ "github.com/kubesphere/kubekey/v4/pkg/cache"
+ "github.com/kubesphere/kubekey/v4/pkg/controllers"
+ "github.com/kubesphere/kubekey/v4/pkg/task"
+)
+
+// controllerManager runs KubeKey as long-running controllers inside a
+// controller-runtime manager (in-cluster mode).
+type controllerManager struct {
+	// ControllerGates is passed through to controller setup to select which
+	// controllers are enabled.
+	ControllerGates []string
+	// MaxConcurrentReconciles bounds parallel reconciles per controller.
+	MaxConcurrentReconciles int
+	// LeaderElection enables controller-runtime leader election.
+	LeaderElection bool
+}
+
+// Run builds a controller-runtime manager with the client-go and kubekey
+// schemes, registers the task and pipeline controllers, and blocks until the
+// manager stops or ctx is cancelled.
+func (c controllerManager) Run(ctx context.Context) error {
+	ctrl.SetLogger(klog.NewKlogr())
+	scheme := runtime.NewScheme()
+	// add default scheme
+	if err := clientgoscheme.AddToScheme(scheme); err != nil {
+		klog.Errorf("add default scheme error: %v", err)
+		return err
+	}
+	// add kubekey scheme
+	if err := kubekeyv1.AddToScheme(scheme); err != nil {
+		klog.Errorf("add kubekey scheme error: %v", err)
+		return err
+	}
+	mgr, err := ctrl.NewManager(config.GetConfigOrDie(), ctrlmanager.Options{
+		Scheme:           scheme,
+		LeaderElection:   c.LeaderElection,
+		LeaderElectionID: "controller-leader-election-kk",
+	})
+	if err != nil {
+		klog.Errorf("create manager error: %v", err)
+		return err
+	}
+
+	// The task controller is added as a Runnable so the manager controls its
+	// lifecycle alongside the pipeline controller.
+	taskController, err := task.NewController(task.ControllerOptions{
+		MaxConcurrent: c.MaxConcurrentReconciles,
+		Client:        cache.NewDelegatingClient(mgr.GetClient()),
+		TaskReconciler: &controllers.TaskReconciler{
+			Client:        cache.NewDelegatingClient(mgr.GetClient()),
+			VariableCache: cache.LocalVariable,
+		},
+	})
+	if err != nil {
+		klog.Errorf("create task controller error: %v", err)
+		return err
+	}
+	if err := mgr.Add(taskController); err != nil {
+		klog.Errorf("add task controller error: %v", err)
+		return err
+	}
+
+	if err := (&controllers.PipelineReconciler{
+		Client:         cache.NewDelegatingClient(mgr.GetClient()),
+		EventRecorder:  mgr.GetEventRecorderFor("pipeline"),
+		TaskController: taskController,
+	}).SetupWithManager(ctx, mgr, controllers.Options{
+		ControllerGates: c.ControllerGates,
+		Options: ctrlcontroller.Options{
+			MaxConcurrentReconciles: c.MaxConcurrentReconciles,
+		},
+	}); err != nil {
+		klog.Errorf("create pipeline controller error: %v", err)
+		return err
+	}
+
+	// Blocks until ctx is cancelled or the manager fails.
+	return mgr.Start(ctx)
+}
diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go
new file mode 100644
index 00000000..2b6cf09f
--- /dev/null
+++ b/pkg/manager/manager.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package manager
+
+import (
+ "context"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ "github.com/kubesphere/kubekey/v4/pkg/cache"
+)
+
+// Manager runs a KubeKey workload — either a one-shot command-line pipeline
+// or the long-running controllers — until it completes or ctx is cancelled.
+type Manager interface {
+	// Run starts the manager and blocks until it finishes or fails.
+	Run(ctx context.Context) error
+}
+
+// CommandManagerOptions carries the resources a one-shot command-line run
+// operates on.
+type CommandManagerOptions struct {
+	*kubekeyv1.Pipeline
+	*kubekeyv1.Config
+	*kubekeyv1.Inventory
+}
+
+// NewCommandManager returns a Manager that executes the given pipeline once,
+// backed by a local delegating client.
+func NewCommandManager(o CommandManagerOptions) Manager {
+	return &commandManager{
+		Pipeline:  o.Pipeline,
+		Config:    o.Config,
+		Inventory: o.Inventory,
+		Client:    cache.NewDelegatingClient(nil),
+	}
+}
+
+// ControllerManagerOptions configures the long-running controller manager.
+type ControllerManagerOptions struct {
+	ControllerGates         []string
+	MaxConcurrentReconciles int
+	LeaderElection          bool
+}
+
+// NewControllerManager returns a Manager that runs the KubeKey controllers
+// in-cluster with the given options.
+func NewControllerManager(o ControllerManagerOptions) Manager {
+	return &controllerManager{
+		ControllerGates:         o.ControllerGates,
+		MaxConcurrentReconciles: o.MaxConcurrentReconciles,
+		LeaderElection:          o.LeaderElection,
+	}
+}
diff --git a/pkg/modules/assert.go b/pkg/modules/assert.go
new file mode 100644
index 00000000..c838e330
--- /dev/null
+++ b/pkg/modules/assert.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// ModuleAssert evaluates the "that" conditions against the task's variables.
+// It returns ("True", "") on success — or the rendered "success_msg" — and
+// ("False", msg) on failure, where msg is the rendered "fail_msg" when one is
+// provided. Argument and template errors are reported via the second return
+// value.
+func ModuleAssert(ctx context.Context, options ExecOptions) (string, string) {
+	args := variable.Extension2Variables(options.Args)
+	// "that" may be supplied as either []string or a single string.
+	that := variable.StringSliceVar(args, "that")
+	if that == nil {
+		st := variable.StringVar(args, "that")
+		if st == nil {
+			return "", "\"that\" should be []string or string"
+		}
+		that = []string{*st}
+	}
+	// Variables scoped to this host and task location drive the templates.
+	lg, err := options.Variable.Get(variable.LocationVars{
+		HostName:    options.Host,
+		LocationUID: string(options.Task.UID),
+	})
+	if err != nil {
+		return "", err.Error()
+	}
+	ok, err := tmpl.ParseBool(lg.(variable.VariableData), that)
+	if err != nil {
+		return "", err.Error()
+	}
+
+	if ok {
+		if v := variable.StringVar(args, "success_msg"); v != nil {
+			r, err := tmpl.ParseString(lg.(variable.VariableData), *v)
+			if err != nil {
+				return "", err.Error()
+			}
+			return r, ""
+		}
+		return "True", ""
+	}
+
+	if v := variable.StringVar(args, "fail_msg"); v != nil {
+		r, err := tmpl.ParseString(lg.(variable.VariableData), *v)
+		if err != nil {
+			return "", err.Error()
+		}
+		return "False", r
+	}
+	return "False", "False"
+}
diff --git a/pkg/modules/assert_test.go b/pkg/modules/assert_test.go
new file mode 100644
index 00000000..0ab46691
--- /dev/null
+++ b/pkg/modules/assert_test.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ testassert "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// TestAssert drives ModuleAssert through its argument-validation, success and
+// failure paths; exceptStdout/exceptStderr are the expected return values.
+func TestAssert(t *testing.T) {
+	testcases := []struct {
+		name         string
+		opt          ExecOptions
+		exceptStdout string
+		exceptStderr string
+	}{
+		{
+			// missing "that" argument is rejected
+			name: "non-that",
+			opt: ExecOptions{
+				Host: "local",
+				Args: runtime.RawExtension{},
+			},
+			exceptStderr: "\"that\" should be []string or string",
+		},
+		{
+			name: "success with non-msg",
+			opt: ExecOptions{
+				Host: "local",
+				Args: runtime.RawExtension{
+					Raw: []byte(`{"that": ["true", "testvalue==\"a\""]}`),
+				},
+				Variable: &testVariable{
+					value: map[string]any{
+						"testvalue": "a",
+					},
+				},
+			},
+			exceptStdout: "True",
+		},
+		{
+			// success_msg is rendered as a template against the variables
+			name: "success with success_msg",
+			opt: ExecOptions{
+				Host: "local",
+				Args: runtime.RawExtension{
+					Raw: []byte(`{"that": ["true", "k1==\"v1\""], "success_msg": "success {{k2}}"}`),
+				},
+				Variable: &testVariable{
+					value: map[string]any{
+						"k1": "v1",
+						"k2": "v2",
+					},
+				},
+			},
+			exceptStdout: "success v2",
+		},
+		{
+			name: "failed with non-msg",
+			opt: ExecOptions{
+				Host: "local",
+				Args: runtime.RawExtension{
+					Raw: []byte(`{"that": ["true", "k1==\"v2\""]}`),
+				},
+				Variable: &testVariable{
+					value: map[string]any{
+						"k1": "v1",
+						"k2": "v2",
+					},
+				},
+			},
+			exceptStdout: "False",
+			exceptStderr: "False",
+		},
+		{
+			// fail_msg is rendered and returned on stderr
+			name: "failed with failed_msg",
+			opt: ExecOptions{
+				Host: "local",
+				Args: runtime.RawExtension{
+					Raw: []byte(`{"that": ["true", "k1==\"v2\""], "fail_msg": "failed {{k2}}"}`),
+				},
+				Variable: &testVariable{
+					value: map[string]any{
+						"k1": "v1",
+						"k2": "v2",
+					},
+				},
+			},
+			exceptStdout: "False",
+			exceptStderr: "failed v2",
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+			defer cancel()
+			acStdout, acStderr := ModuleAssert(ctx, tc.opt)
+			testassert.Equal(t, tc.exceptStdout, acStdout)
+			testassert.Equal(t, tc.exceptStderr, acStderr)
+		})
+	}
+}
diff --git a/pkg/modules/command.go b/pkg/modules/command.go
new file mode 100644
index 00000000..a3d67dea
--- /dev/null
+++ b/pkg/modules/command.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "strings"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubesphere/kubekey/v4/pkg/connector"
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// ModuleCommand renders the raw args as a command template and executes the
+// result on the target host, returning (stdout, stderr). Setup and template
+// errors are reported via the stderr string.
+func ModuleCommand(ctx context.Context, options ExecOptions) (string, string) {
+	// Host vars hold the connection parameters for the target host. The error
+	// was previously discarded here; check it like ModuleCopy does.
+	ha, err := options.Variable.Get(variable.HostVars{HostName: options.Host})
+	if err != nil {
+		klog.Errorf("failed to get host vars %v", err)
+		return "", err.Error()
+	}
+	// A connector injected via the context (e.g. by tests) takes precedence.
+	var conn connector.Connector
+	if v := ctx.Value("connector"); v != nil {
+		conn = v.(connector.Connector)
+	} else {
+		conn = connector.NewConnector(options.Host, ha.(variable.VariableData))
+	}
+	if err := conn.Init(ctx); err != nil {
+		klog.Errorf("failed to init connector %v", err)
+		return "", err.Error()
+	}
+	defer conn.Close(ctx)
+
+	// convert command template to string
+	arg := variable.Extension2String(options.Args)
+	lg, err := options.Variable.Get(variable.LocationVars{
+		HostName:    options.Host,
+		LocationUID: string(options.Task.UID),
+	})
+	if err != nil {
+		return "", err.Error()
+	}
+	result, err := tmpl.ParseString(lg.(variable.VariableData), arg)
+	if err != nil {
+		return "", err.Error()
+	}
+	// execute command; a non-nil error becomes stderr, any output (even
+	// alongside an error) becomes stdout with the trailing newline stripped
+	var stdout, stderr string
+	data, err := conn.ExecuteCommand(ctx, result)
+	if err != nil {
+		stderr = err.Error()
+	}
+	if data != nil {
+		stdout = strings.TrimSuffix(string(data), "\n")
+	}
+	return stdout, stderr
+}
diff --git a/pkg/modules/command_test.go b/pkg/modules/command_test.go
new file mode 100644
index 00000000..22c84582
--- /dev/null
+++ b/pkg/modules/command_test.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ testassert "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// TestCommand drives ModuleCommand with a fake connector injected via the
+// context; exceptStdout/exceptStderr are the expected return values.
+func TestCommand(t *testing.T) {
+	testcases := []struct {
+		name         string
+		opt          ExecOptions
+		ctx          context.Context
+		exceptStdout string
+		exceptStderr string
+	}{
+		{
+			// no connector in ctx: a real connector is built for an empty
+			// host and its Init is expected to fail
+			name: "non-host variable",
+			opt: ExecOptions{
+				Variable: &testVariable{},
+			},
+			ctx:          context.Background(),
+			exceptStderr: "host is not set",
+		},
+		{
+			name: "exec command success",
+			ctx: context.WithValue(context.Background(), "connector", &testConnector{
+				output: []byte("success"),
+			}),
+			opt: ExecOptions{
+				Host:     "test",
+				Args:     runtime.RawExtension{Raw: []byte("echo success")},
+				Variable: &testVariable{},
+			},
+			exceptStdout: "success",
+		},
+		{
+			name: "exec command failed",
+			ctx: context.WithValue(context.Background(), "connector", &testConnector{
+				commandErr: fmt.Errorf("failed"),
+			}),
+			opt: ExecOptions{
+				Host:     "test",
+				Args:     runtime.RawExtension{Raw: []byte("echo success")},
+				Variable: &testVariable{},
+			},
+			exceptStderr: "failed",
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(tc.ctx, time.Second*5)
+			defer cancel()
+			acStdout, acStderr := ModuleCommand(ctx, tc.opt)
+			testassert.Equal(t, tc.exceptStdout, acStdout)
+			testassert.Equal(t, tc.exceptStderr, acStderr)
+		})
+	}
+}
diff --git a/pkg/modules/copy.go b/pkg/modules/copy.go
new file mode 100644
index 00000000..c21327a0
--- /dev/null
+++ b/pkg/modules/copy.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/klog/v2"
+
+ kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
+ "github.com/kubesphere/kubekey/v4/pkg/connector"
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+ "github.com/kubesphere/kubekey/v4/pkg/project"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// ModuleCopy copies either a local file or directory ("src") or inline
+// "content" to "dest" on the target host. It returns ("success", "") on
+// success and ("", <error>) on failure. Relative src paths are resolved
+// through the project filesystem; absolute ones through the OS root.
+func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) {
+	// check args
+	args := variable.Extension2Variables(options.Args)
+	src := variable.StringVar(args, "src")
+	content := variable.StringVar(args, "content")
+	if src == nil && content == nil {
+		return "", "\"src\" or \"content\" in args should be string"
+	}
+	dest := variable.StringVar(args, "dest")
+	if dest == nil {
+		return "", "\"dest\" in args should be string"
+	}
+	lv, err := options.Variable.Get(variable.LocationVars{
+		HostName:    options.Host,
+		LocationUID: string(options.Task.UID),
+	})
+	if err != nil {
+		klog.Errorf("failed to get location vars %v", err)
+		return "", err.Error()
+	}
+	destStr, err := tmpl.ParseString(lv.(variable.VariableData), *dest)
+	if err != nil {
+		klog.Errorf("template parse dest %s error: %v", *dest, err)
+		return "", err.Error()
+	}
+
+	// A connector injected via the context (e.g. by tests) takes precedence.
+	var conn connector.Connector
+	if v := ctx.Value("connector"); v != nil {
+		conn = v.(connector.Connector)
+	} else {
+		// get connector
+		ha, err := options.Variable.Get(variable.HostVars{HostName: options.Host})
+		if err != nil {
+			klog.Errorf("failed to get host vars %v", err)
+			return "", err.Error()
+		}
+		conn = connector.NewConnector(options.Host, ha.(variable.VariableData))
+	}
+	if err := conn.Init(ctx); err != nil {
+		klog.Errorf("failed to init connector %v", err)
+		return "", err.Error()
+	}
+	defer conn.Close(ctx)
+
+	if src != nil {
+		// convert src
+		srcStr, err := tmpl.ParseString(lv.(variable.VariableData), *src)
+		if err != nil {
+			klog.Errorf("template parse src %s error: %v", *src, err)
+			return "", err.Error()
+		}
+		var baseFS fs.FS
+		if filepath.IsAbs(srcStr) {
+			baseFS = os.DirFS("/")
+		} else {
+			projectFs, err := project.New(project.Options{Pipeline: &options.Pipeline}).FS(ctx, false)
+			if err != nil {
+				klog.Errorf("failed to get project fs %v", err)
+				return "", err.Error()
+			}
+			baseFS = projectFs
+		}
+		roleName := options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]
+		flPath := project.GetFilesFromPlayBook(baseFS, options.Pipeline.Spec.Playbook, roleName, srcStr)
+		fileInfo, err := fs.Stat(baseFS, flPath)
+		if err != nil {
+			klog.Errorf("failed to get src file in local %v", err)
+			return "", err.Error()
+		}
+		if fileInfo.IsDir() {
+			// src is dir: copy every regular file, preserving the layout
+			// relative to the resolved source root.
+			if err := fs.WalkDir(baseFS, flPath, func(path string, info fs.DirEntry, err error) error {
+				if err != nil {
+					return err
+				}
+				if info.IsDir() {
+					return nil
+				}
+				// Relative paths are computed against flPath (the root of the
+				// walk) and files are read by their full walk path. The
+				// previous code used srcStr as the Rel base and read by the
+				// relative path, which cannot resolve when flPath != srcStr.
+				rel, err := filepath.Rel(flPath, path)
+				if err != nil {
+					return err
+				}
+				fi, err := info.Info()
+				if err != nil {
+					return err
+				}
+				// an explicit "mode" arg overrides the source file's mode
+				mode := fi.Mode()
+				if m := variable.IntVar(args, "mode"); m != nil {
+					mode = os.FileMode(*m)
+				}
+				data, err := fs.ReadFile(baseFS, path)
+				if err != nil {
+					return err
+				}
+				return conn.CopyFile(ctx, data, filepath.Join(destStr, rel), mode)
+			}); err != nil {
+				return "", err.Error()
+			}
+		} else {
+			// src is a single file
+			data, err := fs.ReadFile(baseFS, flPath)
+			if err != nil {
+				klog.Errorf("failed to read file %v", err)
+				return "", err.Error()
+			}
+			// a trailing "/" means dest is a directory: keep the source name
+			if strings.HasSuffix(destStr, "/") {
+				destStr = destStr + filepath.Base(srcStr)
+			}
+			if err := conn.CopyFile(ctx, data, destStr, fileInfo.Mode()); err != nil {
+				klog.Errorf("failed to copy file %v", err)
+				return "", err.Error()
+			}
+			return "success", ""
+		}
+	} else if content != nil {
+		if strings.HasSuffix(destStr, "/") {
+			return "", "\"content\" should copy to a file"
+		}
+		mode := os.ModePerm
+		if v := variable.IntVar(args, "mode"); v != nil {
+			mode = os.FileMode(*v)
+		}
+
+		if err := conn.CopyFile(ctx, []byte(*content), destStr, mode); err != nil {
+			return "", err.Error()
+		}
+	}
+	return "success", ""
+}
diff --git a/pkg/modules/copy_test.go b/pkg/modules/copy_test.go
new file mode 100644
index 00000000..cb0187a5
--- /dev/null
+++ b/pkg/modules/copy_test.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ testassert "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// TestCopy drives ModuleCopy through its argument validation and the inline
+// "content" paths with a fake connector; exceptStdout/exceptStderr are the
+// expected return values.
+func TestCopy(t *testing.T) {
+	testcases := []struct {
+		name         string
+		opt          ExecOptions
+		ctx          context.Context
+		exceptStdout string
+		exceptStderr string
+	}{
+		{
+			name: "src and content is empty",
+			opt: ExecOptions{
+				Args:     runtime.RawExtension{},
+				Host:     "local",
+				Variable: nil,
+			},
+			ctx:          context.Background(),
+			exceptStderr: "\"src\" or \"content\" in args should be string",
+		},
+		{
+			name: "dest is empty",
+			opt: ExecOptions{
+				Args: runtime.RawExtension{
+					Raw: []byte(`{"content": "hello world"}`),
+				},
+				Host:     "local",
+				Variable: nil,
+			},
+			ctx:          context.Background(),
+			exceptStderr: "\"dest\" in args should be string",
+		},
+		{
+			// dest ending in "/" is a directory: content needs a file path
+			name: "content not copy to file",
+			opt: ExecOptions{
+				Args: runtime.RawExtension{
+					Raw: []byte(`{"content": "hello world", "dest": "/etc/"}`),
+				},
+				Host:     "local",
+				Variable: &testVariable{},
+			},
+			ctx: context.WithValue(context.Background(), "connector", &testConnector{
+				output: []byte("success"),
+			}),
+			exceptStderr: "\"content\" should copy to a file",
+		},
+		{
+			name: "copy success",
+			opt: ExecOptions{
+				Args: runtime.RawExtension{
+					Raw: []byte(`{"content": "hello world", "dest": "/etc/test.txt"}`),
+				},
+				Host:     "local",
+				Variable: &testVariable{},
+			},
+			ctx: context.WithValue(context.Background(), "connector", &testConnector{
+				output: []byte("success"),
+			}),
+			exceptStdout: "success",
+		},
+		{
+			name: "copy failed",
+			opt: ExecOptions{
+				Args: runtime.RawExtension{
+					Raw: []byte(`{"content": "hello world", "dest": "/etc/test.txt"}`),
+				},
+				Host:     "local",
+				Variable: &testVariable{},
+			},
+			ctx: context.WithValue(context.Background(), "connector", &testConnector{
+				copyErr: fmt.Errorf("copy failed"),
+			}),
+			exceptStderr: "copy failed",
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(tc.ctx, time.Second*5)
+			defer cancel()
+			acStdout, acStderr := ModuleCopy(ctx, tc.opt)
+			testassert.Equal(t, tc.exceptStdout, acStdout)
+			testassert.Equal(t, tc.exceptStderr, acStderr)
+		})
+	}
+}
diff --git a/pkg/modules/debug.go b/pkg/modules/debug.go
new file mode 100644
index 00000000..94900c47
--- /dev/null
+++ b/pkg/modules/debug.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+func ModuleDebug(ctx context.Context, options ExecOptions) (string, string) {
+ args := variable.Extension2Variables(options.Args)
+ if v := variable.StringVar(args, "var"); v != nil {
+ lg, err := options.Variable.Get(variable.LocationVars{
+ HostName: options.Host,
+ LocationUID: string(options.Task.UID),
+ })
+ if err != nil {
+ return "", err.Error()
+ }
+ result, err := tmpl.ParseString(lg.(variable.VariableData), fmt.Sprintf("{{ %s }}", *v))
+ if err != nil {
+ klog.Errorf("failed to get var %v", err)
+ return "", err.Error()
+ }
+ return result, ""
+ }
+
+ if v := variable.StringVar(args, "msg"); v != nil {
+ lg, err := options.Variable.Get(variable.LocationVars{
+ HostName: options.Host,
+ LocationUID: string(options.Task.UID),
+ })
+ if err != nil {
+ return "", err.Error()
+ }
+ result, err := tmpl.ParseString(lg.(variable.VariableData), *v)
+ if err != nil {
+ klog.Errorf("failed to get var %v", err)
+ return "", err.Error()
+ }
+ return result, ""
+ }
+
+ return "", "unknown args for debug. only support var or msg"
+}
diff --git a/pkg/modules/debug_test.go b/pkg/modules/debug_test.go
new file mode 100644
index 00000000..5e07e0c0
--- /dev/null
+++ b/pkg/modules/debug_test.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ testassert "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// TestDebug drives ModuleDebug through its unknown-args, "var" and "msg"
+// paths; exceptStdout/exceptStderr are the expected return values.
+func TestDebug(t *testing.T) {
+	testcases := []struct {
+		name         string
+		opt          ExecOptions
+		exceptStdout string
+		exceptStderr string
+	}{
+		{
+			name: "non-var and non-msg",
+			opt: ExecOptions{
+				Args: runtime.RawExtension{},
+				Host: "local",
+			},
+			exceptStderr: "unknown args for debug. only support var or msg",
+		},
+		{
+			// "var" names a variable to resolve
+			name: "var value",
+			opt: ExecOptions{
+				Args: runtime.RawExtension{
+					Raw: []byte(`{"var": "k"}`),
+				},
+				Host: "local",
+				Variable: &testVariable{
+					value: map[string]any{
+						"k": "v",
+					},
+				},
+			},
+			exceptStdout: "v",
+		},
+		{
+			// "msg" is a template rendered against the variables
+			name: "msg value",
+			opt: ExecOptions{
+				Args: runtime.RawExtension{
+					Raw: []byte(`{"msg": "{{ k }}"}`),
+				},
+				Host: "local",
+				Variable: &testVariable{
+					value: map[string]any{
+						"k": "v",
+					},
+				},
+			},
+			exceptStdout: "v",
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+			defer cancel()
+			acStdout, acStderr := ModuleDebug(ctx, tc.opt)
+			testassert.Equal(t, tc.exceptStdout, acStdout)
+			testassert.Equal(t, tc.exceptStderr, acStderr)
+		})
+	}
+}
diff --git a/pkg/modules/helper.go b/pkg/modules/helper.go
new file mode 100644
index 00000000..7f4aa1df
--- /dev/null
+++ b/pkg/modules/helper.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+// findPath resolves src against, in order: the role's roleSub directory, the
+// playbook's directory, and the current working directory. Absolute paths
+// (and nil) are returned unchanged; nil is returned when no candidate exists
+// on disk.
+//
+// NOTE(review): the fsys parameter is currently unused — existence checks go
+// through os.Stat on the real filesystem; confirm whether it should be
+// consulted instead. (The parameter was previously named "fs", shadowing the
+// imported io/fs package.)
+func findPath(fsys fs.FS, roleSub string, src *string, options ExecOptions) *string {
+	if src == nil || filepath.IsAbs(*src) {
+		return src
+	}
+
+	candidates := []string{}
+	absPlaybook := options.Pipeline.Spec.Playbook
+	if !filepath.IsAbs(absPlaybook) {
+		absPlaybook = filepath.Join(_const.GetWorkDir(), _const.ProjectDir, options.Pipeline.Spec.Project.Name, absPlaybook)
+	}
+	baseDir := filepath.Dir(filepath.Dir(absPlaybook))
+	// look under roles/<role>/<roleSub> first
+	if role := options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]; role != "" {
+		candidates = append(candidates, filepath.Join(baseDir, _const.ProjectRolesDir, role, roleSub, *src))
+	}
+	// then next to the playbook
+	candidates = append(candidates, filepath.Join(filepath.Dir(absPlaybook), *src))
+	// finally the current working directory
+	if dir, err := os.Getwd(); err == nil {
+		candidates = append(candidates, filepath.Join(dir, *src))
+	}
+
+	for _, c := range candidates {
+		if _, err := os.Stat(c); err == nil {
+			c := c // copy before taking the address of the loop variable
+			return &c
+		}
+	}
+	return nil
+}
diff --git a/pkg/modules/helper_test.go b/pkg/modules/helper_test.go
new file mode 100644
index 00000000..012639b0
--- /dev/null
+++ b/pkg/modules/helper_test.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "io"
+ "io/fs"
+
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// testVariable is a minimal variable.Variable stub for module tests:
+// Get returns the stubbed value/err verbatim; Merge records a fixed
+// marker value so tests can observe that a merge happened.
+type testVariable struct {
+	value variable.VariableData
+	err   error
+}
+
+// Get returns the stubbed value and error, ignoring the option.
+func (v testVariable) Get(option variable.GetOption) (any, error) {
+	return v.value, v.err
+}
+
+// Merge overwrites value with a marker map. The receiver must be a
+// pointer: the original value receiver mutated a copy, silently making
+// Merge a no-op. Tests already pass &testVariable{}, so *testVariable
+// still satisfies variable.Variable.
+func (v *testVariable) Merge(option ...variable.MergeOption) error {
+	v.value = variable.VariableData{
+		"k": "v",
+	}
+	return nil
+}
+
+// testConnector is a canned connector.Connector stub: every method simply
+// returns the value configured in the corresponding field.
+type testConnector struct {
+	// return for init
+	initErr error
+	// return for close
+	closeErr error
+	// return for copy
+	copyErr error
+	// return for fetch
+	fetchErr error
+	// return for command
+	output []byte
+	commandErr error
+}
+
+// Init returns the configured init error.
+func (c testConnector) Init(ctx context.Context) error { return c.initErr }
+
+// Close returns the configured close error.
+func (c testConnector) Close(ctx context.Context) error { return c.closeErr }
+
+// CopyFile returns the configured copy error; nothing is written.
+func (c testConnector) CopyFile(ctx context.Context, local []byte, remoteFile string, mode fs.FileMode) error {
+	return c.copyErr
+}
+
+// FetchFile returns the configured fetch error; local is untouched.
+func (c testConnector) FetchFile(ctx context.Context, remoteFile string, local io.Writer) error {
+	return c.fetchErr
+}
+
+// ExecuteCommand returns the configured output and command error.
+func (c testConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) {
+	return c.output, c.commandErr
+}
diff --git a/pkg/modules/module.go b/pkg/modules/module.go
new file mode 100644
index 00000000..c0006002
--- /dev/null
+++ b/pkg/modules/module.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/klog/v2"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// ModuleExecFunc runs a module once for a single host and returns the
+// captured stdout and stderr; a non-empty stderr marks the run as failed.
+type ModuleExecFunc func(ctx context.Context, options ExecOptions) (stdout string, stderr string)
+
+// ExecOptions carries everything a module needs for one execution.
+type ExecOptions struct {
+	// the defined Args for module
+	Args runtime.RawExtension
+	// which Host to execute
+	Host string
+	// the variable module need
+	variable.Variable
+	// the task to be executed
+	kubekeyv1alpha1.Task
+	// the pipeline to be executed
+	kubekeyv1.Pipeline
+}
+
+// module maps a module name to its executor; it is filled by init below
+// and by RegisterModule. NOTE(review): access is unsynchronized — this
+// assumes all registration happens during package init; confirm before
+// calling RegisterModule at runtime.
+var module = make(map[string]ModuleExecFunc)
+
+// RegisterModule registers exec under moduleName so FindModule can resolve
+// it. When the name is already taken it logs and returns an error, leaving
+// the existing registration untouched.
+func RegisterModule(moduleName string, exec ModuleExecFunc) error {
+	if _, ok := module[moduleName]; ok {
+		// fixed grammar of the original "module %s is exist" message
+		err := fmt.Errorf("module %s already exists", moduleName)
+		klog.Error(err)
+		return err
+	}
+	module[moduleName] = exec
+	return nil
+}
+
+// FindModule returns the executor registered under moduleName, or nil
+// when the name is unknown.
+func FindModule(moduleName string) ModuleExecFunc {
+	exec, ok := module[moduleName]
+	if !ok {
+		return nil
+	}
+	return exec
+}
+
+func init() {
+	// Built-in modules. The names below are unique, so registration cannot
+	// fail; errors are still checked (the original dropped them) to surface
+	// accidental duplicates during development.
+	builtins := map[string]ModuleExecFunc{
+		"assert":   ModuleAssert,
+		"command":  ModuleCommand,
+		"shell":    ModuleCommand, // shell is an alias of command
+		"copy":     ModuleCopy,
+		"debug":    ModuleDebug,
+		"template": ModuleTemplate,
+		"set_fact": ModuleSetFact,
+	}
+	for name, exec := range builtins {
+		if err := RegisterModule(name, exec); err != nil {
+			klog.Errorf("register builtin module %s: %v", name, err)
+		}
+	}
+}
diff --git a/pkg/modules/set_fact.go b/pkg/modules/set_fact.go
new file mode 100644
index 00000000..679958ec
--- /dev/null
+++ b/pkg/modules/set_fact.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// ModuleSetFact renders each string argument as a template against the
+// task's location variables and merges the results into the host's
+// variables; non-string values are stored verbatim. It returns
+// ("success", "") on success and ("", message) on failure.
+func ModuleSetFact(ctx context.Context, options ExecOptions) (string, string) {
+	args := variable.Extension2Variables(options.Args)
+	lv, err := options.Variable.Get(variable.LocationVars{
+		HostName:    options.Host,
+		LocationUID: string(options.Task.UID),
+	})
+	if err != nil {
+		klog.Errorf("failed to get location vars %v", err)
+		return "", err.Error()
+	}
+	// Guard the type assertion instead of panicking on an unexpected type
+	// (the original asserted unchecked inside the loop).
+	lvd, ok := lv.(variable.VariableData)
+	if !ok {
+		klog.Errorf("unexpected location vars type %T", lv)
+		return "", "location variables are not VariableData"
+	}
+
+	factVars := variable.VariableData{}
+	for k, v := range args {
+		// Idiomatic type switch binds the asserted value once.
+		switch val := v.(type) {
+		case string:
+			factVars[k], err = tmpl.ParseString(lvd, val)
+			if err != nil {
+				klog.Errorf("template parse %s error: %v", val, err)
+				return "", err.Error()
+			}
+		default:
+			factVars[k] = v
+		}
+	}
+
+	// Empty LocationUID merges the facts at host scope, not task scope.
+	if err := options.Variable.Merge(variable.HostMerge{
+		HostNames:   []string{options.Host},
+		LocationUID: "",
+		Data:        factVars,
+	}); err != nil {
+		klog.Errorf("merge fact error: %v", err)
+		return "", err.Error()
+	}
+	return "success", ""
+}
diff --git a/pkg/modules/set_fact_test.go b/pkg/modules/set_fact_test.go
new file mode 100644
index 00000000..5a946a02
--- /dev/null
+++ b/pkg/modules/set_fact_test.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
+)
+
+// TestSetFact drives ModuleSetFact through table-driven cases and checks
+// both output streams.
+func TestSetFact(t *testing.T) {
+	cases := []struct {
+		name       string
+		opt        ExecOptions
+		wantStdout string
+		wantStderr string
+	}{
+		{
+			name: "success",
+			opt: ExecOptions{
+				Args:     runtime.RawExtension{Raw: []byte(`{"k": "v"}`)},
+				Host:     "",
+				Variable: &testVariable{},
+				Task:     kubekeyv1alpha1.Task{},
+				Pipeline: kubekeyv1.Pipeline{},
+			},
+			wantStdout: "success",
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+
+			stdout, stderr := ModuleSetFact(ctx, tc.opt)
+			assert.Equal(t, tc.wantStdout, stdout)
+			assert.Equal(t, tc.wantStderr, stderr)
+		})
+	}
+}
diff --git a/pkg/modules/template.go b/pkg/modules/template.go
new file mode 100644
index 00000000..6dc89d5d
--- /dev/null
+++ b/pkg/modules/template.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "k8s.io/klog/v2"
+
+ kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
+ "github.com/kubesphere/kubekey/v4/pkg/connector"
+ "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
+ "github.com/kubesphere/kubekey/v4/pkg/project"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// ModuleTemplate renders a template file from the project and copies the
+// result to dest on the target host.
+//
+// Expected args:
+//   - src:  template path (string); relative paths resolve against the
+//     role's templates dir or the playbook dir.
+//   - dest: destination path on the remote host (string).
+//   - mode: optional file mode (int); defaults to fs.ModePerm.
+//
+// It returns (stdout, stderr); a non-empty stderr indicates failure.
+func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
+	// check args
+	args := variable.Extension2Variables(options.Args)
+	src := variable.StringVar(args, "src")
+	if src == nil {
+		return "", "\"src\" should be string"
+	}
+	dest := variable.StringVar(args, "dest")
+	if dest == nil {
+		return "", "\"dest\" should be string"
+	}
+
+	// Location variables render the src/dest args and, later, the template
+	// file itself (the original fetched them a second time for the file).
+	lv, err := options.Variable.Get(variable.LocationVars{
+		HostName:    options.Host,
+		LocationUID: string(options.Task.UID),
+	})
+	if err != nil {
+		klog.Errorf("failed to get location vars %v", err)
+		return "", err.Error()
+	}
+	srcStr, err := tmpl.ParseString(lv.(variable.VariableData), *src)
+	if err != nil {
+		klog.Errorf("template parse src %s error: %v", *src, err)
+		return "", err.Error()
+	}
+	destStr, err := tmpl.ParseString(lv.(variable.VariableData), *dest)
+	if err != nil {
+		// fixed: the original log said "parse src" for the dest argument
+		klog.Errorf("template parse dest %s error: %v", *dest, err)
+		return "", err.Error()
+	}
+
+	// Absolute templates are read from the OS root; relative ones from the
+	// project filesystem.
+	var baseFS fs.FS
+	if filepath.IsAbs(srcStr) {
+		baseFS = os.DirFS("/")
+	} else {
+		projectFs, err := project.New(project.Options{Pipeline: &options.Pipeline}).FS(ctx, false)
+		if err != nil {
+			klog.Errorf("failed to get project fs %v", err)
+			return "", err.Error()
+		}
+		baseFS = projectFs
+	}
+	roleName := options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]
+	flPath := project.GetTemplatesFromPlayBook(baseFS, options.Pipeline.Spec.Playbook, roleName, srcStr)
+	if _, err := fs.Stat(baseFS, flPath); err != nil {
+		klog.Errorf("find src error %v", err)
+		return "", err.Error()
+	}
+
+	// Reuse a connector injected via the context (tests do this), otherwise
+	// build one from the host's variables.
+	// NOTE(review): a raw string context key is collision-prone — consider
+	// a package-private key type.
+	var conn connector.Connector
+	if v := ctx.Value("connector"); v != nil {
+		conn = v.(connector.Connector)
+	} else {
+		ha, err := options.Variable.Get(variable.HostVars{HostName: options.Host})
+		if err != nil {
+			klog.Errorf("failed to get host %v", err)
+			return "", err.Error()
+		}
+		conn = connector.NewConnector(options.Host, ha.(variable.VariableData))
+	}
+	if err := conn.Init(ctx); err != nil {
+		klog.Errorf("failed to init connector %v", err)
+		return "", err.Error()
+	}
+	defer conn.Close(ctx)
+
+	// Render the template with the location variables fetched above (the
+	// original issued a duplicate Variable.Get here).
+	data, err := fs.ReadFile(baseFS, flPath)
+	if err != nil {
+		return "", err.Error()
+	}
+	result, err := tmpl.ParseFile(lv.(variable.VariableData), data)
+	if err != nil {
+		return "", err.Error()
+	}
+
+	// copy file
+	mode := fs.ModePerm
+	if v := variable.IntVar(args, "mode"); v != nil {
+		mode = fs.FileMode(*v)
+	}
+	if err := conn.CopyFile(ctx, []byte(result), destStr, mode); err != nil {
+		return "", err.Error()
+	}
+	return "success", ""
+}
diff --git a/pkg/modules/template_test.go b/pkg/modules/template_test.go
new file mode 100644
index 00000000..4b9d4b44
--- /dev/null
+++ b/pkg/modules/template_test.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modules
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ testassert "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// TestTemplate exercises ModuleTemplate's argument validation.
+func TestTemplate(t *testing.T) {
+	absPath, err := filepath.Abs(os.Args[0])
+	if err != nil {
+		fmt.Println("Error getting absolute path:", err)
+		return
+	}
+
+	testcases := []struct {
+		name           string
+		opt            ExecOptions
+		ctx            context.Context
+		expectedStdout string
+		expectedStderr string
+	}{
+		{
+			name: "src is empty",
+			opt: ExecOptions{
+				Args:     runtime.RawExtension{},
+				Host:     "local",
+				Variable: nil,
+			},
+			ctx:            context.Background(),
+			expectedStderr: "\"src\" should be string",
+		},
+		{
+			name: "dest is empty",
+			opt: ExecOptions{
+				Args: runtime.RawExtension{
+					// %q quotes absPath so the raw bytes stay valid JSON;
+					// the original %s produced an unquoted, invalid value,
+					// so "src" parsed as missing and the case failed.
+					Raw: []byte(fmt.Sprintf(`{"src": %q}`, absPath)),
+				},
+				Host:     "local",
+				Variable: nil,
+			},
+			ctx:            context.Background(),
+			expectedStderr: "\"dest\" should be string",
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(tc.ctx, time.Second*5)
+			defer cancel()
+			acStdout, acStderr := ModuleTemplate(ctx, tc.opt)
+			testassert.Equal(t, tc.expectedStdout, acStdout)
+			testassert.Equal(t, tc.expectedStderr, acStderr)
+		})
+	}
+}
diff --git a/pkg/project/helper.go b/pkg/project/helper.go
new file mode 100644
index 00000000..e600774b
--- /dev/null
+++ b/pkg/project/helper.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package project
+
+import (
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+// GetPlaybookBaseFromPlaybook resolves playbook relative to pbPath,
+// probing in order:
+//   project/playbooks/playbook,
+//   current_playbook/playbooks/playbook,
+//   current_playbook/playbook.
+// It returns the first candidate that exists, or "" when none do. Lookups
+// go through baseFS when it is non-nil, the OS filesystem otherwise.
+func GetPlaybookBaseFromPlaybook(baseFS fs.FS, pbPath string, playbook string) string {
+	var find []string
+	// find from project/playbooks/playbook
+	find = append(find, filepath.Join(filepath.Dir(filepath.Dir(pbPath)), _const.ProjectPlaybooksDir, playbook))
+	// find from pbPath dir like: current_playbook/playbooks/playbook
+	find = append(find, filepath.Join(filepath.Dir(pbPath), _const.ProjectPlaybooksDir, playbook))
+	// find from pbPath dir like: current_playbook/playbook
+	find = append(find, filepath.Join(filepath.Dir(pbPath), playbook))
+
+	// The original nested a duplicate "baseFS != nil" check inside the else
+	// branch (dead code); flattened here with identical behavior.
+	for _, s := range find {
+		if baseFS != nil {
+			if _, err := fs.Stat(baseFS, s); err == nil {
+				return s
+			}
+		} else if _, err := os.Stat(s); err == nil {
+			return s
+		}
+	}
+
+	return ""
+}
+
+// GetRoleBaseFromPlaybook
+// find from project/roles/roleName if exists.
+// find from current_playbook/roles/roleName if exists.
+// find current_playbook/playbook
+func GetRoleBaseFromPlaybook(baseFS fs.FS, pbPath string, roleName string) string {
+ var find []string
+ // find from project/roles/roleName
+ find = append(find, filepath.Join(filepath.Dir(filepath.Dir(pbPath)), _const.ProjectRolesDir, roleName))
+ // find from pbPath dir like: current_playbook/roles/roleName
+ find = append(find, filepath.Join(filepath.Dir(pbPath), _const.ProjectRolesDir, roleName))
+
+ for _, s := range find {
+ if baseFS != nil {
+ if _, err := fs.Stat(baseFS, s); err == nil {
+ return s
+ }
+ } else {
+ if _, err := os.Stat(s); err == nil {
+ return s
+ }
+ }
+ }
+
+ return ""
+}
+
+// GetFilesFromPlayBook resolves filePath for a playbook's "files"
+// resources: absolute paths are returned unchanged; with a role the path
+// is relative to the role's files dir, otherwise to the playbook's files
+// dir.
+func GetFilesFromPlayBook(baseFS fs.FS, pbPath string, roleName string, filePath string) string {
+	switch {
+	case filepath.IsAbs(filePath):
+		return filePath
+	case roleName != "":
+		return filepath.Join(GetRoleBaseFromPlaybook(baseFS, pbPath, roleName), _const.ProjectRolesFilesDir, filePath)
+	default:
+		// relative to the playbook dir, e.g. playbooks/files/filePath
+		return filepath.Join(filepath.Dir(pbPath), _const.ProjectRolesFilesDir, filePath)
+	}
+}
+
+// GetTemplatesFromPlayBook resolves tmplPath for a playbook's "templates"
+// resources: absolute paths are returned unchanged; with a role the path
+// is relative to the role's templates dir, otherwise to the playbook's
+// templates dir.
+func GetTemplatesFromPlayBook(baseFS fs.FS, pbPath string, roleName string, tmplPath string) string {
+	switch {
+	case filepath.IsAbs(tmplPath):
+		return tmplPath
+	case roleName != "":
+		return filepath.Join(GetRoleBaseFromPlaybook(baseFS, pbPath, roleName), _const.ProjectRolesTemplateDir, tmplPath)
+	default:
+		// relative to the playbook dir, e.g. playbooks/templates/tmplPath
+		return filepath.Join(filepath.Dir(pbPath), _const.ProjectRolesTemplateDir, tmplPath)
+	}
+}
+
+// GetYamlFile
+// return *.yaml if exists
+// return *.yml if exists.
+func GetYamlFile(baseFS fs.FS, base string) string {
+ var find []string
+ find = append(find,
+ fmt.Sprintf("%s.yaml", base),
+ fmt.Sprintf("%s.yml", base))
+
+ for _, s := range find {
+ if baseFS != nil {
+ if _, err := fs.Stat(baseFS, s); err == nil {
+ return s
+ }
+ } else {
+ if _, err := os.Stat(s); err == nil {
+ return s
+ }
+ }
+ }
+
+ return ""
+}
diff --git a/pkg/project/helper_test.go b/pkg/project/helper_test.go
new file mode 100644
index 00000000..081b0e94
--- /dev/null
+++ b/pkg/project/helper_test.go
@@ -0,0 +1,194 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package project
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestGetPlaybookBaseFromAbsPlaybook checks playbook resolution against
+// the testdata fixture tree.
+func TestGetPlaybookBaseFromAbsPlaybook(t *testing.T) {
+	cases := []struct {
+		name         string
+		basePlaybook string
+		playbook     string
+		want         string
+	}{
+		{
+			name:         "find from project/playbooks/playbook",
+			basePlaybook: filepath.Join("playbooks", "playbook1.yaml"),
+			playbook:     "playbook2.yaml",
+			want:         filepath.Join("playbooks", "playbook2.yaml"),
+		},
+		{
+			name:         "find from current_playbook/playbooks/playbook",
+			basePlaybook: filepath.Join("playbooks", "playbook1.yaml"),
+			playbook:     "playbook3.yaml",
+			want:         filepath.Join("playbooks", "playbooks", "playbook3.yaml"),
+		},
+		{
+			name:         "cannot find",
+			basePlaybook: filepath.Join("playbooks", "playbook1.yaml"),
+			playbook:     "playbook4.yaml",
+			want:         "",
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			got := GetPlaybookBaseFromPlaybook(os.DirFS("testdata"), tc.basePlaybook, tc.playbook)
+			assert.Equal(t, tc.want, got)
+		})
+	}
+}
+
+// TestGetRoleBaseFromAbsPlaybook checks role-directory resolution against
+// the testdata fixture tree.
+func TestGetRoleBaseFromAbsPlaybook(t *testing.T) {
+	cases := []struct {
+		name         string
+		basePlaybook string
+		roleName     string
+		want         string
+	}{
+		{
+			name:         "find from project/roles/roleName",
+			basePlaybook: filepath.Join("playbooks", "playbook1.yaml"),
+			roleName:     "role1",
+			want:         filepath.Join("roles", "role1"),
+		},
+		{
+			name:         "find from current_playbook/roles/roleName",
+			basePlaybook: filepath.Join("playbooks", "playbook1.yaml"),
+			roleName:     "role2",
+			want:         filepath.Join("playbooks", "roles", "role2"),
+		},
+		{
+			name:         "cannot find",
+			basePlaybook: filepath.Join("playbooks", "playbook1.yaml"),
+			roleName:     "role3",
+			want:         "",
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			got := GetRoleBaseFromPlaybook(os.DirFS("testdata"), tc.basePlaybook, tc.roleName)
+			assert.Equal(t, tc.want, got)
+		})
+	}
+}
+
+// TestGetFilesFromPlayBook checks file resolution for absolute, role-
+// relative and playbook-relative paths. Expected values are built with
+// filepath.Join so the test is separator-portable (the originals
+// hardcoded "/" while the function under test uses filepath.Join).
+func TestGetFilesFromPlayBook(t *testing.T) {
+	testcases := []struct {
+		name     string
+		pbPath   string
+		role     string
+		filePath string
+		expected string
+	}{
+		{
+			name:     "absolute filePath",
+			filePath: "/tmp",
+			expected: "/tmp",
+		},
+		{
+			name:     "empty role",
+			pbPath:   filepath.Join("playbooks", "test.yaml"),
+			filePath: "tmp",
+			expected: filepath.Join("playbooks", "files", "tmp"),
+		},
+		{
+			name:     "not empty role",
+			pbPath:   filepath.Join("playbooks", "test.yaml"),
+			role:     "role1",
+			filePath: "tmp",
+			expected: filepath.Join("roles", "role1", "files", "tmp"),
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			assert.Equal(t, tc.expected, GetFilesFromPlayBook(os.DirFS("testdata"), tc.pbPath, tc.role, tc.filePath))
+		})
+	}
+}
+
+// TestGetTemplatesFromPlayBook checks template resolution for absolute,
+// role-relative and playbook-relative paths. Expected values are built
+// with filepath.Join so the test is separator-portable (the originals
+// hardcoded "/" while the function under test uses filepath.Join).
+func TestGetTemplatesFromPlayBook(t *testing.T) {
+	testcases := []struct {
+		name     string
+		pbPath   string
+		role     string
+		filePath string
+		expected string
+	}{
+		{
+			name:     "absolute filePath",
+			filePath: "/tmp",
+			expected: "/tmp",
+		},
+		{
+			name:     "empty role",
+			pbPath:   filepath.Join("playbooks", "test.yaml"),
+			filePath: "tmp",
+			expected: filepath.Join("playbooks", "templates", "tmp"),
+		},
+		{
+			name:     "not empty role",
+			pbPath:   filepath.Join("playbooks", "test.yaml"),
+			role:     "role1",
+			filePath: "tmp",
+			expected: filepath.Join("roles", "role1", "templates", "tmp"),
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			assert.Equal(t, tc.expected, GetTemplatesFromPlayBook(os.DirFS("testdata"), tc.pbPath, tc.role, tc.filePath))
+		})
+	}
+}
+
+// TestGetYamlFile checks the .yaml-before-.yml lookup order against the
+// testdata fixtures.
+func TestGetYamlFile(t *testing.T) {
+	cases := []struct {
+		name string
+		base string
+		want string
+	}{
+		{
+			name: "get yaml",
+			base: filepath.Join("playbooks", "playbook2"),
+			want: filepath.Join("playbooks", "playbook2.yaml"),
+		},
+		{
+			name: "get yml",
+			base: filepath.Join("playbooks", "playbook3"),
+			want: filepath.Join("playbooks", "playbook3.yml"),
+		},
+		{
+			name: "cannot find",
+			base: filepath.Join("playbooks", "playbook4"),
+			want: "",
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			assert.Equal(t, tc.want, GetYamlFile(os.DirFS("testdata"), tc.base))
+		})
+	}
+}
diff --git a/pkg/project/project.go b/pkg/project/project.go
new file mode 100644
index 00000000..800aa5c3
--- /dev/null
+++ b/pkg/project/project.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package project
+
+import (
+ "context"
+ "io/fs"
+ "path/filepath"
+ "strings"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+)
+
+// Project abstracts where pipeline content (playbooks, roles, templates)
+// is loaded from.
+type Project interface {
+	// FS returns the project filesystem; when update is true a remote
+	// (git) project is refreshed first.
+	FS(ctx context.Context, update bool) (fs.FS, error)
+}
+
+// Options configures New with the pipeline whose project is resolved.
+type Options struct {
+	*kubekeyv1.Pipeline
+}
+
+// New builds a Project for the pipeline: a gitProject when the project
+// address looks like a git remote (http(s):// or git@), a localProject
+// otherwise.
+func New(o Options) Project {
+	addr := o.Pipeline.Spec.Project.Addr
+	isGit := strings.HasPrefix(addr, "https://") ||
+		strings.HasPrefix(addr, "http://") ||
+		strings.HasPrefix(addr, "git@")
+	if !isGit {
+		return &localProject{Pipeline: *o.Pipeline}
+	}
+
+	// git clone to project dir; default the project name to the repo name
+	// derived from the address (trailing path element, ".git" stripped).
+	if o.Pipeline.Spec.Project.Name == "" {
+		repo := addr[strings.LastIndex(addr, "/")+1:]
+		o.Pipeline.Spec.Project.Name = strings.TrimSuffix(repo, ".git")
+	}
+	return &gitProject{Pipeline: *o.Pipeline, localDir: filepath.Join(_const.GetWorkDir(), _const.ProjectDir, o.Spec.Project.Name)}
+}
diff --git a/pkg/project/project_git.go b/pkg/project/project_git.go
new file mode 100644
index 00000000..54fb713c
--- /dev/null
+++ b/pkg/project/project_git.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package project
+
+import (
+ "context"
+ "io/fs"
+ "os"
+
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/transport/http"
+ "k8s.io/klog/v2"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+)
+
+// gitProject serves project content from a git repository checked out
+// under the work dir.
+type gitProject struct {
+	kubekeyv1.Pipeline
+
+	// localDir is the on-disk checkout location.
+	localDir string
+}
+
+// FS returns the local checkout as an fs.FS, refreshing it first (clone
+// or pull) when update is true.
+func (r gitProject) FS(ctx context.Context, update bool) (fs.FS, error) {
+	if update {
+		if err := r.init(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return os.DirFS(r.localDir), nil
+}
+
+// init ensures the local checkout exists and is current: clone when the
+// directory is missing, pull otherwise. (Flattened the original
+// else-after-return.)
+func (r gitProject) init(ctx context.Context) error {
+	// Any stat error (typically "does not exist") triggers a fresh clone.
+	if _, err := os.Stat(r.localDir); err != nil {
+		return r.gitClone(ctx)
+	}
+	return r.gitPull(ctx)
+}
+
+// gitClone clones the single configured branch of the project repository
+// into localDir, authenticating with the project token.
+func (r gitProject) gitClone(ctx context.Context) error {
+	if _, err := git.PlainCloneContext(ctx, r.localDir, false, &git.CloneOptions{
+		URL:           r.Pipeline.Spec.Project.Addr,
+		Progress:      nil,
+		ReferenceName: plumbing.NewBranchReferenceName(r.Pipeline.Spec.Project.Branch),
+		SingleBranch:  true,
+		// keyed field: the original unkeyed composite literal fails go vet
+		// and breaks if TokenAuth ever gains fields.
+		Auth:            &http.TokenAuth{Token: r.Pipeline.Spec.Project.Token},
+		InsecureSkipTLS: false,
+	}); err != nil {
+		klog.Errorf("clone project %s failed: %v", r.Pipeline.Spec.Project.Addr, err)
+		return err
+	}
+	return nil
+}
+
+// gitPull fast-forwards the existing checkout from the configured remote
+// branch; an already-up-to-date worktree is not an error.
+func (r gitProject) gitPull(ctx context.Context) error {
+	open, err := git.PlainOpen(r.localDir)
+	if err != nil {
+		klog.Errorf("git open local %s error: %v", r.localDir, err)
+		return err
+	}
+	wt, err := open.Worktree()
+	if err != nil {
+		klog.Errorf("git open worktree error: %v", err)
+		return err
+	}
+	if err := wt.PullContext(ctx, &git.PullOptions{
+		RemoteURL:     r.Pipeline.Spec.Project.Addr,
+		ReferenceName: plumbing.NewBranchReferenceName(r.Pipeline.Spec.Project.Branch),
+		SingleBranch:  true,
+		// keyed field: the original unkeyed composite literal fails go vet.
+		Auth:            &http.TokenAuth{Token: r.Pipeline.Spec.Project.Token},
+		InsecureSkipTLS: false,
+	}); err != nil && err != git.NoErrAlreadyUpToDate {
+		klog.Errorf("pull project %s failed: %v", r.Pipeline.Spec.Project.Addr, err)
+		return err
+	}
+
+	return nil
+}
diff --git a/pkg/project/project_local.go b/pkg/project/project_local.go
new file mode 100644
index 00000000..ddc4f2ef
--- /dev/null
+++ b/pkg/project/project_local.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package project
+
+import (
+ "context"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "github.com/kubesphere/kubekey/v4/pipeline"
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+)
+
+// localProject serves pipeline content from the local machine: the
+// embedded builtin pipeline, the OS root for absolute playbooks, a
+// pre-set fs, or the project address as a local directory.
+type localProject struct {
+	kubekeyv1.Pipeline
+
+	// fs, when non-nil, is returned as-is by FS.
+	fs fs.FS
+}
+
+// FS returns the filesystem pipeline content is read from, checked in
+// this order (the order matters — the builtin annotation wins even when
+// an address is also set): embedded builtin pipeline, OS root for an
+// absolute playbook path, a pre-set fs, then the project address as a
+// local directory. The update flag is ignored for local projects.
+func (r localProject) FS(ctx context.Context, update bool) (fs.FS, error) {
+	// Builtin pipelines ship embedded in the binary.
+	if _, ok := r.Pipeline.Annotations[kubekeyv1.BuiltinsProjectAnnotation]; ok {
+		return pipeline.InternalPipeline, nil
+	}
+	// An absolute playbook path is resolved against the OS root.
+	if filepath.IsAbs(r.Pipeline.Spec.Playbook) {
+		return os.DirFS("/"), nil
+	}
+
+	if r.fs != nil {
+		return r.fs, nil
+	}
+
+	// Treat the project address as a local directory path.
+	if r.Pipeline.Spec.Project.Addr != "" {
+		return os.DirFS(r.Pipeline.Spec.Project.Addr), nil
+	}
+
+	// NOTE(review): this message says "absolute project" but fires when no
+	// content source is configured at all — consider rewording.
+	return nil, fmt.Errorf("cannot get filesystem from absolute project %s", r.Pipeline.Spec.Project.Addr)
+}
diff --git a/pkg/project/testdata/playbooks/playbook1.yaml b/pkg/project/testdata/playbooks/playbook1.yaml
new file mode 100644
index 00000000..08e2e5d0
--- /dev/null
+++ b/pkg/project/testdata/playbooks/playbook1.yaml
@@ -0,0 +1,30 @@
+- name: play1
+ hosts: localhost
+ pre_tasks:
+ - name: play1 | pre_block1
+ debug:
+ msg: echo "hello world"
+ tasks:
+ - name: play1 | block1
+ block:
+ - name: play1 | block1 | block1
+ debug:
+ msg: echo "hello world"
+ - name: play1 | block1 | block2
+ debug:
+ msg: echo "hello world"
+ - name: play1 | block2
+ debug:
+ msg: echo "hello world"
+ post_tasks:
+ - name: play1 | post_block1
+ debug:
+ msg: echo "hello world"
+ roles:
+ - role1
+- name: play2
+ hosts: localhost
+ tasks:
+ - name: play2 | block1
+ debug:
+ msg: echo "hello world"
diff --git a/pkg/project/testdata/playbooks/playbook2.yaml b/pkg/project/testdata/playbooks/playbook2.yaml
new file mode 100644
index 00000000..e69de29b
diff --git a/pkg/project/testdata/playbooks/playbook2.yml b/pkg/project/testdata/playbooks/playbook2.yml
new file mode 100644
index 00000000..e69de29b
diff --git a/pkg/project/testdata/playbooks/playbook3.yml b/pkg/project/testdata/playbooks/playbook3.yml
new file mode 100644
index 00000000..e69de29b
diff --git a/pkg/project/testdata/playbooks/playbooks/playbook3.yaml b/pkg/project/testdata/playbooks/playbooks/playbook3.yaml
new file mode 100644
index 00000000..e69de29b
diff --git a/pkg/project/testdata/playbooks/roles/role2/tasks/main.yaml b/pkg/project/testdata/playbooks/roles/role2/tasks/main.yaml
new file mode 100644
index 00000000..0a50611a
--- /dev/null
+++ b/pkg/project/testdata/playbooks/roles/role2/tasks/main.yaml
@@ -0,0 +1,3 @@
+- name: role1 | block1
+ debug:
+ msg: echo "hello world"
diff --git a/pkg/project/testdata/roles/role1/tasks/main.yaml b/pkg/project/testdata/roles/role1/tasks/main.yaml
new file mode 100644
index 00000000..0a50611a
--- /dev/null
+++ b/pkg/project/testdata/roles/role1/tasks/main.yaml
@@ -0,0 +1,3 @@
+- name: role1 | block1
+ debug:
+ msg: echo "hello world"
diff --git a/pkg/task/controller.go b/pkg/task/controller.go
new file mode 100644
index 00000000..89974013
--- /dev/null
+++ b/pkg/task/controller.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package task
+
+import (
+ "context"
+
+ "golang.org/x/time/rate"
+ "k8s.io/client-go/util/workqueue"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ "github.com/kubesphere/kubekey/v4/pkg/cache"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
+// Controller is the interface for running tasks
+type Controller interface {
+	// Start the controller
+	Start(ctx context.Context) error
+	// AddTasks adds tasks to the controller
+	AddTasks(ctx context.Context, o AddTaskOptions) error
+}
+
+// AddTaskOptions bundles the pipeline whose tasks should be scheduled.
+type AddTaskOptions struct {
+	*kubekeyv1.Pipeline
+	// set by AddTask function
+	variable variable.Variable
+}
+
+// ControllerOptions configures NewController; zero-valued fields are
+// defaulted there (MaxConcurrent -> 1, Client -> in-memory delegating
+// client).
+type ControllerOptions struct {
+	// MaxConcurrent caps how many tasks are processed in parallel.
+	MaxConcurrent int
+	ctrlclient.Client
+	// TaskReconciler handles each queued task.
+	TaskReconciler reconcile.Reconciler
+}
+
+// NewController constructs the default task controller, filling in
+// defaults for unset options: one concurrent worker and an in-memory
+// delegating client. The work queue is rate-limited to 10 items/s with a
+// burst of 100.
+func NewController(o ControllerOptions) (Controller, error) {
+	maxConcurrent := o.MaxConcurrent
+	if maxConcurrent == 0 {
+		maxConcurrent = 1
+	}
+	client := o.Client
+	if client == nil {
+		client = cache.NewDelegatingClient(nil)
+	}
+
+	limiter := &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}
+	return &taskController{
+		MaxConcurrent:  maxConcurrent,
+		wq:             workqueue.NewRateLimitingQueue(limiter),
+		client:         client,
+		taskReconciler: o.TaskReconciler,
+	}, nil
+}
diff --git a/pkg/task/helper.go b/pkg/task/helper.go
new file mode 100644
index 00000000..f72d5a07
--- /dev/null
+++ b/pkg/task/helper.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package task
+
import (
	"bufio"
	"bytes"
	"context"
	"fmt"
	"strings"

	"k8s.io/klog/v2"

	"github.com/kubesphere/kubekey/v4/pkg/connector"
	"github.com/kubesphere/kubekey/v4/pkg/variable"
)
+
+// getGatherFact get host info
+func getGatherFact(ctx context.Context, hostname string, vars variable.Variable) (variable.VariableData, error) {
+ v, err := vars.Get(variable.HostVars{HostName: hostname})
+ if err != nil {
+ klog.Errorf("get host %s all variable error %v", hostname, err)
+ return nil, err
+ }
+ conn := connector.NewConnector(hostname, v.(variable.VariableData))
+ if err := conn.Init(ctx); err != nil {
+ klog.Errorf("init connection error %v", err)
+ return nil, err
+ }
+ defer conn.Close(ctx)
+
+ // os information
+ osVars := make(variable.VariableData)
+ var osRelease bytes.Buffer
+ if err := conn.FetchFile(ctx, "/etc/os-release", &osRelease); err != nil {
+ klog.Errorf("fetch os-release error %v", err)
+ return nil, err
+ }
+ osVars["release"] = convertBytesToMap(osRelease.Bytes(), "=")
+ kernel, err := conn.ExecuteCommand(ctx, "uname -r")
+ if err != nil {
+ klog.Errorf("get kernel version error %v", err)
+ return nil, err
+ }
+ osVars["kernelVersion"] = string(bytes.TrimSuffix(kernel, []byte("\n")))
+ hn, err := conn.ExecuteCommand(ctx, "hostname")
+ if err != nil {
+ klog.Errorf("get hostname error %v", err)
+ return nil, err
+ }
+ osVars["hostname"] = string(bytes.TrimSuffix(hn, []byte("\n")))
+ arch, err := conn.ExecuteCommand(ctx, "arch")
+ if err != nil {
+ klog.Errorf("get arch error %v", err)
+ return nil, err
+ }
+ osVars["architecture"] = string(bytes.TrimSuffix(arch, []byte("\n")))
+
+ // process information
+ procVars := make(variable.VariableData)
+ var cpu bytes.Buffer
+ if err := conn.FetchFile(ctx, "/proc/cpuinfo", &cpu); err != nil {
+ klog.Errorf("fetch cpu error %v", err)
+ return nil, err
+ }
+ procVars["cpuInfo"] = convertBytesToSlice(cpu.Bytes(), ":")
+ var mem bytes.Buffer
+ if err := conn.FetchFile(ctx, "/proc/meminfo", &mem); err != nil {
+ klog.Errorf("fetch os-release error %v", err)
+ return nil, err
+ }
+ procVars["memInfo"] = convertBytesToMap(mem.Bytes(), ":")
+
+ return variable.VariableData{
+ "os": osVars,
+ "process": procVars,
+ }, nil
+}
+
// convertBytesToMap parses bs line by line into a key/value map.
// Each line containing the separator is split at its first occurrence;
// lines without the separator are skipped. Keys and values are trimmed.
func convertBytesToMap(bs []byte, split string) map[string]string {
	result := make(map[string]string)
	scanner := bufio.NewScanner(bytes.NewReader(bs))
	for scanner.Scan() {
		if k, v, found := strings.Cut(scanner.Text(), split); found {
			result[strings.TrimSpace(k)] = strings.TrimSpace(v)
		}
	}
	return result
}
+
// convertBytesToSlice parses bs into groups of key/value maps.
// Groups are separated by blank lines; within a group, each line that
// contains the separator contributes one trimmed key/value pair.
// Lines without the separator are ignored.
func convertBytesToSlice(bs []byte, split string) []map[string]string {
	var groups []map[string]string
	current := make(map[string]string)

	// flush appends the current group (if non-empty) and starts a new one.
	flush := func() {
		if len(current) > 0 {
			groups = append(groups, current)
			current = make(map[string]string)
		}
	}

	scanner := bufio.NewScanner(bytes.NewReader(bs))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			// a blank line terminates the current group
			flush()
			continue
		}
		if k, v, found := strings.Cut(line, split); found {
			current[strings.TrimSpace(k)] = strings.TrimSpace(v)
		}
	}
	// emit a trailing group that was not followed by a blank line
	flush()

	return groups
}
diff --git a/pkg/task/helper_test.go b/pkg/task/helper_test.go
new file mode 100644
index 00000000..67f16499
--- /dev/null
+++ b/pkg/task/helper_test.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package task
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestConvertBytesToMap(t *testing.T) {
+ testcases := []struct {
+ name string
+ data []byte
+ excepted map[string]string
+ }{
+ {
+ name: "succeed",
+ data: []byte(`PRETTY_NAME="Ubuntu 22.04.1 LTS"
+NAME="Ubuntu"
+VERSION_ID="22.04"
+VERSION="22.04.1 LTS (Jammy Jellyfish)"
+VERSION_CODENAME=jammy
+ID=ubuntu
+ID_LIKE=debian
+HOME_URL="https://www.ubuntu.com/"
+SUPPORT_URL="https://help.ubuntu.com/"
+BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
+PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
+UBUNTU_CODENAME=jammy
+`),
+ excepted: map[string]string{
+ "PRETTY_NAME": "\"Ubuntu 22.04.1 LTS\"",
+ "NAME": "\"Ubuntu\"",
+ "VERSION_ID": "\"22.04\"",
+ "VERSION": "\"22.04.1 LTS (Jammy Jellyfish)\"",
+ "VERSION_CODENAME": "jammy",
+ "ID": "ubuntu",
+ "ID_LIKE": "debian",
+ "HOME_URL": "\"https://www.ubuntu.com/\"",
+ "SUPPORT_URL": "\"https://help.ubuntu.com/\"",
+ "BUG_REPORT_URL": "\"https://bugs.launchpad.net/ubuntu/\"",
+ "PRIVACY_POLICY_URL": "\"https://www.ubuntu.com/legal/terms-and-policies/privacy-policy\"",
+ "UBUNTU_CODENAME": "jammy",
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, tc.excepted, convertBytesToMap(tc.data, "="))
+ })
+ }
+}
+
+func TestConvertBytesToSlice(t *testing.T) {
+ testcases := []struct {
+ name string
+ data []byte
+ excepted []map[string]string
+ }{
+ {
+ name: "succeed",
+ data: []byte(`processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 60
+model name : Intel Core Processor (Haswell, no TSX, IBRS)
+
+processor : 1
+vendor_id : GenuineIntel
+cpu family : 6
+`),
+ excepted: []map[string]string{
+ {
+ "processor": "0",
+ "vendor_id": "GenuineIntel",
+ "cpu family": "6",
+ "model": "60",
+ "model name": "Intel Core Processor (Haswell, no TSX, IBRS)",
+ },
+ {
+ "processor": "1",
+ "vendor_id": "GenuineIntel",
+ "cpu family": "6",
+ },
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, tc.excepted, convertBytesToSlice(tc.data, ":"))
+ })
+ }
+}
diff --git a/pkg/task/internal.go b/pkg/task/internal.go
new file mode 100644
index 00000000..d9112353
--- /dev/null
+++ b/pkg/task/internal.go
@@ -0,0 +1,351 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package task
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/google/uuid"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/client-go/util/workqueue"
+ "k8s.io/klog/v2"
+ ctrl "sigs.k8s.io/controller-runtime"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+ kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
+ kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
+ "github.com/kubesphere/kubekey/v4/pkg/cache"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/converter"
+ "github.com/kubesphere/kubekey/v4/pkg/modules"
+ "github.com/kubesphere/kubekey/v4/pkg/project"
+ "github.com/kubesphere/kubekey/v4/pkg/variable"
+)
+
// taskController runs pipeline tasks: AddTasks generates and enqueues
// them, Start's workers dequeue and hand each request to taskReconciler.
type taskController struct {
	// client reads and writes Task resources.
	client ctrlclient.Client
	// taskReconciler processes a single dequeued task request.
	taskReconciler reconcile.Reconciler

	// wq holds pending reconcile requests (rate limited).
	wq workqueue.RateLimitingInterface
	// MaxConcurrent is the number of worker goroutines started by Start.
	MaxConcurrent int
}
+
+func (c *taskController) AddTasks(ctx context.Context, o AddTaskOptions) error {
+ var nsTasks = &kubekeyv1alpha1.TaskList{}
+
+ if err := c.client.List(ctx, nsTasks, ctrlclient.InNamespace(o.Pipeline.Namespace)); err != nil {
+ klog.Errorf("[Pipeline %s] list tasks error: %v", ctrlclient.ObjectKeyFromObject(o.Pipeline), err)
+ return err
+ }
+ defer func() {
+ // add task to workqueue
+ for _, task := range nsTasks.Items {
+ c.wq.Add(ctrl.Request{ctrlclient.ObjectKeyFromObject(&task)})
+ }
+ converter.CalculatePipelineStatus(nsTasks, o.Pipeline)
+ }()
+
+ // filter by ownerReference
+ for i := len(nsTasks.Items) - 1; i >= 0; i-- {
+ var hasOwner bool
+ for _, ref := range nsTasks.Items[i].OwnerReferences {
+ if ref.UID == o.Pipeline.UID && ref.Kind == "Pipeline" {
+ hasOwner = true
+ }
+ }
+
+ if !hasOwner {
+ nsTasks.Items = append(nsTasks.Items[:i], nsTasks.Items[i+1:]...)
+ }
+ }
+
+ if len(nsTasks.Items) == 0 {
+ // if tasks has not generated. generate tasks from pipeline
+ vars, ok := cache.LocalVariable.Get(string(o.Pipeline.UID))
+ if ok {
+ o.variable = vars.(variable.Variable)
+ } else {
+ newVars, err := variable.New(variable.Options{
+ Ctx: ctx,
+ Client: c.client,
+ Pipeline: *o.Pipeline,
+ })
+ if err != nil {
+ klog.Errorf("[Pipeline %s] create variable failed: %v", ctrlclient.ObjectKeyFromObject(o.Pipeline), err)
+ return err
+ }
+ cache.LocalVariable.Put(string(o.Pipeline.UID), newVars)
+ o.variable = newVars
+ }
+
+ klog.V(4).Infof("[Pipeline %s] deal project", ctrlclient.ObjectKeyFromObject(o.Pipeline))
+ projectFs, err := project.New(project.Options{Pipeline: o.Pipeline}).FS(ctx, true)
+ if err != nil {
+ klog.Errorf("[Pipeline %s] deal project error: %v", ctrlclient.ObjectKeyFromObject(o.Pipeline), err)
+ return err
+ }
+
+ // convert to transfer.Playbook struct
+ pb, err := converter.MarshalPlaybook(projectFs, o.Pipeline.Spec.Playbook)
+ if err != nil {
+ return err
+ }
+
+ for _, play := range pb.Play {
+ if !play.Taggable.IsEnabled(o.Pipeline.Spec.Tags, o.Pipeline.Spec.SkipTags) {
+ continue
+ }
+ // convert Hosts (group or host) to all hosts
+ ahn, err := o.variable.Get(variable.Hostnames{Name: play.PlayHost.Hosts})
+ if err != nil {
+ klog.Errorf("[Pipeline %s] get all host name error %v", ctrlclient.ObjectKeyFromObject(o.Pipeline), err)
+ return err
+ }
+
+ // gather_fact
+ if play.GatherFacts {
+ for _, h := range ahn.([]string) {
+ gfv, err := getGatherFact(ctx, h, o.variable)
+ if err != nil {
+ klog.Errorf("[Pipeline %s] get gather fact from host %s error %v", ctrlclient.ObjectKeyFromObject(o.Pipeline), h, err)
+ return err
+ }
+ if err := o.variable.Merge(variable.HostMerge{
+ HostNames: []string{h},
+ LocationUID: "",
+ Data: gfv,
+ }); err != nil {
+ klog.Errorf("[Pipeline %s] merge gather fact from host %s error %v", ctrlclient.ObjectKeyFromObject(o.Pipeline), h, err)
+ return err
+ }
+ }
+ }
+
+ var hs [][]string
+ if play.RunOnce {
+ // runOnce only run in first node
+ hs = [][]string{{ahn.([]string)[0]}}
+ } else {
+ // group hosts by serial. run the playbook by serial
+ hs, err = converter.GroupHostBySerial(ahn.([]string), play.Serial.Data)
+ if err != nil {
+ klog.Errorf("[Pipeline %s] convert host by serial error %v", ctrlclient.ObjectKeyFromObject(o.Pipeline), err)
+ return err
+ }
+ }
+
+ // split play by hosts group
+ for _, h := range hs {
+ puid := uuid.NewString()
+ if err := o.variable.Merge(variable.LocationMerge{
+ Uid: puid,
+ Name: play.Name,
+ Type: variable.BlockLocation,
+ Vars: play.Vars,
+ }); err != nil {
+ return err
+ }
+ hctx := context.WithValue(ctx, _const.CtxBlockHosts, h)
+ // generate task from pre tasks
+ preTasks, err := c.block2Task(hctx, o, play.PreTasks, nil, puid, variable.BlockLocation)
+ if err != nil {
+ klog.Errorf("[Pipeline %s] get pre task from play %s error %v", ctrlclient.ObjectKeyFromObject(o.Pipeline), play.Name, err)
+ return err
+ }
+ nsTasks.Items = append(nsTasks.Items, preTasks...)
+ // generate task from role
+ for _, role := range play.Roles {
+ ruid := uuid.NewString()
+ if err := o.variable.Merge(variable.LocationMerge{
+ ParentID: puid,
+ Uid: ruid,
+ Name: play.Name,
+ Type: variable.BlockLocation,
+ Vars: role.Vars,
+ }); err != nil {
+ return err
+ }
+ roleTasks, err := c.block2Task(context.WithValue(hctx, _const.CtxBlockRole, role.Role), o, role.Block, role.When.Data, ruid, variable.BlockLocation)
+ if err != nil {
+ klog.Errorf("[Pipeline %s] get role from play %s error %v", ctrlclient.ObjectKeyFromObject(o.Pipeline), puid, err)
+ return err
+ }
+ nsTasks.Items = append(nsTasks.Items, roleTasks...)
+ }
+ // generate task from tasks
+ tasks, err := c.block2Task(hctx, o, play.Tasks, nil, puid, variable.BlockLocation)
+ if err != nil {
+ klog.Errorf("[Pipeline %s] get pre task from play %s error %v", ctrlclient.ObjectKeyFromObject(o.Pipeline), puid, err)
+ return err
+ }
+ nsTasks.Items = append(nsTasks.Items, tasks...)
+ // generate task from post tasks
+ postTasks, err := c.block2Task(hctx, o, play.Tasks, nil, puid, variable.BlockLocation)
+ if err != nil {
+ klog.Errorf("[Pipeline %s] get pre task from play %s error %v", ctrlclient.ObjectKeyFromObject(o.Pipeline), puid, err)
+ return err
+ }
+ nsTasks.Items = append(nsTasks.Items, postTasks...)
+ }
+ }
+
+ for _, task := range nsTasks.Items {
+ if err := c.client.Create(ctx, &task); err != nil {
+ klog.Errorf("[Pipeline %s] create task %s error: %v", ctrlclient.ObjectKeyFromObject(o.Pipeline), task.Name, err)
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// block2Task convert ansible block to task
+func (k *taskController) block2Task(ctx context.Context, o AddTaskOptions, ats []kkcorev1.Block, when []string, parentLocation string, locationType variable.LocationType) ([]kubekeyv1alpha1.Task, error) {
+ var tasks []kubekeyv1alpha1.Task
+
+ for _, at := range ats {
+ if !at.Taggable.IsEnabled(o.Pipeline.Spec.Tags, o.Pipeline.Spec.SkipTags) {
+ continue
+ }
+ buid := uuid.NewString()
+ if err := o.variable.Merge(variable.LocationMerge{
+ Uid: buid,
+ ParentID: parentLocation,
+ Type: locationType,
+ Name: at.Name,
+ Vars: at.Vars,
+ }); err != nil {
+ return nil, err
+ }
+ atWhen := append(when, at.When.Data...)
+
+ if len(at.Block) != 0 {
+ // add block
+ bt, err := k.block2Task(ctx, o, at.Block, atWhen, buid, variable.BlockLocation)
+ if err != nil {
+ return nil, err
+ }
+ tasks = append(tasks, bt...)
+
+ if len(at.Always) != 0 {
+ at, err := k.block2Task(ctx, o, at.Always, atWhen, buid, variable.AlwaysLocation)
+ if err != nil {
+ return nil, err
+ }
+ tasks = append(tasks, at...)
+ }
+ if len(at.Rescue) != 0 {
+ rt, err := k.block2Task(ctx, o, at.Rescue, atWhen, buid, variable.RescueLocation)
+ if err != nil {
+ return nil, err
+ }
+ tasks = append(tasks, rt...)
+ }
+ } else {
+ task := converter.MarshalBlock(context.WithValue(context.WithValue(ctx, _const.CtxBlockWhen, atWhen), _const.CtxBlockTaskUID, buid),
+ at, o.Pipeline)
+
+ for n, a := range at.UnknownFiled {
+ data, err := json.Marshal(a)
+ if err != nil {
+ return nil, err
+ }
+ if m := modules.FindModule(n); m != nil {
+ task.Spec.Module.Name = n
+ task.Spec.Module.Args = runtime.RawExtension{Raw: data}
+ break
+ }
+ }
+ if task.Spec.Module.Name == "" { // action is necessary for a task
+ return nil, fmt.Errorf("no module/action detected in task: %s", task.Name)
+ }
+ tasks = append(tasks, *task)
+ }
+ }
+ return tasks, nil
+}
+
+// Start task controller, deal task in work queue
+func (k *taskController) Start(ctx context.Context) error {
+ go func() {
+ <-ctx.Done()
+ k.wq.ShutDown()
+ }()
+ // deal work queue
+ wg := &sync.WaitGroup{}
+ for i := 0; i < k.MaxConcurrent; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for k.processNextWorkItem(ctx) {
+ }
+ }()
+ }
+ <-ctx.Done()
+ wg.Wait()
+ return nil
+}
+
// processNextWorkItem takes one item from the work queue and runs the
// reconciler on it. It returns false only when the queue has been shut
// down, signalling the calling worker loop to exit.
func (k *taskController) processNextWorkItem(ctx context.Context) bool {
	obj, shutdown := k.wq.Get()
	if shutdown {
		return false
	}

	// Done must be called for every Get so the queue can stop tracking
	// the item as in-flight.
	defer k.wq.Done(obj)

	req, ok := obj.(ctrl.Request)
	if !ok {
		// As the item in the workqueue is actually invalid, we call
		// Forget here else we'd go into a loop of attempting to
		// process a work item that is invalid.
		k.wq.Forget(obj)
		klog.Errorf("Queue item %v was not a Request", obj)
		// Return true, don't take a break
		return true
	}

	result, err := k.taskReconciler.Reconcile(ctx, req)
	switch {
	case err != nil:
		// retry with rate-limited backoff on reconcile errors
		k.wq.AddRateLimited(req)
		klog.Errorf("Reconciler error: %v", err)
	case result.RequeueAfter > 0:
		// The result.RequeueAfter request will be lost, if it is returned
		// along with a non-nil error. But this is intended as
		// We need to drive to stable reconcile loops before queuing due
		// to result.RequestAfter
		k.wq.Forget(obj)
		k.wq.AddAfter(req, result.RequeueAfter)
	case result.Requeue:
		k.wq.AddRateLimited(req)
	default:
		// Finally, if no error occurs we Forget this item so it does not
		// get queued again until another change happens.
		k.wq.Forget(obj)
	}
	return true
}
+
// NeedLeaderElection reports that this runnable should only be started
// on the elected leader (controller-runtime LeaderElectionRunnable).
func (k *taskController) NeedLeaderElection() bool {
	return true
}
diff --git a/pkg/variable/helper.go b/pkg/variable/helper.go
new file mode 100644
index 00000000..7faa0ab7
--- /dev/null
+++ b/pkg/variable/helper.go
@@ -0,0 +1,168 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package variable
+
import (
	"sort"
	"strconv"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/klog/v2"
	"sigs.k8s.io/yaml"

	kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
)
+
+// mergeVariables merge multiple variables into one variable
+// v2 will override v1 if variable is repeated
+func mergeVariables(v1, v2 VariableData) VariableData {
+ mergedVars := make(VariableData)
+ for k, v := range v1 {
+ mergedVars[k] = v
+ }
+ for k, v := range v2 {
+ mergedVars[k] = v
+ }
+ return mergedVars
+}
+
+func findLocation(loc []location, uid string) *location {
+ for i := range loc {
+ if uid == loc[i].UID {
+ return &loc[i]
+ }
+ // find in block,always,rescue
+ if l := findLocation(append(append(loc[i].Block, loc[i].Always...), loc[i].Rescue...), uid); l != nil {
+ return l
+ }
+ }
+ return nil
+}
+
+func convertGroup(inv kubekeyv1.Inventory) VariableData {
+ groups := make(VariableData)
+ all := make([]string, 0)
+ for hn := range inv.Spec.Hosts {
+ all = append(all, hn)
+ }
+ groups["all"] = all
+ for gn := range inv.Spec.Groups {
+ groups[gn] = hostsInGroup(inv, gn)
+ }
+ return groups
+}
+
+func hostsInGroup(inv kubekeyv1.Inventory, groupName string) []string {
+ if v, ok := inv.Spec.Groups[groupName]; ok {
+ var hosts []string
+ for _, cg := range v.Groups {
+ hosts = mergeSlice(hostsInGroup(inv, cg), hosts)
+ }
+ return mergeSlice(hosts, v.Hosts)
+ }
+ return nil
+}
+
+// StringVar get string value by key
+func StringVar(vars VariableData, key string) *string {
+ value, ok := vars[key]
+ if !ok {
+ klog.V(4).Infof("cannot find variable %s", key)
+ return nil
+ }
+ sv, ok := value.(string)
+ if !ok {
+ klog.V(4).Infof("variable %s is not string", key)
+ return nil
+ }
+ return &sv
+}
+
+// IntVar get int value by key
+func IntVar(vars VariableData, key string) *int {
+ value, ok := vars[key]
+ if !ok {
+ klog.V(4).Infof("cannot find variable %s", key)
+ return nil
+ }
+ // default convert to float64
+ number, ok := value.(float64)
+ if !ok {
+ klog.V(4).Infof("variable %s is not string", key)
+ return nil
+ }
+ vi := int(number)
+ return &vi
+}
+
+// StringSliceVar get string slice value by key
+func StringSliceVar(vars VariableData, key string) []string {
+ value, ok := vars[key]
+ if !ok {
+ klog.V(4).Infof("cannot find variable %s", key)
+ return nil
+ }
+ sv, ok := value.([]any)
+ if !ok {
+ klog.V(4).Infof("variable %s is not slice", key)
+ return nil
+ }
+ var ss []string
+ for _, a := range sv {
+ av, ok := a.(string)
+ if !ok {
+ klog.V(4).Infof("value in variable %s is not string", key)
+ return nil
+ }
+ ss = append(ss, av)
+ }
+ return ss
+}
+
+func Extension2Variables(ext runtime.RawExtension) VariableData {
+ if len(ext.Raw) == 0 {
+ return nil
+ }
+
+ var data VariableData
+ if err := yaml.Unmarshal(ext.Raw, &data); err != nil {
+ klog.Errorf("failed to unmarshal extension to variables: %v", err)
+ }
+ return data
+}
+
+func Extension2Slice(ext runtime.RawExtension) []any {
+ if len(ext.Raw) == 0 {
+ return nil
+ }
+
+ var data []any
+ if err := yaml.Unmarshal(ext.Raw, &data); err != nil {
+ klog.Errorf("failed to unmarshal extension to any: %v", err)
+ }
+ return data
+}
+
+func Extension2String(ext runtime.RawExtension) string {
+ if len(ext.Raw) == 0 {
+ return ""
+ }
+ // try to escape string
+ if ns, err := strconv.Unquote(string(ext.Raw)); err == nil {
+ return ns
+ }
+ return string(ext.Raw)
+}
diff --git a/pkg/variable/helper_test.go b/pkg/variable/helper_test.go
new file mode 100644
index 00000000..86ddcc51
--- /dev/null
+++ b/pkg/variable/helper_test.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package variable
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMergeVariable(t *testing.T) {
+ testcases := []struct {
+ name string
+ v1 VariableData
+ v2 VariableData
+ excepted VariableData
+ }{
+ {
+ name: "primary variables value is empty",
+ v1: nil,
+ v2: VariableData{
+ "a1": "v1",
+ },
+ excepted: VariableData{
+ "a1": "v1",
+ },
+ },
+ {
+ name: "auxiliary variables value is empty",
+ v1: VariableData{
+ "p1": "v1",
+ },
+ v2: nil,
+ excepted: VariableData{
+ "p1": "v1",
+ },
+ },
+ {
+ name: "non-repeat value",
+ v1: VariableData{
+ "p1": "v1",
+ "p2": map[string]any{
+ "p21": "v21",
+ },
+ },
+ v2: VariableData{
+ "a1": "v1",
+ },
+ excepted: VariableData{
+ "p1": "v1",
+ "p2": map[string]any{
+ "p21": "v21",
+ },
+ "a1": "v1",
+ },
+ },
+ {
+ name: "repeat value",
+ v1: VariableData{
+ "p1": "v1",
+ "p2": map[string]any{
+ "p21": "v21",
+ "p22": "v22",
+ },
+ },
+ v2: VariableData{
+ "a1": "v1",
+ "p1": "v2",
+ "p2": map[string]any{
+ "p21": "v22",
+ "a21": "v21",
+ },
+ },
+ excepted: VariableData{
+ "a1": "v1",
+ "p1": "v2",
+ "p2": map[string]any{
+ "p21": "v22",
+ "a21": "v21",
+ },
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ v := mergeVariables(tc.v1, tc.v2)
+ assert.Equal(t, tc.excepted, v)
+ })
+ }
+}
diff --git a/pkg/variable/internal.go b/pkg/variable/internal.go
new file mode 100644
index 00000000..c9010f70
--- /dev/null
+++ b/pkg/variable/internal.go
@@ -0,0 +1,209 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package variable
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "sync"
+
+ "k8s.io/klog/v2"
+ "k8s.io/utils/strings/slices"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/variable/source"
+)
+
// variable is the Variable implementation backed by a source.Source:
// state is held in memory in value and persisted through source on merge.
type variable struct {
	// source persists variable data (e.g. as local JSON files).
	source source.Source

	// value is the in-memory state; writes go through Merge under the mutex.
	value *value

	sync.Mutex
}
+
// value is the specific data contained in the variable
type value struct {
	// Config and Inventory are excluded from JSON (`json:"-"`), so the
	// serialized/deep-copied form carries only Hosts and Location.
	kubekeyv1.Config `json:"-"`
	kubekeyv1.Inventory `json:"-"`
	// Hosts store the variable for running tasks on specific hosts
	Hosts map[string]host `json:"hosts"`
	// Location is the complete location index.
	// This index can help us determine the specific location of the task,
	// enabling us to retrieve the task's parameters and establish the execution order.
	Location []location `json:"location"`
}
+
// deepCopy clones the value via a JSON round trip.
// Because Config and Inventory are tagged `json:"-"`, the copy contains
// only Hosts and Location — exactly the fields Merge diffs against.
// NOTE(review): marshal/unmarshal errors are swallowed and a zero value
// is returned, indistinguishable from genuinely empty state — confirm
// this is acceptable for the change-detection in Merge.
func (v value) deepCopy() value {
	nv := value{}
	data, err := json.Marshal(v)
	if err != nil {
		return value{}
	}
	if err := json.Unmarshal(data, &nv); err != nil {
		return value{}
	}
	return nv
}
+
+// getGlobalVars get defined variable from inventory and config
+func (v *value) getGlobalVars(hostname string) VariableData {
+ // get host vars
+ hostVars := Extension2Variables(v.Inventory.Spec.Hosts[hostname])
+ // set inventory_hostname to hostVars
+ // inventory_hostname" is the hostname configured in the inventory file.
+ hostVars = mergeVariables(hostVars, VariableData{
+ "inventory_hostname": hostname,
+ })
+ // merge group vars to host vars
+ for _, gv := range v.Inventory.Spec.Groups {
+ if slices.Contains(gv.Hosts, hostname) {
+ hostVars = mergeVariables(hostVars, Extension2Variables(gv.Vars))
+ }
+ }
+ // merge inventory vars to host vars
+ hostVars = mergeVariables(hostVars, Extension2Variables(v.Inventory.Spec.Vars))
+ // merge config vars to host vars
+ hostVars = mergeVariables(hostVars, Extension2Variables(v.Config.Spec))
+
+ // external vars
+ hostVars = mergeVariables(hostVars, VariableData{
+ "groups": convertGroup(v.Inventory),
+ })
+
+ return hostVars
+}
+
// location is one node of the location tree mirroring the playbook
// structure; Block/Always/Rescue hold the child locations.
type location struct {
	// UID is current location uid
	UID string `json:"uid"`
	// PUID is the parent uid for current location
	PUID string `json:"puid"`
	// Name is the name of current location
	Name string `json:"name"`
	// Vars is the variable of current location
	Vars VariableData `json:"vars,omitempty"`

	Block []location `json:"block,omitempty"`
	Always []location `json:"always,omitempty"`
	Rescue []location `json:"rescue,omitempty"`
}
+
+// VariableData is the variable data
+type VariableData map[string]any
+
+func (v VariableData) String() string {
+ data, err := json.Marshal(v)
+ if err != nil {
+ klog.Errorf("marshal in error: %v", err)
+ return ""
+ }
+ return string(data)
+}
+
// host groups the variable data kept for a single host.
type host struct {
	// Vars holds the host's merged static variables.
	Vars VariableData `json:"vars"`
	// RuntimeVars holds variables produced at run time, keyed by an
	// identifier (presumably a location UID — confirm against writers).
	RuntimeVars map[string]VariableData `json:"runtime"`
}
+
// Get reads from the in-memory value through the given filter option.
// NOTE(review): unlike Merge, Get does not take the mutex, so it can
// race with a concurrent Merge — confirm whether this is intended.
func (v *variable) Get(option GetOption) (any, error) {
	return option.filter(*v.value)
}
+
// Merge applies the given merge options to the in-memory value under the
// mutex, then persists whatever changed (location tree, per-host data)
// through the source. Persistence failures are only logged, so on-disk
// state may lag the in-memory state.
func (v *variable) Merge(mo ...MergeOption) error {
	v.Lock()
	defer v.Unlock()

	// snapshot (Hosts/Location only — see deepCopy) to detect changes
	old := v.value.deepCopy()
	for _, o := range mo {
		if err := o.mergeTo(v.value); err != nil {
			return err
		}
	}

	// persist the location tree only when it changed
	if !reflect.DeepEqual(old.Location, v.value.Location) {
		if err := v.syncLocation(); err != nil {
			klog.Errorf("sync location error %v", err)
		}
	}

	// persist each host whose data changed
	for hn, hv := range v.value.Hosts {
		if !reflect.DeepEqual(old.Hosts[hn], hv) {
			if err := v.syncHosts(hn); err != nil {
				klog.Errorf("sync group error %v", err)
			}
		}
	}

	return nil
}
+
+func (v *variable) syncLocation() error {
+ data, err := json.MarshalIndent(v.value.Location, "", " ")
+ if err != nil {
+ klog.Errorf("marshal location data failed: %v", err)
+ return err
+ }
+ if err := v.source.Write(data, _const.RuntimePipelineVariableLocationFile); err != nil {
+ klog.Errorf("write location data to local file %s error %v", _const.RuntimePipelineVariableLocationFile, err)
+ return err
+ }
+ return nil
+}
+
+// syncHosts sync hosts data to local file. If hostname is empty, sync all hosts
+func (v *variable) syncHosts(hostname ...string) error {
+ for _, hn := range hostname {
+ if hv, ok := v.value.Hosts[hn]; ok {
+ data, err := json.MarshalIndent(hv, "", " ")
+ if err != nil {
+ klog.Errorf("marshal host %s data failed: %v", hn, err)
+ return err
+ }
+ if err := v.source.Write(data, fmt.Sprintf("%s.json", hn)); err != nil {
+ klog.Errorf("write host data to local file %s.json error %v", hn, err)
+ }
+ }
+ }
+ return nil
+}
+
// mergeSlice concatenates g1 and g2 into a new slice, dropping
// duplicates while keeping first-seen order (g1 first, then the new
// values of g2). The result is never nil.
func mergeSlice(g1, g2 []string) []string {
	seen := make(map[string]bool, len(g1)+len(g2))
	merged := []string{}
	for _, src := range [][]string{g1, g2} {
		for _, s := range src {
			if seen[s] {
				continue
			}
			seen[s] = true
			merged = append(merged, s)
		}
	}
	return merged
}
diff --git a/pkg/variable/internal_test.go b/pkg/variable/internal_test.go
new file mode 100644
index 00000000..3cd6da0a
--- /dev/null
+++ b/pkg/variable/internal_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package variable
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMergeGroup(t *testing.T) {
+ testcases := []struct {
+ name string
+ g1 []string
+ g2 []string
+ except []string
+ }{
+ {
+ name: "non-repeat",
+ g1: []string{
+ "h1", "h2", "h3",
+ },
+ g2: []string{
+ "h4", "h5",
+ },
+ except: []string{
+ "h1", "h2", "h3", "h4", "h5",
+ },
+ },
+ {
+ name: "repeat value",
+ g1: []string{
+ "h1", "h2", "h3",
+ },
+ g2: []string{
+ "h3", "h4", "h5",
+ },
+ except: []string{
+ "h1", "h2", "h3", "h4", "h5",
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ ac := mergeSlice(tc.g1, tc.g2)
+ assert.Equal(t, tc.except, ac)
+ })
+ }
+}
diff --git a/pkg/variable/source/file.go b/pkg/variable/source/file.go
new file mode 100644
index 00000000..39077fc9
--- /dev/null
+++ b/pkg/variable/source/file.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package source
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/klog/v2"
+)
+
+type fileSource struct {
+ path string
+}
+
+func (f *fileSource) Read() (map[string][]byte, error) {
+ de, err := os.ReadDir(f.path)
+ if err != nil {
+ klog.Errorf("read dir %s error %v", f.path, err)
+ return nil, err
+ }
+ var result map[string][]byte
+ for _, entry := range de {
+ if entry.IsDir() {
+ continue
+ }
+ if result == nil {
+ result = make(map[string][]byte)
+ }
+ // only read json data
+ if strings.HasSuffix(entry.Name(), ".json") {
+ data, err := os.ReadFile(filepath.Join(f.path, entry.Name()))
+ if err != nil {
+ return nil, err
+ }
+ result[entry.Name()] = data
+ }
+ }
+
+ return result, nil
+}
+
+func (f *fileSource) Write(data []byte, filename string) error {
+ file, err := os.Create(filepath.Join(f.path, filename))
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ if _, err := file.Write(data); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/pkg/variable/source/source.go b/pkg/variable/source/source.go
new file mode 100644
index 00000000..f8f49aba
--- /dev/null
+++ b/pkg/variable/source/source.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package source
+
+import (
+ "io/fs"
+ "os"
+
+ "k8s.io/klog/v2"
+)
+
// Source is a persistence backend for variable data. Read returns the
// entire data set keyed by entry name; Write stores one entry under
// the given filename.
type Source interface {
	Read() (map[string][]byte, error)
	Write(data []byte, filename string) error
	// Watch is not implemented yet; kept for future change notification.
	//Watch() (Watcher, error)
}

// Watcher watches a source for changes. Next returns the next change
// payload; Stop ends the watch.
type Watcher interface {
	Next() ([]byte, error)
	Stop() error
}
+
+// New returns a new source.
+func New(path string) (Source, error) {
+ if _, err := os.Stat(path); err != nil {
+ if err := os.MkdirAll(path, fs.ModePerm); err != nil {
+ klog.Errorf("create source path %s error: %v", path, err)
+ return nil, err
+ }
+ }
+ return &fileSource{path: path}, nil
+}
diff --git a/pkg/variable/variable.go b/pkg/variable/variable.go
new file mode 100644
index 00000000..cbd41e6a
--- /dev/null
+++ b/pkg/variable/variable.go
@@ -0,0 +1,551 @@
+/*
+Copyright 2023 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package variable
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog/v2"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
+ "github.com/kubesphere/kubekey/v4/pkg/variable/source"
+)
+
// Variable provides read (Get) and write (Merge) access to the
// pipeline's variable data; the concrete lookup or merge behavior is
// chosen by the option passed in.
type Variable interface {
	Get(option GetOption) (any, error)
	Merge(option ...MergeOption) error
}

// Options carries the dependencies needed to construct a Variable.
type Options struct {
	// Ctx is used for the API reads performed during construction.
	Ctx context.Context
	// Client reads the referenced Config and Inventory objects.
	Client ctrlclient.Client
	// Pipeline is the pipeline whose variables are being managed.
	Pipeline kubekeyv1.Pipeline
}
+
+// New variable. generate value from config args. and render to source.
+func New(o Options) (Variable, error) {
+ // new source
+ s, err := source.New(filepath.Join(_const.RuntimeDirFromObject(&o.Pipeline), _const.RuntimePipelineVariableDir))
+ if err != nil {
+ klog.Errorf("create file source failed: %v", err)
+ return nil, err
+ }
+ // get config
+ var config = &kubekeyv1.Config{}
+ if err := o.Client.Get(o.Ctx, types.NamespacedName{o.Pipeline.Spec.ConfigRef.Namespace, o.Pipeline.Spec.ConfigRef.Name}, config); err != nil {
+ klog.Errorf("get config from pipeline error %v", err)
+ return nil, err
+ }
+ // get inventory
+ var inventory = &kubekeyv1.Inventory{}
+ if err := o.Client.Get(o.Ctx, types.NamespacedName{o.Pipeline.Spec.InventoryRef.Namespace, o.Pipeline.Spec.InventoryRef.Name}, inventory); err != nil {
+ klog.Errorf("get inventory from pipeline error %v", err)
+ return nil, err
+ }
+ v := &variable{
+ source: s,
+ value: &value{
+ Config: *config,
+ Inventory: *inventory,
+ Hosts: make(map[string]host),
+ },
+ }
+ // read data from source
+ data, err := v.source.Read()
+ if err != nil {
+ klog.Errorf("read data from source error %v", err)
+ return nil, err
+ }
+ for k, d := range data {
+ if k == _const.RuntimePipelineVariableLocationFile {
+ // set location
+ if err := json.Unmarshal(d, &v.value.Location); err != nil {
+ klog.Errorf("unmarshal location error %v", err)
+ return nil, err
+ }
+ } else {
+ // set hosts
+ h := host{}
+ if err := json.Unmarshal(d, &h); err != nil {
+ klog.Errorf("unmarshal host error %v", err)
+ return nil, err
+ }
+ v.value.Hosts[strings.TrimSuffix(k, ".json")] = h
+ }
+ }
+ return v, nil
+}
+
// GetOption selects and extracts a piece of variable data; each
// implementation encapsulates one lookup strategy over the value store.
type GetOption interface {
	filter(data value) (any, error)
}

// KeyPath gets a variable by walking a key path, resolving with the
// usual precedence (task runtime vars, location chain, host vars,
// global vars).
type KeyPath struct {
	// HostName is the host whose variables are consulted.
	HostName string
	// LocationUID identifies the location whose variable chain is searched.
	LocationUID string
	// Path is the key path, from the top-level variable downward.
	Path []string
}
+
// filter resolves the key path with layered precedence: within the
// location chain (the host's runtime vars for each location first,
// then the location's own vars, walking up through parents), then the
// host's vars, then the global vars. A nil result with nil error means
// the path resolved nowhere.
func (k KeyPath) filter(data value) (any, error) {
	// find value from location
	var getLocationFunc func(uid string) any
	getLocationFunc = func(uid string) any {
		if loc := findLocation(data.Location, uid); loc != nil {
			// find value from task
			if v, ok := data.Hosts[k.HostName].RuntimeVars[uid]; ok {
				if result := k.getValue(v, k.Path...); result != nil {
					return result
				}
			}
			if result := k.getValue(loc.Vars, k.Path...); result != nil {
				return result
			}
			// Nothing at this level: continue up the parent chain.
			if loc.PUID != "" {
				return getLocationFunc(loc.PUID)
			}
		}
		return nil
	}
	if result := getLocationFunc(k.LocationUID); result != nil {
		return result, nil
	}

	// find value from host
	if result := k.getValue(data.Hosts[k.HostName].Vars, k.Path...); result != nil {
		return result, nil
	}

	// find value from global
	if result := k.getValue(data.getGlobalVars(k.HostName), k.Path...); result != nil {
		return result, nil
	}
	return nil, nil
}
+
+// getValue from variable.VariableData use key path. if key path is empty return nil
+func (k KeyPath) getValue(value VariableData, key ...string) any {
+ if len(key) == 0 {
+ return nil
+ }
+ var result any
+ result = value
+ for _, s := range key {
+ result = result.(VariableData)[s]
+ }
+ return result
+}
+
+// ParentLocation UID for current location
+type ParentLocation struct {
+ LocationUID string
+}
+
+func (p ParentLocation) filter(data value) (any, error) {
+ loc := findLocation(data.Location, p.LocationUID)
+ if loc != nil {
+ return loc.PUID, nil
+ }
+ return nil, fmt.Errorf("cannot find location %s", p.LocationUID)
+}
+
// LocationVars collects every variable visible to a host at a given
// location, merged along the chain: the location's runtime vars, each
// ancestor location's runtime and static vars, then the host's own
// vars, then the global vars.
type LocationVars struct {
	// HostName is the host whose variables are collected.
	HostName string
	// LocationUID identifies the location whose variable chain is merged.
	LocationUID string
}

func (b LocationVars) filter(data value) (any, error) {
	var result VariableData
	// find from host runtime
	if v, ok := data.Hosts[b.HostName].RuntimeVars[b.LocationUID]; ok {
		result = mergeVariables(result, v)
	}

	// merge location variable, walking from this location up through
	// its ancestors
	var mergeLocationVarsFunc func(uid string)
	mergeLocationVarsFunc = func(uid string) {
		// find value from task
		if v, ok := data.Hosts[b.HostName].RuntimeVars[uid]; ok {
			result = mergeVariables(result, v)
		}
		if loc := findLocation(data.Location, uid); loc != nil {
			result = mergeVariables(result, loc.Vars)
			if loc.PUID != "" {
				mergeLocationVarsFunc(loc.PUID)
			}
		}
	}
	mergeLocationVarsFunc(b.LocationUID)

	// get value from host
	result = mergeVariables(result, data.Hosts[b.HostName].Vars)

	// get value from global
	result = mergeVariables(result, data.getGlobalVars(b.HostName))

	return result, nil
}
+
+// HostVars get all top variable for a host
+type HostVars struct {
+ HostName string
+}
+
+func (k HostVars) filter(data value) (any, error) {
+ return mergeVariables(data.getGlobalVars(k.HostName), data.Hosts[k.HostName].Vars), nil
+}
+
+// Hostnames from array contains group name or host name
+type Hostnames struct {
+ Name []string
+}
+
+func (g Hostnames) filter(data value) (any, error) {
+ var hs []string
+ for _, n := range g.Name {
+ // add host to hs
+ if _, ok := data.Hosts[n]; ok {
+ hs = append(hs, n)
+ }
+ // add group's host to gs
+ for gn, gv := range convertGroup(data.Inventory) {
+ if gn == n {
+ hs = mergeSlice(hs, gv.([]string))
+ break
+ }
+ }
+
+ // Add the specified host in the specified group to the hs.
+ regex := regexp.MustCompile(`^(.*)\[\d\]$`)
+ if match := regex.FindStringSubmatch(n); match != nil {
+ index, err := strconv.Atoi(match[2])
+ if err != nil {
+ klog.Errorf("convert index %s to int failed: %v", match[2], err)
+ return nil, err
+ }
+ for gn, gv := range data.Inventory.Spec.Groups {
+ if gn == match[1] {
+ hs = append(hs, gv.Hosts[index])
+ break
+ }
+ }
+ }
+ }
+ return hs, nil
+}
+
const (
	// FailedExecute runs the current task only when a dependency task
	// has failed; otherwise the task is skipped.
	FailedExecute = "failed-exec"
	// SucceedExecute runs the current task only when the dependency
	// tasks succeeded; otherwise the task is skipped.
	SucceedExecute = "succeed-exec"
	// AlwaysExecute always runs the current task.
	AlwaysExecute = "always-exec"
)
+
+type DependencyTasks struct {
+ LocationUID string
+}
+
+type DependencyTask struct {
+ Tasks []string
+ Strategy func([]kubekeyv1alpha1.Task) kubekeyv1alpha1.TaskPhase
+}
+
+func (f DependencyTasks) filter(data value) (any, error) {
+ loc := findLocation(data.Location, f.LocationUID)
+ if loc == nil {
+ return nil, fmt.Errorf("cannot found location %s", f.LocationUID)
+
+ }
+ return f.getDependencyLocationUIDS(data, loc)
+}
+
+func (f DependencyTasks) getDependencyLocationUIDS(data value, loc *location) (DependencyTask, error) {
+ if loc.PUID == "" {
+ return DependencyTask{
+ Strategy: func([]kubekeyv1alpha1.Task) kubekeyv1alpha1.TaskPhase {
+ return kubekeyv1alpha1.TaskPhaseRunning
+ },
+ }, nil
+ }
+
+ // if tasks has failed. execute current task.
+ failedExecuteStrategy := func(tasks []kubekeyv1alpha1.Task) kubekeyv1alpha1.TaskPhase {
+ skip := true
+ for _, t := range tasks {
+ if !t.IsComplete() {
+ return kubekeyv1alpha1.TaskPhasePending
+ }
+ if t.IsFailed() {
+ return kubekeyv1alpha1.TaskPhaseRunning
+ }
+ if !t.IsSkipped() {
+ skip = false
+ }
+ }
+ if skip {
+ return kubekeyv1alpha1.TaskPhaseRunning
+ }
+ return kubekeyv1alpha1.TaskPhaseSkipped
+ }
+
+ // If dependency tasks has failed. skip it.
+ succeedExecuteStrategy := func(tasks []kubekeyv1alpha1.Task) kubekeyv1alpha1.TaskPhase {
+ skip := true
+ for _, t := range tasks {
+ if !t.IsComplete() {
+ return kubekeyv1alpha1.TaskPhasePending
+ }
+ if t.IsFailed() {
+ return kubekeyv1alpha1.TaskPhaseSkipped
+ }
+ if !t.IsSkipped() {
+ skip = false
+ }
+ }
+ if skip {
+ return kubekeyv1alpha1.TaskPhaseSkipped
+ }
+ return kubekeyv1alpha1.TaskPhaseRunning
+ }
+
+ // If dependency tasks is not complete. waiting.
+ // If dependency tasks is skipped. skip.
+ alwaysExecuteStrategy := func(tasks []kubekeyv1alpha1.Task) kubekeyv1alpha1.TaskPhase {
+ skip := true
+ for _, t := range tasks {
+ if !t.IsComplete() {
+ return kubekeyv1alpha1.TaskPhasePending
+ }
+ if !t.IsSkipped() {
+ skip = false
+ }
+ }
+ if skip {
+ return kubekeyv1alpha1.TaskPhaseSkipped
+ }
+ return kubekeyv1alpha1.TaskPhaseRunning
+ }
+
+ // Find the parent location and, based on where the current location is within the parent location, retrieve the dependent tasks.
+ ploc := findLocation(data.Location, loc.PUID)
+
+ // location in Block.
+ for i, l := range ploc.Block {
+ if l.UID == loc.UID {
+ // When location is the first element, it is necessary to check the dependency of its parent location.
+ if i == 0 {
+ if data, err := f.getDependencyLocationUIDS(data, ploc); err != nil {
+ return DependencyTask{}, err
+ } else {
+ return data, nil
+ }
+ }
+ // When location is not the first element, dependency location is the preceding element in the same array.
+ return DependencyTask{
+ Tasks: f.findAllTasks(ploc.Block[i-1]),
+ Strategy: succeedExecuteStrategy,
+ }, nil
+ }
+ }
+
+ // location in Rescue
+ for i, l := range ploc.Rescue {
+ if l.UID == loc.UID {
+ // When location is the first element, dependency location is all task of sibling block array.
+ if i == 0 {
+ return DependencyTask{
+ Tasks: f.findAllTasks(ploc.Block[len(ploc.Block)-1]),
+ Strategy: failedExecuteStrategy,
+ }, nil
+ }
+ // When location is not the first element, dependency location is the preceding element in the same array
+ return DependencyTask{
+ Tasks: f.findAllTasks(ploc.Rescue[i-1]),
+ Strategy: succeedExecuteStrategy}, nil
+ }
+ }
+
+ // If location in Always
+ for i, l := range ploc.Always {
+ if l.UID == loc.UID {
+ // When location is the first element, dependency location is all task of sibling block array
+ if i == 0 {
+ return DependencyTask{
+ Tasks: f.findAllTasks(ploc.Block[len(ploc.Block)-1]),
+ Strategy: alwaysExecuteStrategy,
+ }, nil
+ }
+ // When location is not the first element, dependency location is the preceding element in the same array
+ return DependencyTask{
+ Tasks: f.findAllTasks(ploc.Always[i-1]),
+ Strategy: alwaysExecuteStrategy,
+ }, nil
+
+ }
+ }
+
+ return DependencyTask{}, fmt.Errorf("connot find location %s in parent %s", loc.UID, loc.PUID)
+}
+
+func (f DependencyTasks) findAllTasks(loc location) []string {
+ if len(loc.Block) == 0 {
+ return []string{loc.UID}
+ }
+ var result = make([]string, 0)
+ for _, l := range loc.Block {
+ result = append(result, f.findAllTasks(l)...)
+ }
+ for _, l := range loc.Rescue {
+ result = append(result, f.findAllTasks(l)...)
+ }
+ for _, l := range loc.Always {
+ result = append(result, f.findAllTasks(l)...)
+ }
+
+ return result
+}
+
// MergeOption writes variable data into the value store; each
// implementation encapsulates one merge target.
type MergeOption interface {
	mergeTo(data *value) error
}

// HostMerge merges variable data into one or more hosts, either into
// their persistent vars or into their per-location runtime vars.
type HostMerge struct {
	// HostNames lists the hosts to merge into.
	HostNames []string
	// LocationUID selects the runtime-var bucket to merge into; when
	// empty the data goes into the hosts' persistent vars.
	//LocationVars []string
	LocationUID string
	// Data is the variable data to merge.
	Data VariableData
}
+
// mergeTo applies h.Data to each named host: into the host's
// persistent vars when LocationUID is empty, otherwise into the host's
// runtime vars for that location.
func (h HostMerge) mergeTo(v *value) error {
	for _, name := range h.HostNames {
		hv := v.Hosts[name]
		if h.LocationUID == "" { // merge to host var
			// NOTE(review): argument order differs from the runtime
			// branch below (h.Data first here, existing data first
			// there) — presumably mergeVariables gives one side
			// precedence; confirm the asymmetry is intentional.
			hv.Vars = mergeVariables(h.Data, v.Hosts[name].Vars)
		} else { // merge to host runtime
			if hv.RuntimeVars == nil {
				hv.RuntimeVars = make(map[string]VariableData)
			}
			hv.RuntimeVars[h.LocationUID] = mergeVariables(v.Hosts[name].RuntimeVars[h.LocationUID], h.Data)
		}
		// Hosts holds values, not pointers, so the updated copy must be
		// written back.
		v.Hosts[name] = hv
	}
	return nil
}
+
// LocationType names the child list of a location that a merge targets.
type LocationType string

const (
	// BlockLocation targets the location's Block list.
	BlockLocation LocationType = "block"
	// AlwaysLocation targets the location's Always list.
	AlwaysLocation LocationType = "always"
	// RescueLocation targets the location's Rescue list.
	RescueLocation LocationType = "rescue"
)

// LocationMerge merges a new child location (with its variables) into
// the location tree.
type LocationMerge struct {
	// Uid is the UID of the location being added.
	Uid string
	// ParentID is the UID of the parent location; empty adds at the root.
	ParentID string
	// Type selects which child list of the parent receives the location.
	Type LocationType
	// Name is the human-readable location name.
	Name string
	// Vars holds the new location's variables.
	Vars VariableData
}
+
+func (t LocationMerge) mergeTo(v *value) error {
+ if t.ParentID == "" {
+ v.Location = append(v.Location, location{
+ Name: t.Name,
+ PUID: t.ParentID,
+ UID: t.Uid,
+ Vars: t.Vars,
+ })
+ return nil
+ }
+ // find parent graph
+ parentLocation := findLocation(v.Location, t.ParentID)
+ if parentLocation == nil {
+ return fmt.Errorf("cannot find parent location %s", t.ParentID)
+ }
+
+ switch t.Type {
+ case BlockLocation:
+ for _, loc := range parentLocation.Block {
+ if loc.UID == t.Uid {
+ klog.Warningf("task graph %s already exist", t.Uid)
+ return nil
+ }
+ }
+ parentLocation.Block = append(parentLocation.Block, location{
+ Name: t.Name,
+ PUID: t.ParentID,
+ UID: t.Uid,
+ Vars: t.Vars,
+ })
+ case AlwaysLocation:
+ for _, loc := range parentLocation.Always {
+ if loc.UID == t.Uid {
+ klog.Warningf("task graph %s already exist", t.Uid)
+ return nil
+ }
+ }
+ parentLocation.Always = append(parentLocation.Always, location{
+ Name: t.Name,
+ PUID: t.ParentID,
+ UID: t.Uid,
+ Vars: t.Vars,
+ })
+ case RescueLocation:
+ for _, loc := range parentLocation.Rescue {
+ if loc.UID == t.Uid {
+ klog.Warningf("task graph %s already exist", t.Uid)
+ return nil
+ }
+ }
+ parentLocation.Rescue = append(parentLocation.Rescue, location{
+ Name: t.Name,
+ PUID: t.ParentID,
+ UID: t.Uid,
+ Vars: t.Vars,
+ })
+ default:
+ return fmt.Errorf("unknown LocationType. only support block,always,rescue ")
+ }
+
+ return nil
+}
diff --git a/scripts/ci-lint-dockerfiles.sh b/scripts/ci-lint-dockerfiles.sh
new file mode 100755
index 00000000..c62fb3bd
--- /dev/null
+++ b/scripts/ci-lint-dockerfiles.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Copyright 2022 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Fail on any command error, on use of unset variables, and on failures
# anywhere in a pipeline.
set -o errexit
set -o nounset
set -o pipefail

# Hadolint image tag to use ($1) and the severity at which linting
# fails ($2).
HADOLINT_VER=${1:-latest}
HADOLINT_FAILURE_THRESHOLD=${2:-warning}

# Collect every Dockerfile under the repository root. `find -- *`
# skips dot-directories at the top level.
FILES=$(find -- * -name Dockerfile)
while read -r file; do
  echo "Linting: ${file}"
  # Configure the linter to fail for warnings and errors. Can be set to: error | warning | info | style | ignore | none
  docker run --rm -i ghcr.io/hadolint/hadolint:"${HADOLINT_VER}" hadolint --failure-threshold "${HADOLINT_FAILURE_THRESHOLD}" - < "${file}"
done <<< "${FILES}"
diff --git a/scripts/docker-install.sh b/scripts/docker-install.sh
new file mode 100755
index 00000000..9bb70410
--- /dev/null
+++ b/scripts/docker-install.sh
@@ -0,0 +1,526 @@
+#!/bin/sh
+set -e
+# Docker CE for Linux installation script
+#
+# See https://docs.docker.com/engine/install/ for the installation steps.
+#
+# This script is meant for quick & easy install via:
+# $ curl -fsSL https://get.docker.com -o get-docker.sh
+# $ sh get-docker.sh
+#
+# For test builds (ie. release candidates):
+# $ curl -fsSL https://test.docker.com -o test-docker.sh
+# $ sh test-docker.sh
+#
+# NOTE: Make sure to verify the contents of the script
+# you downloaded matches the contents of install.sh
+# located at https://github.com/docker/docker-install
+# before executing.
+#
+# Git commit from https://github.com/docker/docker-install when
+# the script was uploaded (Should only be modified by upload job):
+SCRIPT_COMMIT_SHA="7cae5f8b0decc17d6571f9f52eb840fbc13b2737"
+
+
+# The channel to install from:
+# * nightly
+# * test
+# * stable
+# * edge (deprecated)
+DEFAULT_CHANNEL_VALUE="stable"
+if [ -z "$CHANNEL" ]; then
+ CHANNEL=$DEFAULT_CHANNEL_VALUE
+fi
+
+#DEFAULT_DOWNLOAD_URL="https://download.docker.com"
+DEFAULT_DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce"
+if [ -z "$DOWNLOAD_URL" ]; then
+ DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL
+fi
+
+DEFAULT_REPO_FILE="docker-ce.repo"
+if [ -z "$REPO_FILE" ]; then
+ REPO_FILE="$DEFAULT_REPO_FILE"
+fi
+
+mirror=''
+DRY_RUN=${DRY_RUN:-}
+while [ $# -gt 0 ]; do
+ case "$1" in
+ --mirror)
+ mirror="$2"
+ shift
+ ;;
+ --dry-run)
+ DRY_RUN=1
+ ;;
+ --*)
+ echo "Illegal option $1"
+ ;;
+ esac
+ shift $(( $# > 0 ? 1 : 0 ))
+done
+
+case "$mirror" in
+ Aliyun)
+ DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce"
+ ;;
+ AzureChinaCloud)
+ DOWNLOAD_URL="https://mirror.azure.cn/docker-ce"
+ ;;
+esac
+
+# docker-ce-rootless-extras is packaged since Docker 20.10.0
+has_rootless_extras="1"
+if echo "$VERSION" | grep -q '^1'; then
+ has_rootless_extras=
+fi
+
# command_exists reports (via exit status) whether the given command is
# available on PATH.
command_exists() {
	command -v "$@" > /dev/null 2>&1
}
+
# is_dry_run succeeds (exit 0) when DRY_RUN is set to a non-empty
# value, i.e. when commands should be echoed rather than executed.
is_dry_run() {
	test -n "$DRY_RUN"
}
+
+is_wsl() {
+ case "$(uname -r)" in
+ *microsoft* ) true ;; # WSL 2
+ *Microsoft* ) true ;; # WSL 1
+ * ) false;;
+ esac
+}
+
+is_darwin() {
+ case "$(uname -s)" in
+ *darwin* ) true ;;
+ *Darwin* ) true ;;
+ * ) false;;
+ esac
+}
+
+deprecation_notice() {
+ distro=$1
+ date=$2
+ echo
+ echo "DEPRECATION WARNING:"
+ echo " The distribution, $distro, will no longer be supported in this script as of $date."
+ echo " If you feel this is a mistake please submit an issue at https://github.com/docker/docker-install/issues/new"
+ echo
+ sleep 10
+}
+
+get_distribution() {
+ lsb_dist=""
+ # Every system that we officially support has /etc/os-release
+ if [ -r /etc/os-release ]; then
+ lsb_dist="$(. /etc/os-release && echo "$ID")"
+ fi
+ # Returning an empty string here should be alright since the
+ # case statements don't act unless you provide an actual value
+ echo "$lsb_dist"
+}
+
+add_debian_backport_repo() {
+ debian_version="$1"
+ backports="deb http://ftp.debian.org/debian $debian_version-backports main"
+ if ! grep -Fxq "$backports" /etc/apt/sources.list; then
+ (set -x; $sh_c "echo \"$backports\" >> /etc/apt/sources.list")
+ fi
+}
+
+echo_docker_as_nonroot() {
+ if is_dry_run; then
+ return
+ fi
+ if command_exists docker && [ -e /var/run/docker.sock ]; then
+ (
+ set -x
+ $sh_c 'docker version'
+ ) || true
+ fi
+
+ # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
+ echo
+ echo "================================================================================"
+ echo
+ if [ -n "$has_rootless_extras" ]; then
+ echo "To run Docker as a non-privileged user, consider setting up the"
+ echo "Docker daemon in rootless mode for your user:"
+ echo
+ echo " dockerd-rootless-setuptool.sh install"
+ echo
+ echo "Visit https://docs.docker.com/go/rootless/ to learn about rootless mode."
+ echo
+ fi
+ echo
+ echo "To run the Docker daemon as a fully privileged service, but granting non-root"
+ echo "users access, refer to https://docs.docker.com/go/daemon-access/"
+ echo
+ echo "WARNING: Access to the remote API on a privileged Docker daemon is equivalent"
+ echo " to root access on the host. Refer to the 'Docker daemon attack surface'"
+ echo " documentation for details: https://docs.docker.com/go/attack-surface/"
+ echo
+ echo "================================================================================"
+ echo
+}
+
+# Check if this is a forked Linux distro
+check_forked() {
+
+ # Check for lsb_release command existence, it usually exists in forked distros
+ if command_exists lsb_release; then
+ # Check if the `-u` option is supported
+ set +e
+ lsb_release -a -u > /dev/null 2>&1
+ lsb_release_exit_code=$?
+ set -e
+
+ # Check if the command has exited successfully, it means we're in a forked distro
+ if [ "$lsb_release_exit_code" = "0" ]; then
+ # Print info about current distro
+ cat <<-EOF
+ You're using '$lsb_dist' version '$dist_version'.
+ EOF
+
+ # Get the upstream release info
+ lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]')
+ dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]')
+
+ # Print info about upstream distro
+ cat <<-EOF
+ Upstream release is '$lsb_dist' version '$dist_version'.
+ EOF
+ else
+ if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
+ if [ "$lsb_dist" = "osmc" ]; then
+ # OSMC runs Raspbian
+ lsb_dist=raspbian
+ else
+ # We're Debian and don't even know it!
+ lsb_dist=debian
+ fi
+ dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
+ case "$dist_version" in
+ 10)
+ dist_version="buster"
+ ;;
+ 9)
+ dist_version="stretch"
+ ;;
+ 8|'Kali Linux 2')
+ dist_version="jessie"
+ ;;
+ esac
+ fi
+ fi
+ fi
+}
+
# semverParse splits a semantic version string ($1, e.g. "20.10.7-ce")
# into the global variables $major, $minor and $patch using only POSIX
# parameter expansion (no external commands).
semverParse() {
	major="${1%%.*}"
	minor="${1#$major.}"
	minor="${minor%%.*}"
	patch="${1#$major.$minor.}"
	# Strip any pre-release/build suffix such as "-ce" or ".1".
	patch="${patch%%[-.]*}"
}
+
+do_install() {
+ echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA"
+
+ if command_exists docker; then
+ docker_version="$(docker -v | cut -d ' ' -f3 | cut -d ',' -f1)"
+ MAJOR_W=1
+ MINOR_W=10
+
+ semverParse "$docker_version"
+
+ shouldWarn=0
+ if [ "$major" -lt "$MAJOR_W" ]; then
+ shouldWarn=1
+ fi
+
+ if [ "$major" -le "$MAJOR_W" ] && [ "$minor" -lt "$MINOR_W" ]; then
+ shouldWarn=1
+ fi
+
+ cat >&2 <<-'EOF'
+ Warning: the "docker" command appears to already exist on this system.
+
+ If you already have Docker installed, this script can cause trouble, which is
+ why we're displaying this warning and provide the opportunity to cancel the
+ installation.
+
+ If you installed the current Docker package using this script and are using it
+ EOF
+
+ if [ $shouldWarn -eq 1 ]; then
+ cat >&2 <<-'EOF'
+ again to update Docker, we urge you to migrate your image store before upgrading
+ to v1.10+.
+
+ You can find instructions for this here:
+ https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
+ EOF
+ else
+ cat >&2 <<-'EOF'
+ again to update Docker, you can safely ignore this message.
+ EOF
+ fi
+
+ cat >&2 <<-'EOF'
+
+ You may press Ctrl+C now to abort this script.
+ EOF
+ ( set -x; sleep 20 )
+ fi
+
+ user="$(id -un 2>/dev/null || true)"
+
+ sh_c='sh -c'
+ if [ "$user" != 'root' ]; then
+ if command_exists sudo; then
+ sh_c='sudo -E sh -c'
+ elif command_exists su; then
+ sh_c='su -c'
+ else
+ cat >&2 <<-'EOF'
+ Error: this installer needs the ability to run commands as root.
+ We are unable to find either "sudo" or "su" available to make this happen.
+ EOF
+ exit 1
+ fi
+ fi
+
+ if is_dry_run; then
+ sh_c="echo"
+ fi
+
+ # perform some very rudimentary platform detection
+ lsb_dist=$( get_distribution )
+ lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
+
+ if is_wsl; then
+ echo
+ echo "WSL DETECTED: We recommend using Docker Desktop for Windows."
+ echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
+ echo
+ cat >&2 <<-'EOF'
+
+ You may press Ctrl+C now to abort this script.
+ EOF
+ ( set -x; sleep 20 )
+ fi
+
+ case "$lsb_dist" in
+
+ ubuntu)
+ if command_exists lsb_release; then
+ dist_version="$(lsb_release --codename | cut -f2)"
+ fi
+ if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
+ dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
+ fi
+ ;;
+
+ debian|raspbian)
+ dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
+ case "$dist_version" in
+ 10)
+ dist_version="buster"
+ ;;
+ 9)
+ dist_version="stretch"
+ ;;
+ 8)
+ dist_version="jessie"
+ ;;
+ esac
+ ;;
+
+ centos|rhel)
+ if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
+ dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
+ fi
+ ;;
+
+ *)
+ if command_exists lsb_release; then
+ dist_version="$(lsb_release --release | cut -f2)"
+ fi
+ if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
+ dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
+ fi
+ ;;
+
+ esac
+
+ # Check if this is a forked Linux distro
+ check_forked
+
+ # Run setup for each distro accordingly
+ case "$lsb_dist" in
+ ubuntu|debian|raspbian)
+ pre_reqs="apt-transport-https ca-certificates curl"
+ if [ "$lsb_dist" = "debian" ]; then
+ # libseccomp2 does not exist for debian jessie main repos for aarch64
+ if [ "$(uname -m)" = "aarch64" ] && [ "$dist_version" = "jessie" ]; then
+ add_debian_backport_repo "$dist_version"
+ fi
+ fi
+
+ if ! command -v gpg > /dev/null; then
+ pre_reqs="$pre_reqs gnupg"
+ fi
+ apt_repo="deb [arch=$(dpkg --print-architecture)] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL"
+ (
+ if ! is_dry_run; then
+ set -x
+ fi
+ $sh_c 'apt-get update -qq >/dev/null'
+ $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pre_reqs >/dev/null"
+ $sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" | apt-key add -qq - >/dev/null"
+ $sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list"
+ $sh_c 'apt-get update -qq >/dev/null'
+ )
+ pkg_version=""
+ if [ -n "$VERSION" ]; then
+ if is_dry_run; then
+ echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
+ else
+ # Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel
+ pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/~ce~.*/g" | sed "s/-/.*/g").*-0~$lsb_dist"
+ search_command="apt-cache madison 'docker-ce' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
+ pkg_version="$($sh_c "$search_command")"
+ echo "INFO: Searching repository for VERSION '$VERSION'"
+ echo "INFO: $search_command"
+ if [ -z "$pkg_version" ]; then
+ echo
+ echo "ERROR: '$VERSION' not found amongst apt-cache madison results"
+ echo
+ exit 1
+ fi
+ search_command="apt-cache madison 'docker-ce-cli' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
+ # Don't insert an = for cli_pkg_version, we'll just include it later
+ cli_pkg_version="$($sh_c "$search_command")"
+ pkg_version="=$pkg_version"
+ fi
+ fi
+ (
+ if ! is_dry_run; then
+ set -x
+ fi
+ if [ -n "$cli_pkg_version" ]; then
+ $sh_c "apt-get install -y -qq --no-install-recommends docker-ce-cli=$cli_pkg_version >/dev/null"
+ fi
+ $sh_c "apt-get install -y -qq --no-install-recommends docker-ce$pkg_version >/dev/null"
+ # shellcheck disable=SC2030
+ if [ -n "$has_rootless_extras" ]; then
+ # Install docker-ce-rootless-extras without "--no-install-recommends", so as to install slirp4netns when available
+ $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq docker-ce-rootless-extras$pkg_version >/dev/null"
+ fi
+ )
+ echo_docker_as_nonroot
+ exit 0
+ ;;
+ centos|fedora|rhel)
+ yum_repo="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
+ if ! curl -Ifs "$yum_repo" > /dev/null; then
+ echo "Error: Unable to curl repository file $yum_repo, is it valid?"
+ exit 1
+ fi
+ if [ "$lsb_dist" = "fedora" ]; then
+ pkg_manager="dnf"
+ config_manager="dnf config-manager"
+ enable_channel_flag="--set-enabled"
+ disable_channel_flag="--set-disabled"
+ pre_reqs="dnf-plugins-core"
+ pkg_suffix="fc$dist_version"
+ else
+ pkg_manager="yum"
+ config_manager="yum-config-manager"
+ enable_channel_flag="--enable"
+ disable_channel_flag="--disable"
+ pre_reqs="yum-utils"
+ pkg_suffix="el"
+ fi
+ (
+ if ! is_dry_run; then
+ set -x
+ fi
+ $sh_c "$pkg_manager install -y -q $pre_reqs"
+ $sh_c "$config_manager --add-repo $yum_repo"
+
+ if [ "$CHANNEL" != "stable" ]; then
+ $sh_c "$config_manager $disable_channel_flag docker-ce-*"
+ $sh_c "$config_manager $enable_channel_flag docker-ce-$CHANNEL"
+ fi
+ $sh_c "$pkg_manager makecache"
+ )
+ pkg_version=""
+ if [ -n "$VERSION" ]; then
+ if is_dry_run; then
+ echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
+ else
+ pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g").*$pkg_suffix"
+ search_command="$pkg_manager list --showduplicates 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
+ pkg_version="$($sh_c "$search_command")"
+ echo "INFO: Searching repository for VERSION '$VERSION'"
+ echo "INFO: $search_command"
+ if [ -z "$pkg_version" ]; then
+ echo
+ echo "ERROR: '$VERSION' not found amongst $pkg_manager list results"
+ echo
+ exit 1
+ fi
+ search_command="$pkg_manager list --showduplicates 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
+ # It's okay for cli_pkg_version to be blank, since older versions don't support a cli package
+ cli_pkg_version="$($sh_c "$search_command" | cut -d':' -f 2)"
+ # Cut out the epoch and prefix with a '-'
+ pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)"
+ fi
+ fi
+ (
+ if ! is_dry_run; then
+ set -x
+ fi
+ # install the correct cli version first
+ if [ -n "$cli_pkg_version" ]; then
+ $sh_c "$pkg_manager install -y -q docker-ce-cli-$cli_pkg_version"
+ fi
+ $sh_c "$pkg_manager install -y -q docker-ce$pkg_version"
+ # shellcheck disable=SC2031
+ if [ -n "$has_rootless_extras" ]; then
+ $sh_c "$pkg_manager install -y -q docker-ce-rootless-extras$pkg_version"
+ fi
+ )
+ exit 0
+ ;;
+ *)
+ if [ -z "$lsb_dist" ]; then
+ if is_darwin; then
+ echo
+ echo "ERROR: Unsupported operating system 'macOS'"
+ echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
+ echo
+ exit 1
+ fi
+ fi
+ echo
+ echo "ERROR: Unsupported distribution '$lsb_dist'"
+ echo
+ exit 1
+ ;;
+ esac
+
+ exit 1
+}
+
+# wrapped up in a function so that we have some protection against only getting
+# half the file during "curl | sh"
+do_install
diff --git a/scripts/downloadKubekey.sh b/scripts/downloadKubekey.sh
new file mode 100755
index 00000000..8fd9b947
--- /dev/null
+++ b/scripts/downloadKubekey.sh
@@ -0,0 +1,96 @@
+#!/bin/sh
+
+# Copyright 2020 The KubeSphere Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ISLINUX=true
+OSTYPE="linux"
+
+if [ "x$(uname)" != "xLinux" ]; then
+ echo ""
+ echo 'Warning: Non-Linux operating systems are not supported! After downloading, please copy the tar.gz file to linux.'
+ ISLINUX=false
+fi
+
+# Fetch latest version
+if [ "x${VERSION}" = "x" ]; then
+ VERSION="$(curl -sL https://api.github.com/repos/kubesphere/kubekey/releases |
+ grep -o 'download/v[0-9]*.[0-9]*.[0-9]*/' |
+ sort --version-sort |
+ tail -1 | awk -F'/' '{ print $2}')"
+ VERSION="${VERSION##*/}"
+fi
+
+if [ -z "${ARCH}" ]; then
+ case "$(uname -m)" in
+ x86_64)
+ ARCH=amd64
+ ;;
+ armv8*)
+ ARCH=arm64
+ ;;
+ aarch64*)
+ ARCH=arm64
+ ;;
+ *)
+ echo "$(uname -m), isn't supported" # ARCH is empty in this branch; report the detected machine type
+ exit 1
+ ;;
+ esac
+fi
+
+if [ "x${VERSION}" = "x" ]; then
+ echo "Unable to get latest Kubekey version. Set VERSION env var and re-run. For example: export VERSION=v1.0.0"
+ echo ""
+ exit 1
+fi
+
+DOWNLOAD_URL="https://github.com/kubesphere/kubekey/releases/download/${VERSION}/kubekey-${VERSION}-${OSTYPE}-${ARCH}.tar.gz"
+if [ "x${KKZONE}" = "xcn" ]; then
+ DOWNLOAD_URL="https://kubernetes.pek3b.qingstor.com/kubekey/releases/download/${VERSION}/kubekey-${VERSION}-${OSTYPE}-${ARCH}.tar.gz"
+fi
+
+echo ""
+echo "Downloading kubekey ${VERSION} from ${DOWNLOAD_URL} ..."
+echo ""
+
+curl -fsLO "$DOWNLOAD_URL"
+if [ $? -ne 0 ]; then
+ echo ""
+ echo "Failed to download Kubekey ${VERSION} !"
+ echo ""
+ echo "Please verify the version you are trying to download."
+ echo ""
+ exit 1
+fi
+
+if [ ${ISLINUX} = true ]; then
+ filename="kubekey-${VERSION}-${OSTYPE}-${ARCH}.tar.gz"
+ ret='0'
+ command -v tar >/dev/null 2>&1 || { ret='1'; }
+ if [ "$ret" -eq 0 ]; then
+ tar -xzf "${filename}"
+ else
+ echo "Kubekey ${VERSION} Download Complete!"
+ echo ""
+ echo "Try to unpack the ${filename} failed."
+ echo "tar: command not found, please unpack the ${filename} manually."
+ exit
+ fi
+fi
+
+echo ""
+echo "Kubekey ${VERSION} Download Complete!"
+echo ""
+
diff --git a/scripts/go_install.sh b/scripts/go_install.sh
new file mode 100755
index 00000000..a07b8e0f
--- /dev/null
+++ b/scripts/go_install.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# Copyright 2021 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+if [ -z "${1:-}" ]; then # ":-" guards against nounset aborting before the usage message prints
+ echo "must provide module as first parameter"
+ exit 1
+fi
+
+if [ -z "${2:-}" ]; then
+ echo "must provide binary name as second parameter"
+ exit 1
+fi
+
+if [ -z "${3:-}" ]; then
+ echo "must provide version as third parameter"
+ exit 1
+fi
+
+if [ -z "${GOBIN:-}" ]; then
+ echo "GOBIN is not set. Must set GOBIN to install the bin in a specified directory."
+ exit 1
+fi
+
+rm -f "${GOBIN}/${2}"* || true
+
+# install the golang module specified as the first argument
+go install "${1}@${3}"
+mv "${GOBIN}/${2}" "${GOBIN}/${2}-${3}"
+ln -sf "${GOBIN}/${2}-${3}" "${GOBIN}/${2}"
diff --git a/scripts/harborCreateRegistriesAndReplications.sh b/scripts/harborCreateRegistriesAndReplications.sh
new file mode 100644
index 00000000..86375c48
--- /dev/null
+++ b/scripts/harborCreateRegistriesAndReplications.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+
+# createRegistries registers each Harbor instance as a pull/push target on the other two.
+function createRegistries() {
+
+ # create registry
+ curl -k -u "$Harbor_UserPwd" -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/registries" -d "{\"name\": \"master1_2_master2\", \"type\": \"harbor\", \"url\":\"https://${master2_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
+ # create registry
+ curl -k -u "$Harbor_UserPwd" -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/registries" -d "{\"name\": \"master1_2_master3\", \"type\": \"harbor\", \"url\":\"https://${master3_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
+
+ # create registry
+ curl -k -u "$Harbor_UserPwd" -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/registries" -d "{\"name\": \"master2_2_master1\", \"type\": \"harbor\", \"url\":\"https://${master1_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
+ # create registry
+ curl -k -u "$Harbor_UserPwd" -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/registries" -d "{\"name\": \"master2_2_master3\", \"type\": \"harbor\", \"url\":\"https://${master3_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
+
+ # create registry
+ curl -k -u "$Harbor_UserPwd" -X POST -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/registries" -d "{\"name\": \"master3_2_master1\", \"type\": \"harbor\", \"url\":\"https://${master1_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
+ # create registry
+ curl -k -u "$Harbor_UserPwd" -X POST -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/registries" -d "{\"name\": \"master3_2_master2\", \"type\": \"harbor\", \"url\":\"https://${master2_Address}:7443\", \"credential\": {\"access_key\": \"${Harbor_User}\", \"access_secret\": \"${Harbor_Passwd}\"}, \"insecure\": true}"
+
+}
+
+# listRegistries dumps the configured registries on every instance for verification.
+function listRegistries() {
+ curl -k -u "$Harbor_UserPwd" -X GET -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/registries"
+ curl -k -u "$Harbor_UserPwd" -X GET -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/registries"
+ curl -k -u "$Harbor_UserPwd" -X GET -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/registries"
+
+}
+
+# createReplication creates event-based replication policies; the dest_registry ids
+# (1 and 2) assume the registries were just created in order by createRegistries.
+function createReplication() {
+
+ curl -k -u "$Harbor_UserPwd" -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master1_2_master2\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 1, \"name\": \"master1_2_master2\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
+ curl -k -u "$Harbor_UserPwd" -X POST -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master1_2_master3\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 2, \"name\": \"master1_2_master3\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
+
+ curl -k -u "$Harbor_UserPwd" -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master2_2_master1\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 1, \"name\": \"master2_2_master1\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
+ curl -k -u "$Harbor_UserPwd" -X POST -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master2_2_master3\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 2, \"name\": \"master2_2_master3\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
+
+ curl -k -u "$Harbor_UserPwd" -X POST -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master3_2_master1\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 1, \"name\": \"master3_2_master1\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
+ curl -k -u "$Harbor_UserPwd" -X POST -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/replication/policies" -d "{\"name\": \"master3_2_master2\", \"enabled\": true, \"deletion\":true, \"override\":true, \"replicate_deletion\":true, \"dest_registry\":{ \"id\": 2, \"name\": \"master3_2_master2\"}, \"trigger\": {\"type\": \"event_based\"}, \"dest_namespace_replace_count\":1 }"
+}
+
+# listReplications dumps the replication policies on every instance for verification.
+function listReplications() {
+
+ curl -k -u "$Harbor_UserPwd" -X GET -H "Content-Type: application/json" "https://${Harbor_master1_Address}/api/v2.0/replication/policies"
+ curl -k -u "$Harbor_UserPwd" -X GET -H "Content-Type: application/json" "https://${Harbor_master2_Address}/api/v2.0/replication/policies"
+ curl -k -u "$Harbor_UserPwd" -X GET -H "Content-Type: application/json" "https://${Harbor_master3_Address}/api/v2.0/replication/policies"
+}
+
+#### main ######
+Harbor_master1_Address=master1:7443
+master1_Address=192.168.122.61
+Harbor_master2_Address=master2:7443
+master2_Address=192.168.122.62
+Harbor_master3_Address=master3:7443
+master3_Address=192.168.122.63
+Harbor_User=admin # user name for logging in to Harbor
+Harbor_Passwd="Harbor12345" # password for the Harbor user
+Harbor_UserPwd="$Harbor_User:$Harbor_Passwd"
+
+
+createRegistries
+listRegistries
+createReplication
+listReplications
diff --git a/scripts/harbor_keepalived/check_harbor.sh b/scripts/harbor_keepalived/check_harbor.sh
new file mode 100644
index 00000000..0a3fb0bd
--- /dev/null
+++ b/scripts/harbor_keepalived/check_harbor.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+#count=$(docker-compose -f /opt/harbor/docker-compose.yml ps -a|grep healthy|wc -l)
+# 不能频繁调用docker-compose 否则会有非常多的临时目录被创建:/tmp/_MEI*
+count=$(docker ps |grep goharbor|grep healthy|wc -l)
+status=$(ss -tlnp|grep -w 443|wc -l)
+if [ $count -ne 11 -a ];then
+ exit 8
+elif [ $status -lt 2 ];then
+ exit 9
+else
+ exit 0
+fi
diff --git a/scripts/harbor_keepalived/docker-compose-keepalived-backup.yaml b/scripts/harbor_keepalived/docker-compose-keepalived-backup.yaml
new file mode 100644
index 00000000..7328f96a
--- /dev/null
+++ b/scripts/harbor_keepalived/docker-compose-keepalived-backup.yaml
@@ -0,0 +1,14 @@
+version: '3.8'
+
+# Reference docker-compose configuration for running keepalived as a single container.
+# See the image's README.md for the full list of supported options.
+services:
+ keepalived:
+ image: 'registry.cn-shenzhen.aliyuncs.com/colovu/keepalived:2.1'
+ privileged: true
+ network_mode: host
+ volumes:
+ - ./keepalived-backup.conf:/srv/conf/keepalived/keepalived.conf
+ - ./check_harbor.sh:/srv/conf/keepalived/check_harbor.sh
+ container_name: keepalived
+ restart: on-failure
diff --git a/scripts/harbor_keepalived/docker-compose-keepalived-master.yaml b/scripts/harbor_keepalived/docker-compose-keepalived-master.yaml
new file mode 100644
index 00000000..64f35aee
--- /dev/null
+++ b/scripts/harbor_keepalived/docker-compose-keepalived-master.yaml
@@ -0,0 +1,14 @@
+version: '3.8'
+
+# Reference docker-compose configuration for running keepalived as a single container.
+# See the image's README.md for the full list of supported options.
+services:
+ keepalived:
+ image: 'registry.cn-shenzhen.aliyuncs.com/colovu/keepalived:2.1'
+ privileged: true
+ network_mode: host
+ volumes:
+ - ./keepalived-master.conf:/srv/conf/keepalived/keepalived.conf
+ - ./check_harbor.sh:/srv/conf/keepalived/check_harbor.sh
+ container_name: keepalived
+ restart: on-failure
diff --git a/scripts/harbor_keepalived/keepalived-backup.conf b/scripts/harbor_keepalived/keepalived-backup.conf
new file mode 100644
index 00000000..be916c90
--- /dev/null
+++ b/scripts/harbor_keepalived/keepalived-backup.conf
@@ -0,0 +1,31 @@
+vrrp_script check_harbor {
+ script "/srv/conf/keepalived/check_harbor.sh"
+ interval 10 # probe interval in seconds (default 1s)
+ fall 2 # consecutive failures before the script is considered failed
+ rise 2 # consecutive successes before the script is considered healthy again
+ timeout 5
+ init_fail
+}
+global_defs {
+ script_user root
+ router_id harbor-ha
+ enable_script_security
+ lvs_sync_daemon ens3 VI_1
+}
+vrrp_instance VI_1 {
+ state BACKUP
+ interface ens3
+ virtual_router_id 31 # must be unique per keepalived cluster on the same LAN
+ priority 50
+ advert_int 1
+ authentication {
+ auth_type PASS
+ auth_pass k8s-test
+ }
+ virtual_ipaddress {
+ 192.168.122.59
+ }
+ track_script {
+ check_harbor
+ }
+}
diff --git a/scripts/harbor_keepalived/keepalived-master.conf b/scripts/harbor_keepalived/keepalived-master.conf
new file mode 100644
index 00000000..de3566e4
--- /dev/null
+++ b/scripts/harbor_keepalived/keepalived-master.conf
@@ -0,0 +1,31 @@
+vrrp_script check_harbor {
+ script "/srv/conf/keepalived/check_harbor.sh"
+ interval 10 # probe interval in seconds (default 1s)
+ fall 2 # consecutive failures before the script is considered failed
+ rise 2 # consecutive successes before the script is considered healthy again
+ timeout 5
+ init_fail
+}
+global_defs {
+ script_user root
+ router_id harbor-ha
+ enable_script_security
+ lvs_sync_daemon ens3 VI_1
+}
+vrrp_instance VI_1 {
+ state MASTER
+ interface ens3
+ virtual_router_id 31 # must be unique per keepalived cluster on the same LAN
+ priority 100
+ advert_int 1
+ authentication {
+ auth_type PASS
+ auth_pass k8s-test
+ }
+ virtual_ipaddress {
+ 192.168.122.59
+ }
+ track_script {
+ check_harbor
+ }
+}
diff --git a/version/version.go b/version/version.go
new file mode 100644
index 00000000..77388e2f
--- /dev/null
+++ b/version/version.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package version implements version handling code.
+package version
+
+import (
+ _ "embed"
+ "encoding/json"
+ "fmt"
+ "runtime"
+)
+
+var (
+ gitMajor string // major version, always numeric
+ gitMinor string // minor version, numeric possibly followed by "+"
+ gitVersion string // semantic version, derived by build scripts
+ gitCommit string // sha1 from git, output of $(git rev-parse HEAD)
+ gitTreeState string // state of git tree, either "clean" or "dirty"
+ buildDate string // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+)
+
+// Info exposes information about the version used for the current running code.
+type Info struct {
+ Major string `json:"major,omitempty"`
+ Minor string `json:"minor,omitempty"`
+ GitVersion string `json:"gitVersion,omitempty"`
+ GitCommit string `json:"gitCommit,omitempty"`
+ GitTreeState string `json:"gitTreeState,omitempty"`
+ BuildDate string `json:"buildDate,omitempty"`
+ GoVersion string `json:"goVersion,omitempty"`
+ Compiler string `json:"compiler,omitempty"`
+ Platform string `json:"platform,omitempty"`
+}
+
+// Get returns an Info object with all the information about the current running code.
+func Get() Info {
+ return Info{
+ Major: gitMajor,
+ Minor: gitMinor,
+ GitVersion: gitVersion,
+ GitCommit: gitCommit,
+ GitTreeState: gitTreeState,
+ BuildDate: buildDate,
+ GoVersion: runtime.Version(), // Go toolchain version that built the binary
+ Compiler: runtime.Compiler, // compiler toolchain name, e.g. "gc"
+ Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), // e.g. "linux/amd64"
+ }
+}
+
+// String returns info as a human-friendly version string.
+func (info Info) String() string {
+ return info.GitVersion
+}
+
+// ParseFilesSha256 parses files' sha256 checksums from components.json bytes.
+func ParseFilesSha256(componentsJSON []byte) (map[string]map[string]map[string]string, error) {
+ m := make(map[string]map[string]map[string]string)
+ err := json.Unmarshal(componentsJSON, &m)
+ if err != nil {
+ return nil, err
+ }
+ return m, nil
+}