fix: add operator

Signed-off-by: joyceliu <joyceliu@yunify.com>
joyceliu 2024-05-23 18:09:33 +08:00
parent 34b4d1b8c9
commit 2eed0820d7
56 changed files with 1456 additions and 1093 deletions

3
.dockerignore Normal file
View File

@@ -0,0 +1,3 @@
bin
example
exp

3
.gitignore vendored
View File

@@ -4,8 +4,6 @@
*.so
*.dylib
*.tmp
bin
hack/tools/bin
# Test binary, build with `go test -c`
*.test
@@ -39,3 +37,4 @@ tmp
# Used by current object
/example/test/
/_output/

View File

@@ -1,61 +0,0 @@
# Build architecture
ARG ARCH
ARG builder_image
# Download dependencies
FROM alpine:3.19.0 as base_os_context
ENV OUTDIR=/out
RUN mkdir -p ${OUTDIR}/usr/local/bin/
WORKDIR /tmp
RUN apk add --no-cache ca-certificates
# Build the manager binary
FROM ${builder_image} as builder
# Run this with docker build --build-arg goproxy=$(go env GOPROXY) to override the goproxy
ARG goproxy=https://goproxy.cn,direct
ENV GOPROXY=$goproxy
WORKDIR /workspace
COPY go.mod go.mod
COPY go.sum go.sum
# Cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN --mount=type=cache,target=/go/pkg/mod \
go mod download
# Copy the go source
COPY ./ ./
# Cache the go build into the Go compiler cache folder so we benefit from compiler caching across docker build calls
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
go build -o controller-manager cmd/controller-manager/controller_manager.go
# Build
ARG ARCH
ARG LDFLAGS
# Do not force rebuild of up-to-date packages (do not use -a) and use the compiler cache folder
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \
go build -o controller-manager cmd/controller-manager/controller_manager.go
FROM --platform=${ARCH} alpine:3.19.0
WORKDIR /
RUN mkdir -p /var/lib/kubekey/rootfs
COPY --from=base_os_context /out/ /
COPY --from=builder /workspace/controller-manager /usr/local/bin
ENTRYPOINT ["sh"]
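For reference, the goproxy build-arg mentioned in the comment of this removed Dockerfile maps to an invocation like the following sketch; the image tag and builder image are illustrative, and BuildKit must be enabled for the cache mounts:

```shell
# override the Go module proxy at build time
DOCKER_BUILDKIT=1 docker build \
    --build-arg builder_image=docker.io/library/golang:1.22 \
    --build-arg goproxy="$(go env GOPROXY)" \
    --build-arg ARCH=amd64 \
    -t controller-manager:dev .
```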

755
Makefile
View File

@@ -6,9 +6,10 @@ SHELL:=/usr/bin/env bash
#
# Go.
#
GO_VERSION ?= 1.20
GO_VERSION ?= 1.22
GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION)
GOARCH ?= $(shell go env GOARCH)
GOOS ?= $(shell go env GOOS)
# Use GOPROXY environment variable if set
GOPROXY := $(shell go env GOPROXY)
ifeq ($(GOPROXY),)
@@ -17,25 +18,38 @@ endif
export GOPROXY
# Activate module mode, as we use go modules to manage dependencies
export GO111MODULE=on
#export GO111MODULE=on
# This option is for running docker manifest command
export DOCKER_CLI_EXPERIMENTAL := enabled
#export DOCKER_CLI_EXPERIMENTAL := enabled
#
# Directories.
#
# Full directory of where the Makefile resides
ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
EXP_DIR := exp
BIN_DIR := bin
#EXP_DIR := exp
TEST_DIR := test
TOOLS_DIR := hack/tools
TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/$(BIN_DIR))
#BIN_DIR := $(abspath $(TOOLS_DIR)/$(BIN_DIR))
E2E_FRAMEWORK_DIR := $(TEST_DIR)/framework
GO_INSTALL := ./scripts/go_install.sh
export PATH := $(abspath $(TOOLS_BIN_DIR)):$(PATH)
# output
OUTPUT_DIR := $(abspath $(ROOT_DIR)/_output)
BIN_DIR := $(OUTPUT_DIR)/bin
#ARTIFACTS ?= ${OUTPUT_DIR}/_artifacts
dirs := $(OUTPUT_DIR) $(BIN_DIR)
$(foreach dir, $(dirs), \
$(if $(wildcard $(dir)),, \
$(shell mkdir -p $(dir)) \
) \
)
export PATH := $(abspath $(BIN_DIR)):$(PATH)
#
# Binaries.
@@ -43,37 +57,53 @@ export PATH := $(abspath $(TOOLS_BIN_DIR)):$(PATH)
# Note: Need to use abspath so we can invoke these from subdirectories
KUSTOMIZE_VER := v4.5.2
KUSTOMIZE_BIN := kustomize
KUSTOMIZE := $(abspath $(TOOLS_BIN_DIR)/$(KUSTOMIZE_BIN)-$(KUSTOMIZE_VER))
KUSTOMIZE := $(abspath $(BIN_DIR)/$(KUSTOMIZE_BIN)-$(KUSTOMIZE_VER))
KUSTOMIZE_PKG := sigs.k8s.io/kustomize/kustomize/v4
SETUP_ENVTEST_VER := v0.0.0-20211110210527-619e6b92dab9
SETUP_ENVTEST_VER := v0.0.0-20240521074430-fbb7d370bebc
SETUP_ENVTEST_BIN := setup-envtest
SETUP_ENVTEST := $(abspath $(TOOLS_BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER))
SETUP_ENVTEST := $(abspath $(BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER))
SETUP_ENVTEST_PKG := sigs.k8s.io/controller-runtime/tools/setup-envtest
CONTROLLER_GEN_VER := v0.13.0
CONTROLLER_GEN_VER := v0.15.0
CONTROLLER_GEN_BIN := controller-gen
CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER))
CONTROLLER_GEN := $(abspath $(BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER))
CONTROLLER_GEN_PKG := sigs.k8s.io/controller-tools/cmd/controller-gen
GOTESTSUM_VER := v1.6.4
GOTESTSUM_BIN := gotestsum
GOTESTSUM := $(abspath $(TOOLS_BIN_DIR)/$(GOTESTSUM_BIN)-$(GOTESTSUM_VER))
GOTESTSUM := $(abspath $(BIN_DIR)/$(GOTESTSUM_BIN)-$(GOTESTSUM_VER))
GOTESTSUM_PKG := gotest.tools/gotestsum
HADOLINT_VER := v2.10.0
HADOLINT_FAILURE_THRESHOLD = warning
GOLANGCI_LINT_BIN := golangci-lint
GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN))
GOLANGCI_LINT := $(abspath $(BIN_DIR)/$(GOLANGCI_LINT_BIN))
#
# Docker.
#
DOCKERCMD ?= $(shell which docker)
DOCKER_BUILD_ENV = DOCKER_BUILDKIT=1
DOCKER_BUILD ?= $(DOCKER_BUILD_ENV) $(DOCKERCMD) buildx build
PLATFORM ?= linux/amd64,linux/arm64
DOCKER_PUSH ?= $(DOCKER_BUILD) --platform $(PLATFORM) --push
# Define Docker related variables. Releases should modify and double check these vars.
REGISTRY ?= docker.io/kubespheredev
PROD_REGISTRY ?= docker.io/kubesphere
REGISTRY ?= hub.kubesphere.com.cn/kubekey
#REGISTRY ?= docker.io/kubespheredev
#PROD_REGISTRY ?= docker.io/kubesphere
# capkk
CAPKK_IMAGE_NAME ?= capkk-controller
CAPKK_CONTROLLER_IMG ?= $(REGISTRY)/$(CAPKK_IMAGE_NAME)
#CAPKK_IMAGE_NAME ?= capkk-controller
#CAPKK_CONTROLLER_IMG ?= $(REGISTRY)/$(CAPKK_IMAGE_NAME)
# controller-manager
OPERATOR_IMAGE_NAME ?= controller-manager
OPERATOR_CONTROLLER_IMG ?= $(REGISTRY)/$(OPERATOR_IMAGE_NAME)
# executor
EXECUTOR_IMAGE_NAME ?= executor
EXECUTOR_CONTROLLER_IMG ?= $(REGISTRY)/$(EXECUTOR_IMAGE_NAME)
# bootstrap
K3S_BOOTSTRAP_IMAGE_NAME ?= k3s-bootstrap-controller
@@ -86,27 +116,27 @@ K3S_CONTROL_PLANE_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME)
# It is set by Prow GIT_TAG, a git-based tag of the form vYYYYMMDD-hash, e.g., v20210120-v0.3.10-308-gc61521971
TAG ?= dev
ARCH ?= $(shell go env GOARCH)
ALL_ARCH = amd64 arm arm64 ppc64le s390x
#ALL_ARCH = amd64 arm arm64 ppc64le s390x
# Allow overriding the imagePullPolicy
PULL_POLICY ?= Always
#PULL_POLICY ?= Always
# Hosts running SELinux need :z added to volume mounts
SELINUX_ENABLED := $(shell cat /sys/fs/selinux/enforce 2> /dev/null || echo 0)
ifeq ($(SELINUX_ENABLED),1)
DOCKER_VOL_OPTS?=:z
endif
#SELINUX_ENABLED := $(shell cat /sys/fs/selinux/enforce 2> /dev/null || echo 0)
#
#ifeq ($(SELINUX_ENABLED),1)
# DOCKER_VOL_OPTS?=:z
#endif
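For reference, the :z suffix that the commented-out block used to append tells Docker to relabel a bind mount so SELinux permits the container to read it; a minimal sketch (paths and image are illustrative):

```shell
# on an SELinux-enforcing host, suffix bind mounts with :z
docker run --rm -v "$(pwd):/workspace:z" -w /workspace \
    docker.io/library/golang:1.22 go env GOARCH
```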
# Set build time variables including version details
LDFLAGS := $(shell hack/version.sh)
# Set kk build tags
BUILDTAGS = exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
#BUILDTAGS = exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
BUILDTAGS ?= builtin
.PHONY: all
all: test managers
#.PHONY: all
#all: test managers
.PHONY: help
help: ## Display this help.
@@ -114,86 +144,29 @@ help: ## Display this help.
## --------------------------------------
## Generate / Manifests
## --------------------------------------
##@ generate:
ALL_GENERATE_MODULES = capkk k3s-bootstrap k3s-control-plane
#ALL_GENERATE_MODULES = capkk k3s-bootstrap k3s-control-plane
.PHONY: generate
generate: ## Run all generate-manifests-*, generate-go-deepcopy-* targets
$(MAKE) generate-modules generate-manifests generate-go-deepcopy
$(MAKE) generate-go-deepcopy-kubekey generate-manifests-kubekey
.PHONY: generate-manifests
generate-manifests: ## Run all generate-manifest-* targets
$(MAKE) $(addprefix generate-manifests-,$(ALL_GENERATE_MODULES))
.PHONY: generate-manifests-capkk
generate-manifests-capkk: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for core
$(MAKE) clean-generated-yaml SRC_DIRS="./config/crd/bases"
$(CONTROLLER_GEN) \
paths=./api/... \
paths=./controllers/... \
crd:crdVersions=v1 \
rbac:roleName=manager-role \
output:crd:dir=./config/crd/bases \
output:webhook:dir=./config/webhook \
webhook
.PHONY: generate-manifests-k3s-bootstrap
generate-manifests-k3s-bootstrap: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for core
$(MAKE) clean-generated-yaml SRC_DIRS="./bootstrap/k3s/config/crd/bases"
$(CONTROLLER_GEN) \
paths=./bootstrap/k3s/api/... \
paths=./bootstrap/k3s/controllers/... \
crd:crdVersions=v1 \
rbac:roleName=manager-role \
output:crd:dir=./bootstrap/k3s/config/crd/bases \
output:rbac:dir=./bootstrap/k3s/config/rbac \
output:webhook:dir=./bootstrap/k3s/config/webhook \
webhook
.PHONY: generate-manifests-k3s-control-plane
generate-manifests-k3s-control-plane: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for core
$(MAKE) clean-generated-yaml SRC_DIRS="./controlplane/k3s/config/crd/bases"
$(CONTROLLER_GEN) \
paths=./controlplane/k3s/api/... \
paths=./controlplane/k3s/controllers/... \
crd:crdVersions=v1 \
rbac:roleName=manager-role \
output:crd:dir=./controlplane/k3s/config/crd/bases \
output:rbac:dir=./controlplane/k3s/config/rbac \
output:webhook:dir=./controlplane/k3s/config/webhook \
webhook
.PHONY: generate-go-deepcopy
generate-go-deepcopy: ## Run all generate-go-deepcopy-* targets
$(MAKE) $(addprefix generate-go-deepcopy-,$(ALL_GENERATE_MODULES))
.PHONY: generate-go-deepcopy-capkk
generate-go-deepcopy-capkk: $(CONTROLLER_GEN) ## Generate deepcopy go code for capkk
$(MAKE) clean-generated-deepcopy SRC_DIRS="./api"
.PHONY: generate-go-deepcopy-kubekey
generate-go-deepcopy-kubekey: $(CONTROLLER_GEN) ## Generate deepcopy object
$(MAKE) clean-generated-deepcopy SRC_DIRS="./pkg/apis/"
$(CONTROLLER_GEN) \
object:headerFile=./hack/boilerplate.go.txt \
paths=./api/... \
paths=./pkg/apis/... \
.PHONY: generate-go-deepcopy-k3s-bootstrap
generate-go-deepcopy-k3s-bootstrap: $(CONTROLLER_GEN) ## Generate deepcopy go code for k3s-bootstrap
$(MAKE) clean-generated-deepcopy SRC_DIRS="./bootstrap/k3s/api"
.PHONY: generate-manifests-kubekey
generate-manifests-kubekey: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc.
$(CONTROLLER_GEN) \
object:headerFile=./hack/boilerplate.go.txt \
paths=./bootstrap/k3s/api/... \
.PHONY: generate-go-deepcopy-k3s-control-plane
generate-go-deepcopy-k3s-control-plane: $(CONTROLLER_GEN) ## Generate deepcopy go code for k3s-control-plane
$(MAKE) clean-generated-deepcopy SRC_DIRS="./controlplane/k3s/api"
$(CONTROLLER_GEN) \
object:headerFile=./hack/boilerplate.go.txt \
paths=./controlplane/k3s/api/... \
.PHONY: generate-modules
generate-modules: ## Run go mod tidy to ensure modules are up to date
go mod tidy
paths=./pkg/apis/... \
crd \
output:crd:dir=./config/helm/crds/
## --------------------------------------
## Lint / Verify
@@ -240,58 +213,78 @@ verify-gen: generate ## Verify go generated files are up to date
##@ build:
.PHONY: kk
kk:
CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/kk github.com/kubesphere/kubekey/v3/cmd/kk;
kk: ## build kk binary
@CGO_ENABLED=0 GOARCH=$(GOARCH) GOOS=$(GOOS) go build -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/kk cmd/kk/kubekey.go
ALL_MANAGERS = capkk k3s-bootstrap k3s-control-plane
.PHONY: docker-build ## build and push all images
docker-build: docker-build-operator docker-build-kk
.PHONY: managers
managers: $(addprefix manager-,$(ALL_MANAGERS)) ## Run all manager-* targets
.PHONY: docker-build-operator
docker-build-operator: ## Build the docker image for operator
@$(DOCKER_PUSH) \
--build-arg builder_image=$(GO_CONTAINER_IMAGE) \
--build-arg goproxy=$(GOPROXY) \
--build-arg ldflags="$(LDFLAGS)" --build-arg build_tags="$(BUILDTAGS)" \
-f build/controller-manager/Dockerfile -t $(OPERATOR_CONTROLLER_IMG):$(TAG) .
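Since DOCKER_PUSH bakes --platform and --push into the buildx invocation, this target builds and pushes a multi-arch image in one step; a sketch of running it (builder name, registry, and tag are illustrative):

```shell
# buildx needs a builder that can produce and push multi-platform images
docker buildx create --use --name kk-builder 2>/dev/null || true
make docker-build-operator REGISTRY=docker.io/example TAG=dev
```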
.PHONY: manager-capkk
manager-capkk: ## Build the capkk manager binary into the ./bin folder
go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/manager github.com/kubesphere/kubekey/v3
.PHONY: docker-build-kk
docker-build-kk: ## Build the docker image for kk
@$(DOCKER_PUSH) \
--build-arg builder_image=$(GO_CONTAINER_IMAGE) \
--build-arg goproxy=$(GOPROXY) \
--build-arg ldflags="$(LDFLAGS)" --build-arg build_tags="$(BUILDTAGS)" \
-f build/kk/Dockerfile -t $(EXECUTOR_CONTROLLER_IMG):$(TAG) .
.PHONY: manager-k3s-bootstrap
manager-k3s-bootstrap: ## Build the k3s bootstrap manager binary into the ./bin folder
go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-bootstrap-manager github.com/kubesphere/kubekey/v3/bootstrap/k3s
.PHONY: manager-k3s-control-plane
manager-k3s-control-plane: ## Build the k3s control plane manager binary into the ./bin folder
go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-control-plane-manager github.com/kubesphere/kubekey/v3/controlplane/k3s
#ALL_MANAGERS = capkk k3s-bootstrap k3s-control-plane
.PHONY: docker-pull-prerequisites
docker-pull-prerequisites:
docker pull docker.io/docker/dockerfile:1.4
docker pull $(GO_CONTAINER_IMAGE)
.PHONY: docker-build-all
docker-build-all: $(addprefix docker-build-,$(ALL_ARCH)) ## Build docker images for all architectures
docker-build-%:
$(MAKE) ARCH=$* docker-build
ALL_DOCKER_BUILD = capkk k3s-bootstrap k3s-control-plane
.PHONY: docker-build
docker-build: docker-pull-prerequisites ## Run docker-build-* targets for all providers
$(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(ALL_DOCKER_BUILD))
.PHONY: docker-build-capkk
docker-build-capkk: ## Build the docker image for capkk
DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
.PHONY: docker-build-k3s-bootstrap
docker-build-k3s-bootstrap: ## Build the docker image for k3s bootstrap controller manager
DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./bootstrap/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
.PHONY: docker-build-k3s-control-plane
docker-build-k3s-control-plane: ## Build the docker image for k3s control plane controller manager
DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./controlplane/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
.PHONY: docker-build-e2e
docker-build-e2e: ## Build the docker image for capkk
$(MAKE) docker-build REGISTRY=docker.io/kubespheredev PULL_POLICY=IfNotPresent TAG=e2e
#.PHONY: managers
#managers: $(addprefix manager-,$(ALL_MANAGERS)) ## Run all manager-* targets
#
#.PHONY: manager-capkk
#manager-capkk: ## Build the capkk manager binary into the ./bin folder
# go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/manager github.com/kubesphere/kubekey/v3
#
#.PHONY: manager-k3s-bootstrap
#manager-k3s-bootstrap: ## Build the k3s bootstrap manager binary into the ./bin folder
# go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-bootstrap-manager github.com/kubesphere/kubekey/v3/bootstrap/k3s
#
#.PHONY: manager-k3s-control-plane
#manager-k3s-control-plane: ## Build the k3s control plane manager binary into the ./bin folder
# go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-control-plane-manager github.com/kubesphere/kubekey/v3/controlplane/k3s
#
#.PHONY: docker-pull-prerequisites
#docker-pull-prerequisites:
# docker pull docker.io/docker/dockerfile:1.4
# docker pull $(GO_CONTAINER_IMAGE)
#
#.PHONY: docker-build-all
#docker-build-all: $(addprefix docker-build-,$(ALL_ARCH)) ## Build docker images for all architectures
#
#docker-build-%:
# $(MAKE) ARCH=$* docker-build
#
#ALL_DOCKER_BUILD = capkk k3s-bootstrap k3s-control-plane
#
#.PHONY: docker-build
#docker-build: docker-pull-prerequisites ## Run docker-build-* targets for all providers
# $(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(ALL_DOCKER_BUILD))
#
#.PHONY: docker-build-capkk
#docker-build-capkk: ## Build the docker image for capkk
# DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
#
#.PHONY: docker-build-k3s-bootstrap
#docker-build-k3s-bootstrap: ## Build the docker image for k3s bootstrap controller manager
# DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./bootstrap/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
#
#.PHONY: docker-build-k3s-control-plane
#docker-build-k3s-control-plane: ## Build the docker image for k3s control plane controller manager
# DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./controlplane/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
#
#.PHONY: docker-build-e2e
#docker-build-e2e: ## Build the docker image for capkk
# $(MAKE) docker-build REGISTRY=docker.io/kubespheredev PULL_POLICY=IfNotPresent TAG=e2e
## --------------------------------------
## Deployment
@@ -299,29 +292,33 @@ docker-build-e2e: ## Build the docker image for capkk
##@ deployment
ifndef ignore-not-found
ignore-not-found = false
endif
.PHONY: helm-package
helm-package: ## Helm-package.
helm package config/helm -d $(OUTPUT_DIR)
.PHONY: install
install: generate $(KUSTOMIZE) ## Install CRDs into the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/crd | kubectl apply -f -
.PHONY: uninstall
uninstall: generate $(KUSTOMIZE) ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
.PHONY: deploy
deploy: generate $(KUSTOMIZE) ## Deploy controller to the K8s cluster specified in ~/.kube/config.
$(MAKE) set-manifest-image \
MANIFEST_IMG=$(REGISTRY)/$(CAPKK_IMAGE_NAME)-$(ARCH) MANIFEST_TAG=$(TAG) \
TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
cd config/manager
$(KUSTOMIZE) build config/default | kubectl apply -f -
.PHONY: undeploy
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
#ifndef ignore-not-found
# ignore-not-found = false
#endif
#
#.PHONY: install
#install: generate $(KUSTOMIZE) ## Install CRDs into the K8s cluster specified in ~/.kube/config.
# $(KUSTOMIZE) build config/crd | kubectl apply -f -
#
#.PHONY: uninstall
#uninstall: generate $(KUSTOMIZE) ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
# $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
#
#.PHONY: deploy
#deploy: generate $(KUSTOMIZE) ## Deploy controller to the K8s cluster specified in ~/.kube/config.
# $(MAKE) set-manifest-image \
# MANIFEST_IMG=$(REGISTRY)/$(CAPKK_IMAGE_NAME)-$(ARCH) MANIFEST_TAG=$(TAG) \
# TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
# cd config/manager
# $(KUSTOMIZE) build config/default | kubectl apply -f -
#
#.PHONY: undeploy
#undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
# $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
## --------------------------------------
## Testing
@@ -329,41 +326,40 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi
##@ test:
ARTIFACTS ?= ${ROOT_DIR}/_artifacts
ifeq ($(shell go env GOOS),darwin) # Use the darwin/amd64 binary until an arm64 version is available
KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path --arch amd64 $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))
else
KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))
endif
#ifeq ($(shell go env GOOS),darwin) # Use the darwin/amd64 binary until an arm64 version is available
# KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path --arch amd64 $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))
#else
# KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))
#endif
#
#.PHONY: test
#test: $(SETUP_ENVTEST) ## Run unit and integration tests
# KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test ./... $(TEST_ARGS)
.PHONY: test
test: $(SETUP_ENVTEST) ## Run unit and integration tests
KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test ./... $(TEST_ARGS)
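The test target resolves envtest binaries through setup-envtest before running go test, roughly equivalent to the following sketch (assuming the setup-envtest binary is on PATH; the Kubernetes version selector is illustrative):

```shell
# setup-envtest downloads kube-apiserver/etcd test binaries and prints their path
KUBEBUILDER_ASSETS="$(setup-envtest use -p path 1.30.x)" go test ./...
```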
.PHONY: test-verbose
test-verbose: ## Run unit and integration tests with verbose flag
$(MAKE) test TEST_ARGS="$(TEST_ARGS) -v"
.PHONY: test-junit
test-junit: $(SETUP_ENVTEST) $(GOTESTSUM) ## Run unit and integration tests and generate a junit report
set +o errexit; (KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -json ./... $(TEST_ARGS); echo $$? > $(ARTIFACTS)/junit.exitcode) | tee $(ARTIFACTS)/junit.stdout
$(GOTESTSUM) --junitfile $(ARTIFACTS)/junit.xml --raw-command cat $(ARTIFACTS)/junit.stdout
exit $$(cat $(ARTIFACTS)/junit.exitcode)
.PHONY: test-cover
test-cover: ## Run unit and integration tests and generate a coverage report
$(MAKE) test TEST_ARGS="$(TEST_ARGS) -coverprofile=out/coverage.out"
go tool cover -func=out/coverage.out -o out/coverage.txt
go tool cover -html=out/coverage.out -o out/coverage.html
.PHONY: test-e2e
test-e2e: ## Run e2e tests
$(MAKE) -C $(TEST_DIR)/e2e run
.PHONY: test-e2e-k3s
test-e2e-k3s: ## Run e2e tests
$(MAKE) -C $(TEST_DIR)/e2e run-k3s
#.PHONY: test-verbose
#test-verbose: ## Run unit and integration tests with verbose flag
# $(MAKE) test TEST_ARGS="$(TEST_ARGS) -v"
#
#.PHONY: test-junit
#test-junit: $(SETUP_ENVTEST) $(GOTESTSUM) ## Run unit and integration tests and generate a junit report
# set +o errexit; (KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -json ./... $(TEST_ARGS); echo $$? > $(ARTIFACTS)/junit.exitcode) | tee $(ARTIFACTS)/junit.stdout
# $(GOTESTSUM) --junitfile $(ARTIFACTS)/junit.xml --raw-command cat $(ARTIFACTS)/junit.stdout
# exit $$(cat $(ARTIFACTS)/junit.exitcode)
#
#.PHONY: test-cover
#test-cover: ## Run unit and integration tests and generate a coverage report
# $(MAKE) test TEST_ARGS="$(TEST_ARGS) -coverprofile=out/coverage.out"
# go tool cover -func=out/coverage.out -o out/coverage.txt
# go tool cover -html=out/coverage.out -o out/coverage.html
#
#.PHONY: test-e2e
#test-e2e: ## Run e2e tests
# $(MAKE) -C $(TEST_DIR)/e2e run
#
#.PHONY: test-e2e-k3s
#test-e2e-k3s: ## Run e2e tests
# $(MAKE) -C $(TEST_DIR)/e2e run-k3s
## --------------------------------------
## Release
@@ -372,144 +368,145 @@ test-e2e-k3s: ## Run e2e tests
##@ release:
## latest git tag for the commit, e.g., v0.3.10
RELEASE_TAG ?= $(shell git describe --abbrev=0 2>/dev/null)
ifneq (,$(findstring -,$(RELEASE_TAG)))
PRE_RELEASE=true
endif
# the previous release tag, e.g., v0.3.9, excluding pre-release tags
PREVIOUS_TAG ?= $(shell git tag -l | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$$" | sort -V | grep -B1 $(RELEASE_TAG) | head -n 1 2>/dev/null)
RELEASE_DIR := out
$(RELEASE_DIR):
mkdir -p $(RELEASE_DIR)/
.PHONY: release
release: clean-release ## Build and push container images using the latest git tag for the commit
@if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi
@if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi
git checkout "${RELEASE_TAG}"
## Build binaries first.
GIT_VERSION=$(RELEASE_TAG) $(MAKE) release-binaries
# Set the manifest image to the production bucket.
$(MAKE) manifest-modification REGISTRY=$(PROD_REGISTRY)
## Build the manifests
$(MAKE) release-manifests
## Build the templates
$(MAKE) release-templates
## Clean the git artifacts modified in the release process
$(MAKE) clean-release-git
release-binaries: ## Build the binaries to publish with a release
RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=amd64 $(MAKE) release-binary
RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=amd64 $(MAKE) release-archive
RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=arm64 $(MAKE) release-binary
RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=arm64 $(MAKE) release-archive
RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=amd64 $(MAKE) release-binary
RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=amd64 $(MAKE) release-archive
RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=arm64 $(MAKE) release-binary
RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=arm64 $(MAKE) release-archive
release-binary: $(RELEASE_DIR)
docker run \
--rm \
-e CGO_ENABLED=0 \
-e GOOS=$(GOOS) \
-e GOARCH=$(GOARCH) \
-e GOPROXY=$(GOPROXY) \
-v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \
-w /workspace \
golang:$(GO_VERSION) \
go build -a -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS) -extldflags '-static'" \
-o $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY)) $(RELEASE_BINARY)
release-archive: $(RELEASE_DIR)
tar -czf $(RELEASE_DIR)/kubekey-$(RELEASE_TAG)-$(GOOS)-$(GOARCH).tar.gz -C $(RELEASE_DIR)/ $(notdir $(RELEASE_BINARY))
rm -rf $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY))
.PHONY: manifest-modification
manifest-modification: # Set the manifest images to the staging/production bucket.
$(MAKE) set-manifest-image \
MANIFEST_IMG=$(REGISTRY)/$(CAPKK_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
$(MAKE) set-manifest-image \
MANIFEST_IMG=$(REGISTRY)/$(K3S_BOOTSTRAP_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_image_patch.yaml"
$(MAKE) set-manifest-image \
MANIFEST_IMG=$(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
TARGET_RESOURCE="./controlplane/k3s/config/default/manager_image_patch.yaml"
$(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./config/default/manager_pull_policy.yaml"
$(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_pull_policy.yaml"
$(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./controlplane/k3s/config/default/manager_pull_policy.yaml"
.PHONY: release-manifests
release-manifests: $(RELEASE_DIR) $(KUSTOMIZE) ## Build the manifests to publish with a release
# Build capkk-components.
$(KUSTOMIZE) build config/default > $(RELEASE_DIR)/infrastructure-components.yaml
# Build bootstrap-components.
$(KUSTOMIZE) build bootstrap/k3s/config/default > $(RELEASE_DIR)/bootstrap-components.yaml
# Build control-plane-components.
$(KUSTOMIZE) build controlplane/k3s/config/default > $(RELEASE_DIR)/control-plane-components.yaml
# Add metadata to the release artifacts
cp metadata.yaml $(RELEASE_DIR)/metadata.yaml
.PHONY: release-templates
release-templates: $(RELEASE_DIR) ## Generate release templates
cp templates/cluster-template*.yaml $(RELEASE_DIR)/
.PHONY: release-prod
release-prod: ## Build and push container images to the prod
REGISTRY=$(PROD_REGISTRY) TAG=$(RELEASE_TAG) $(MAKE) docker-build-all docker-push-all
#RELEASE_TAG ?= $(shell git describe --abbrev=0 2>/dev/null)
#ifneq (,$(findstring -,$(RELEASE_TAG)))
# PRE_RELEASE=true
#endif
## the previous release tag, e.g., v0.3.9, excluding pre-release tags
#PREVIOUS_TAG ?= $(shell git tag -l | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$$" | sort -V | grep -B1 $(RELEASE_TAG) | head -n 1 2>/dev/null)
#RELEASE_DIR := out
#
#$(RELEASE_DIR):
# mkdir -p $(RELEASE_DIR)/
#
#.PHONY: release
#release: clean-release ## Build and push container images using the latest git tag for the commit
# @if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi
# @if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi
# git checkout "${RELEASE_TAG}"
# ## Build binaries first.
# GIT_VERSION=$(RELEASE_TAG) $(MAKE) release-binaries
# # Set the manifest image to the production bucket.
# $(MAKE) manifest-modification REGISTRY=$(PROD_REGISTRY)
# ## Build the manifests
# $(MAKE) release-manifests
# ## Build the templates
# $(MAKE) release-templates
# ## Clean the git artifacts modified in the release process
# $(MAKE) clean-release-git
#
#release-binaries: ## Build the binaries to publish with a release
# RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=amd64 $(MAKE) release-binary
# RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=amd64 $(MAKE) release-archive
# RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=arm64 $(MAKE) release-binary
# RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=arm64 $(MAKE) release-archive
# RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=amd64 $(MAKE) release-binary
# RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=amd64 $(MAKE) release-archive
# RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=arm64 $(MAKE) release-binary
# RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=arm64 $(MAKE) release-archive
#
#release-binary: $(RELEASE_DIR)
# docker run \
# --rm \
# -e CGO_ENABLED=0 \
# -e GOOS=$(GOOS) \
# -e GOARCH=$(GOARCH) \
# -e GOPROXY=$(GOPROXY) \
# -v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \
# -w /workspace \
# golang:$(GO_VERSION) \
# go build -a -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS) -extldflags '-static'" \
# -o $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY)) $(RELEASE_BINARY)
#
#release-archive: $(RELEASE_DIR)
# tar -czf $(RELEASE_DIR)/kubekey-$(RELEASE_TAG)-$(GOOS)-$(GOARCH).tar.gz -C $(RELEASE_DIR)/ $(notdir $(RELEASE_BINARY))
# rm -rf $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY))
#
#.PHONY: manifest-modification
#manifest-modification: # Set the manifest images to the staging/production bucket.
# $(MAKE) set-manifest-image \
# MANIFEST_IMG=$(REGISTRY)/$(CAPKK_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
# TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
# $(MAKE) set-manifest-image \
# MANIFEST_IMG=$(REGISTRY)/$(K3S_BOOTSTRAP_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
# TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_image_patch.yaml"
# $(MAKE) set-manifest-image \
# MANIFEST_IMG=$(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
# TARGET_RESOURCE="./controlplane/k3s/config/default/manager_image_patch.yaml"
# $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./config/default/manager_pull_policy.yaml"
# $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_pull_policy.yaml"
# $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./controlplane/k3s/config/default/manager_pull_policy.yaml"
#
#.PHONY: release-manifests
#release-manifests: $(RELEASE_DIR) $(KUSTOMIZE) ## Build the manifests to publish with a release
# # Build capkk-components.
# $(KUSTOMIZE) build config/default > $(RELEASE_DIR)/infrastructure-components.yaml
# # Build bootstrap-components.
# $(KUSTOMIZE) build bootstrap/k3s/config/default > $(RELEASE_DIR)/bootstrap-components.yaml
# # Build control-plane-components.
# $(KUSTOMIZE) build controlplane/k3s/config/default > $(RELEASE_DIR)/control-plane-components.yaml
#
# # Add metadata to the release artifacts
# cp metadata.yaml $(RELEASE_DIR)/metadata.yaml
#
#.PHONY: release-templates
#release-templates: $(RELEASE_DIR) ## Generate release templates
# cp templates/cluster-template*.yaml $(RELEASE_DIR)/
#
#.PHONY: release-prod
#release-prod: ## Build and push container images to the prod
# REGISTRY=$(PROD_REGISTRY) TAG=$(RELEASE_TAG) $(MAKE) docker-build-all docker-push-all
## --------------------------------------
## Docker
## --------------------------------------
.PHONY: docker-push-all
docker-push-all: $(addprefix docker-push-,$(ALL_ARCH)) ## Push the docker images to be included in the release for all architectures + related multiarch manifests
$(MAKE) docker-push-manifest-capkk
$(MAKE) docker-push-manifest-k3s-bootstrap
$(MAKE) docker-push-manifest-k3s-control-plane
docker-push-%:
$(MAKE) ARCH=$* docker-push
.PHONY: docker-push
docker-push: ## Push the docker images
docker push $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
docker push $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
docker push $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
.PHONY: docker-push-manifest-capkk
docker-push-manifest-capkk: ## Push the multiarch manifest for the capkk docker images
## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
docker manifest create --amend $(CAPKK_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CAPKK_CONTROLLER_IMG)\-&:$(TAG)~g")
@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${CAPKK_CONTROLLER_IMG}:${TAG} ${CAPKK_CONTROLLER_IMG}-$${arch}:${TAG}; done
docker manifest push --purge $(CAPKK_CONTROLLER_IMG):$(TAG)
.PHONY: docker-push-manifest-k3s-bootstrap
docker-push-manifest-k3s-bootstrap: ## Push the multiarch manifest for the k3s bootstrap docker images
## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
docker manifest create --amend $(K3S_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(K3S_BOOTSTRAP_CONTROLLER_IMG)\-&:$(TAG)~g")
@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${K3S_BOOTSTRAP_CONTROLLER_IMG}:${TAG} ${K3S_BOOTSTRAP_CONTROLLER_IMG}-$${arch}:${TAG}; done
docker manifest push --purge $(K3S_BOOTSTRAP_CONTROLLER_IMG):$(TAG)
.PHONY: docker-push-manifest-k3s-control-plane
docker-push-manifest-k3s-control-plane: ## Push the multiarch manifest for the k3s control plane docker images
## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
docker manifest create --amend $(K3S_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(K3S_CONTROL_PLANE_CONTROLLER_IMG)\-&:$(TAG)~g")
@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${K3S_CONTROL_PLANE_CONTROLLER_IMG}:${TAG} ${K3S_CONTROL_PLANE_CONTROLLER_IMG}-$${arch}:${TAG}; done
docker manifest push --purge $(K3S_CONTROL_PLANE_CONTROLLER_IMG):$(TAG)
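The sed substitution in these targets rewrites every word of ALL_ARCH into a per-arch image reference; for ALL_ARCH = amd64 arm64 the capkk target expands to roughly this sketch (registry name illustrative):

```shell
docker manifest create --amend example/capkk-controller:dev \
    example/capkk-controller-amd64:dev example/capkk-controller-arm64:dev
docker manifest annotate --arch amd64 example/capkk-controller:dev example/capkk-controller-amd64:dev
docker manifest annotate --arch arm64 example/capkk-controller:dev example/capkk-controller-arm64:dev
docker manifest push --purge example/capkk-controller:dev
```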
.PHONY: set-manifest-pull-policy
set-manifest-pull-policy:
$(info Updating kustomize pull policy file for manager resources)
sed -i'' -e 's@imagePullPolicy: .*@imagePullPolicy: '"$(PULL_POLICY)"'@' $(TARGET_RESOURCE)
.PHONY: set-manifest-image
set-manifest-image:
$(info Updating kustomize image patch file for manager resource)
sed -i'' -e 's@image: .*@image: '"${MANIFEST_IMG}:$(MANIFEST_TAG)"'@' $(TARGET_RESOURCE)
#
#.PHONY: docker-push-all
#docker-push-all: $(addprefix docker-push-,$(ALL_ARCH)) ## Push the docker images to be included in the release for all architectures + related multiarch manifests
# $(MAKE) docker-push-manifest-capkk
# $(MAKE) docker-push-manifest-k3s-bootstrap
# $(MAKE) docker-push-manifest-k3s-control-plane
#
#docker-push-%:
# $(MAKE) ARCH=$* docker-push
#
#.PHONY: docker-push
#docker-push: ## Push the docker images
# docker push $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
# docker push $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
# docker push $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
#
#.PHONY: docker-push-manifest-capkk
#docker-push-manifest-capkk: ## Push the multiarch manifest for the capkk docker images
# ## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
# docker manifest create --amend $(CAPKK_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CAPKK_CONTROLLER_IMG)\-&:$(TAG)~g")
# @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${CAPKK_CONTROLLER_IMG}:${TAG} ${CAPKK_CONTROLLER_IMG}-$${arch}:${TAG}; done
# docker manifest push --purge $(CAPKK_CONTROLLER_IMG):$(TAG)
#
#.PHONY: docker-push-manifest-k3s-bootstrap
#docker-push-manifest-k3s-bootstrap: ## Push the multiarch manifest for the k3s bootstrap docker images
# ## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
# docker manifest create --amend $(K3S_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(K3S_BOOTSTRAP_CONTROLLER_IMG)\-&:$(TAG)~g")
# @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${K3S_BOOTSTRAP_CONTROLLER_IMG}:${TAG} ${K3S_BOOTSTRAP_CONTROLLER_IMG}-$${arch}:${TAG}; done
# docker manifest push --purge $(K3S_BOOTSTRAP_CONTROLLER_IMG):$(TAG)
#
#.PHONY: docker-push-manifest-k3s-control-plane
#docker-push-manifest-k3s-control-plane: ## Push the multiarch manifest for the k3s control plane docker images
# ## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
# docker manifest create --amend $(K3S_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(K3S_CONTROL_PLANE_CONTROLLER_IMG)\-&:$(TAG)~g")
# @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${K3S_CONTROL_PLANE_CONTROLLER_IMG}:${TAG} ${K3S_CONTROL_PLANE_CONTROLLER_IMG}-$${arch}:${TAG}; done
# docker manifest push --purge $(K3S_CONTROL_PLANE_CONTROLLER_IMG):$(TAG)
#
#.PHONY: set-manifest-pull-policy
#set-manifest-pull-policy:
# $(info Updating kustomize pull policy file for manager resources)
# sed -i'' -e 's@imagePullPolicy: .*@imagePullPolicy: '"$(PULL_POLICY)"'@' $(TARGET_RESOURCE)
#
#.PHONY: set-manifest-image
#set-manifest-image:
# $(info Updating kustomize image patch file for manager resource)
# sed -i'' -e 's@image: .*@image: '"${MANIFEST_IMG}:$(MANIFEST_TAG)"'@' $(TARGET_RESOURCE)
## --------------------------------------
## Cleanup / Verification
@@ -524,20 +521,19 @@ clean: ## Remove all generated files
.PHONY: clean-bin
clean-bin: ## Remove all generated binaries
rm -rf $(BIN_DIR)
rm -rf $(TOOLS_BIN_DIR)
.PHONY: clean-release
clean-release: ## Remove the release folder
rm -rf $(RELEASE_DIR)
.PHONY: clean-release-git
clean-release-git: ## Restores the git files usually modified during a release
git restore ./*manager_image_patch.yaml ./*manager_pull_policy.yaml
.PHONY: clean-generated-yaml
clean-generated-yaml: ## Remove files generated by conversion-gen from the mentioned dirs. Example SRC_DIRS="./api/v1beta1"
(IFS=','; for i in $(SRC_DIRS); do find $$i -type f -name '*.yaml' -exec rm -f {} \;; done)
#.PHONY: clean-release
#clean-release: ## Remove the release folder
# rm -rf $(RELEASE_DIR)
#.PHONY: clean-release-git
#clean-release-git: ## Restores the git files usually modified during a release
# git restore ./*manager_image_patch.yaml ./*manager_pull_policy.yaml
#
#.PHONY: clean-generated-yaml
#clean-generated-yaml: ## Remove files generated by conversion-gen from the mentioned dirs. Example SRC_DIRS="./api/v1beta1"
# (IFS=','; for i in $(SRC_DIRS); do find $$i -type f -name '*.yaml' -exec rm -f {} \;; done)
#
.PHONY: clean-generated-deepcopy
clean-generated-deepcopy: ## Remove files generated by conversion-gen from the mentioned dirs. Example SRC_DIRS="./api/v1beta1"
(IFS=','; for i in $(SRC_DIRS); do find $$i -type f -name 'zz_generated.deepcopy*' -exec rm -f {} \;; done)
@@ -564,63 +560,42 @@ $(SETUP_ENVTEST_BIN): $(SETUP_ENVTEST) ## Build a local copy of setup-envtest.
$(GOLANGCI_LINT_BIN): $(GOLANGCI_LINT) ## Build a local copy of golangci-lint
$(CONTROLLER_GEN): # Build controller-gen from tools folder.
GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER)
GOBIN=$(BIN_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER)
$(GOTESTSUM): # Build gotestsum from tools folder.
GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GOTESTSUM_PKG) $(GOTESTSUM_BIN) $(GOTESTSUM_VER)
GOBIN=$(BIN_DIR) $(GO_INSTALL) $(GOTESTSUM_PKG) $(GOTESTSUM_BIN) $(GOTESTSUM_VER)
$(KUSTOMIZE): # Build kustomize from tools folder.
CGO_ENABLED=0 GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(KUSTOMIZE_PKG) $(KUSTOMIZE_BIN) $(KUSTOMIZE_VER)
CGO_ENABLED=0 GOBIN=$(BIN_DIR) $(GO_INSTALL) $(KUSTOMIZE_PKG) $(KUSTOMIZE_BIN) $(KUSTOMIZE_VER)
$(SETUP_ENVTEST): # Build setup-envtest from tools folder.
GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(SETUP_ENVTEST_PKG) $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER)
GOBIN=$(BIN_DIR) $(GO_INSTALL) $(SETUP_ENVTEST_PKG) $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER)
$(GOLANGCI_LINT): .github/workflows/golangci-lint.yml # Download golangci-lint using hack script into tools folder.
hack/ensure-golangci-lint.sh \
-b $(TOOLS_BIN_DIR) \
-b $(BIN_DIR) \
$(shell cat .github/workflows/golangci-lint.yml | grep [[:space:]]version | sed 's/.*version: //')
# build the repository ISO artifacts
ISO_ARCH ?= amd64
ISO_OUTPUT_DIR ?= ./output
ISO_BUILD_WORKDIR := hack/gen-repository-iso
ISO_OS_NAMES := centos7 debian9 debian10 ubuntu1604 ubuntu1804 ubuntu2004 ubuntu2204
ISO_BUILD_NAMES := $(addprefix build-iso-,$(ISO_OS_NAMES))
build-iso-all: $(ISO_BUILD_NAMES)
.PHONY: $(ISO_BUILD_NAMES)
$(ISO_BUILD_NAMES):
@export DOCKER_BUILDKIT=1
docker build \
--platform linux/$(ISO_ARCH) \
--build-arg TARGETARCH=$(ISO_ARCH) \
-o type=local,dest=$(ISO_OUTPUT_DIR) \
-f $(ISO_BUILD_WORKDIR)/dockerfile.$(subst build-iso-,,$@) \
$(ISO_BUILD_WORKDIR)
#ISO_ARCH ?= amd64
#ISO_OUTPUT_DIR ?= ./output
#ISO_BUILD_WORKDIR := hack/gen-repository-iso
#ISO_OS_NAMES := centos7 debian9 debian10 ubuntu1604 ubuntu1804 ubuntu2004 ubuntu2204
#ISO_BUILD_NAMES := $(addprefix build-iso-,$(ISO_OS_NAMES))
#build-iso-all: $(ISO_BUILD_NAMES)
#.PHONY: $(ISO_BUILD_NAMES)
#$(ISO_BUILD_NAMES):
# @export DOCKER_BUILDKIT=1
# docker build \
# --platform linux/$(ISO_ARCH) \
# --build-arg TARGETARCH=$(ISO_ARCH) \
# -o type=local,dest=$(ISO_OUTPUT_DIR) \
# -f $(ISO_BUILD_WORKDIR)/dockerfile.$(subst build-iso-,,$@) \
# $(ISO_BUILD_WORKDIR)
#
#go-releaser-test:
# goreleaser release --rm-dist --skip-publish --snapshot
go-releaser-test:
goreleaser release --rm-dist --skip-publish --snapshot
.PHONY: generate-go-deepcopy-kubekey
generate-go-deepcopy-kubekey: $(CONTROLLER_GEN) ## Generate deepcopy object
$(MAKE) clean-generated-deepcopy SRC_DIRS="./pkg/apis/"
$(CONTROLLER_GEN) \
object:headerFile=./hack/boilerplate.go.txt \
paths=./pkg/apis/... \
.PHONY: generate-manifests-kubekey
generate-manifests-kubekey: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc.
$(CONTROLLER_GEN) \
paths=./pkg/apis/... \
crd \
output:crd:dir=./config/helm/crds/
helm-package: ## Helm-package.
helm package config/helm -d ./bin
.PHONY: docker-build-operator
docker-build-operator: ## Build the docker image for operator
DOCKER_BUILDKIT=1 docker build --push --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg LDFLAGS="$(LDFLAGS)" . -t $(CAPKK_CONTROLLER_IMG):$(TAG)
# Format all imports, `goimports` is required.
goimports: ## Format all imports, `goimports` is required.

View File

@@ -5,4 +5,8 @@
3. Support connector extensions
4. Support cloud-native automated batch task management
# Examples
# Install kubekey
## Install in Kubernetes
```shell
helm upgrade --install --create-namespace -n kubekey-system kubekey oci://hub.kubesphere.com.cn/kubekey/kubekey
```

View File

@@ -0,0 +1,37 @@
ARG builder_image
# Build the manager binary
FROM ${builder_image} as builder
ARG goproxy=https://goproxy.cn,direct
ENV GOPROXY ${goproxy}
WORKDIR /workspace
COPY go.mod go.mod
COPY go.sum go.sum
# Cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN --mount=type=cache,target=/go/pkg/mod go mod download
# Copy the go source
COPY ./ ./
ARG ldflags
ARG build_tags
ENV LDFLAGS ${ldflags}
ENV BUILDTAGS ${build_tags}
# Cache the go build into the Go compiler cache folder so we benefit from compiler caching across docker build calls
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
CGO_ENABLED=0 go build -trimpath -tags "$BUILDTAGS" -ldflags "$LDFLAGS" -o controller-manager cmd/controller-manager/controller_manager.go
FROM alpine:3.19.0
WORKDIR /kubekey
COPY --from=builder /workspace/controller-manager /usr/local/bin/controller-manager
ENTRYPOINT ["sh"]

44
build/kk/Dockerfile Normal file
View File

@@ -0,0 +1,44 @@
ARG builder_image
# Build the manager binary
FROM ${builder_image} as builder
ARG goproxy=https://goproxy.cn,direct
ENV GOPROXY ${goproxy}
WORKDIR /workspace
COPY go.mod go.mod
COPY go.sum go.sum
# Cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN --mount=type=cache,target=/go/pkg/mod go mod download
# Copy the go source
COPY ./ ./
ARG ldflags
ARG build_tags
ENV LDFLAGS ${ldflags}
ENV BUILDTAGS ${build_tags}
# Cache the go build into the Go compiler cache folder so we benefit from compiler caching across docker build calls
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
CGO_ENABLED=0 go build -trimpath -tags "$BUILDTAGS" -ldflags "$LDFLAGS" -o kk cmd/kk/kubekey.go
FROM alpine:3.19.0
WORKDIR /kubekey
# install tools
RUN apk update && apk add bash && apk add curl && apk add openssl
RUN curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 && \
chmod 700 get_helm.sh && \
./get_helm.sh
COPY --from=ghcr.io/oras-project/oras:v1.1.0 /bin/oras /usr/local/bin/oras
COPY --from=builder /workspace/kk /usr/local/bin/kk
ENTRYPOINT ["sh"]

View File

@@ -1,3 +1,4 @@
work_dir: /kubekey
artifact:
arch: [ "amd64" ]
# offline artifact package for kk.

View File

@@ -14,29 +14,37 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package app
package options
import (
"flag"
"fmt"
"os"
"os/signal"
"runtime"
"runtime/pprof"
"strings"
"github.com/google/gops/agent"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
)
// ======================================================================================
// PROFILING
// ======================================================================================
var (
profileName string
profileOutput string
)
func addProfilingFlags(flags *pflag.FlagSet) {
func AddProfilingFlags(flags *pflag.FlagSet) {
flags.StringVar(&profileName, "profile", "none", "Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex)")
flags.StringVar(&profileOutput, "profile-output", "profile.pprof", "Name of the file to write the profile to")
}
func initProfiling() error {
func InitProfiling() error {
var (
f *os.File
err error
@@ -73,14 +81,14 @@ func initProfiling() error {
go func() {
<-c
f.Close()
flushProfiling()
FlushProfiling()
os.Exit(0)
}()
return nil
}
func flushProfiling() error {
func FlushProfiling() error {
switch profileName {
case "none":
return nil
@@ -104,3 +112,38 @@ func flushProfiling() error {
return nil
}
// ======================================================================================
// GOPS
// ======================================================================================
var gops bool
func AddGOPSFlags(flags *pflag.FlagSet) {
flags.BoolVar(&gops, "gops", false, "Whether to enable gops. When this option is enabled, "+
"the controller-manager listens on a random port on 127.0.0.1; you can then use the gops tool to list and diagnose the running controller-manager.")
}
func InitGOPS() error {
if gops {
// Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
// Bind to a random port on address 127.0.0.1
if err := agent.Listen(agent.Options{}); err != nil {
return err
}
}
return nil
}
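With --gops set, the agent binds a random loopback port and the gops CLI can attach to the process; a short sketch (the pid is illustrative):

```shell
gops              # list Go processes running the gops agent
gops stats 12345  # goroutine and GC stats for the controller-manager
```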
// ======================================================================================
// KLOG
// ======================================================================================
func AddKlogFlags(fs *pflag.FlagSet) {
local := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(local)
local.VisitAll(func(fl *flag.Flag) {
fl.Name = strings.Replace(fl.Name, "_", "-", -1)
fs.AddGoFlag(fl)
})
}
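AddKlogFlags re-registers klog's standard-library flags on the pflag set with underscores turned into dashes, so klog's log_file flag, for example, surfaces as --log-file:

```shell
controller-manager --v=4 --log-file=/tmp/controller-manager.log
```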

View File

@@ -17,38 +17,23 @@ limitations under the License.
package options
import (
"flag"
"strings"
"github.com/spf13/cobra"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog/v2"
)
type ControllerManagerServerOptions struct {
// Enable gops or not.
GOPSEnabled bool
// WorkDir is the base directory in which the command looks for resources (projects etc.).
WorkDir string
// Debug mode: after a successful execution of a Pipeline, retain runtime data, including task execution status and parameters.
Debug bool
// ControllerGates is the list of controller gates used to enable or disable controllers.
// '*' means "all enabled by default controllers"
// 'foo' means "enable 'foo'"
// '-foo' means "disable 'foo'"
// first item for a particular name wins.
// e.g. '-foo,foo' means "disable foo", 'foo,-foo' means "enable foo"
// * has the lowest priority.
// e.g. *,-foo, means "disable 'foo'"
ControllerGates []string
MaxConcurrentReconciles int
LeaderElection bool
}
func NewControllerManagerServerOptions() *ControllerManagerServerOptions {
return &ControllerManagerServerOptions{
WorkDir: "/var/lib/kubekey",
ControllerGates: []string{"*"},
WorkDir: "/kubekey",
MaxConcurrentReconciles: 1,
}
}
@@ -56,22 +41,9 @@ func NewControllerManagerServerOptions() *ControllerManagerServerOptions {
func (o *ControllerManagerServerOptions) Flags() cliflag.NamedFlagSets {
fss := cliflag.NamedFlagSets{}
gfs := fss.FlagSet("generic")
gfs.BoolVar(&o.GOPSEnabled, "gops", o.GOPSEnabled, "Whether to enable gops. When this option is enabled, "+
"the controller-manager listens on a random port on 127.0.0.1; you can then use the gops tool to list and diagnose the running controller-manager.")
gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "The base directory for kubekey. Defaults to the current directory.")
gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode: after a successful execution of a Pipeline, retain runtime data, including task execution status and parameters.")
kfs := fss.FlagSet("klog")
local := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(local)
local.VisitAll(func(fl *flag.Flag) {
fl.Name = strings.Replace(fl.Name, "_", "-", -1)
kfs.AddGoFlag(fl)
})
cfs := fss.FlagSet("controller-manager")
cfs.StringSliceVar(&o.ControllerGates, "controllers", o.ControllerGates, "The list of controller gates used to enable or disable controllers. "+
"'*' means \"all controllers enabled by default\"")
cfs.IntVar(&o.MaxConcurrentReconciles, "max-concurrent-reconciles", o.MaxConcurrentReconciles, "The number of maximum concurrent reconciles for controller.")
cfs.BoolVar(&o.LeaderElection, "leader-election", o.LeaderElection, "Whether to enable leader election for controller-manager.")
return fss
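After this change the server options expose roughly the following flag surface; a sketch of an invocation (values illustrative):

```shell
controller-manager --work-dir=/kubekey --debug \
    --max-concurrent-reconciles=4 --leader-election
```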

View File

@@ -21,7 +21,6 @@ import (
"io/fs"
"os"
"github.com/google/gops/agent"
"github.com/spf13/cobra"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
@@ -36,15 +35,16 @@ func NewControllerManagerCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "controller-manager",
Short: "kubekey controller manager",
RunE: func(cmd *cobra.Command, args []string) error {
if o.GOPSEnabled {
// Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
// Bind to a random port on address 127.0.0.1
if err := agent.Listen(agent.Options{}); err != nil {
return err
}
PersistentPreRunE: func(*cobra.Command, []string) error {
if err := options.InitGOPS(); err != nil {
return err
}
return options.InitProfiling()
},
PersistentPostRunE: func(*cobra.Command, []string) error {
return options.FlushProfiling()
},
RunE: func(cmd *cobra.Command, args []string) error {
o.Complete(cmd, args)
// create the work directory if it does not exist
_const.SetWorkDir(o.WorkDir)
@@ -57,16 +57,23 @@ },
},
}
// add common flags
flags := cmd.PersistentFlags()
options.AddProfilingFlags(flags)
options.AddKlogFlags(flags)
options.AddGOPSFlags(flags)
fs := cmd.Flags()
for _, f := range o.Flags().FlagSets {
fs.AddFlagSet(f)
}
cmd.AddCommand(newVersionCommand())
return cmd
}
func run(ctx context.Context, o *options.ControllerManagerServerOptions) error {
return manager.NewControllerManager(manager.ControllerManagerOptions{
ControllerGates: o.ControllerGates,
MaxConcurrentReconciles: o.MaxConcurrentReconciles,
LeaderElection: o.LeaderElection,
}).Run(ctx)

View File

@@ -14,30 +14,20 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
package app
import ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
type Options struct {
ControllerGates []string
ctrlcontroller.Options
}
// IsControllerEnabled checks whether the named controller is enabled.
func (o Options) IsControllerEnabled(name string) bool {
hasStar := false
for _, ctrl := range o.ControllerGates {
if ctrl == name {
return true
}
if ctrl == "-"+name {
return false
}
if ctrl == "*" {
hasStar = true
}
}
return hasStar
}
import (
"github.com/spf13/cobra"
"github.com/kubesphere/kubekey/v4/version"
)
func newVersionCommand() *cobra.Command {
return &cobra.Command{
Use: "version",
Short: "Print the version of KubeSphere controller-manager",
Run: func(cmd *cobra.Command, args []string) {
cmd.Println(version.Get())
},
}
}
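For reference, the IsControllerEnabled helper removed above implements these gate semantics: the first exact match for a controller name wins, and '*' has the lowest priority, so the old --controllers flag behaved like this sketch (controller names are illustrative):

```shell
controller-manager --controllers='*'            # every default controller
controller-manager --controllers='-pipeline,*'  # everything except pipeline
controller-manager --controllers='pipeline'     # only pipeline
```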

View File

@@ -58,7 +58,7 @@ func newCreateClusterCommand() *cobra.Command {
return err
}
}
return run(signals.SetupSignalHandler(), pipeline, config, inventory, o.CommonOptions)
return run(signals.SetupSignalHandler(), pipeline, config, inventory)
},
}

View File

@@ -1,40 +0,0 @@
/*
Copyright 2024 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"github.com/google/gops/agent"
"github.com/spf13/pflag"
)
var gops bool
func addGOPSFlags(flags *pflag.FlagSet) {
flags.BoolVar(&gops, "gops", false, "Whether to enable gops. When this option is enabled, "+
"the controller-manager listens on a random port on 127.0.0.1; you can then use the gops tool to list and diagnose the running controller-manager.")
}
func initGOPS() error {
if gops {
// Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
// Bind to a random port on address 127.0.0.1
if err := agent.Listen(agent.Options{}); err != nil {
return err
}
}
return nil
}

View File

@@ -1,5 +1,5 @@
/*
Copyright 2024 The KubeSphere Authors.
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,178 +17,133 @@ limitations under the License.
package options
import (
"encoding/json"
"flag"
"fmt"
"os"
"path/filepath"
"os/signal"
"runtime"
"runtime/pprof"
"strings"
corev1 "k8s.io/api/core/v1"
cliflag "k8s.io/component-base/cli/flag"
"github.com/google/gops/agent"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
"sigs.k8s.io/yaml"
"github.com/kubesphere/kubekey/v4/builtin"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
)
type CommonOptions struct {
// Playbook to execute.
Playbook string
// InventoryFile is the path of the host file.
InventoryFile string
// ConfigFile is the path of the config file.
ConfigFile string
// Set values in the config.
Set []string
// WorkDir is the base directory in which the command looks for resources (projects etc.).
WorkDir string
// Debug mode: after a successful execution of a Pipeline, retain runtime data, including task execution status and parameters.
Debug bool
// ======================================================================================
// PROFILING
// ======================================================================================
var (
profileName string
profileOutput string
)
func AddProfilingFlags(flags *pflag.FlagSet) {
flags.StringVar(&profileName, "profile", "none", "Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex)")
flags.StringVar(&profileOutput, "profile-output", "profile.pprof", "Name of the file to write the profile to")
}
func newCommonOptions() CommonOptions {
o := CommonOptions{}
wd, err := os.Getwd()
if err != nil {
klog.ErrorS(err, "get current dir error")
o.WorkDir = "/tmp/kk"
} else {
o.WorkDir = wd
}
return o
}
func (o *CommonOptions) Flags() cliflag.NamedFlagSets {
fss := cliflag.NamedFlagSets{}
gfs := fss.FlagSet("generic")
gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base directory for kubekey. Defaults to the current directory.")
gfs.StringVarP(&o.ConfigFile, "config", "c", o.ConfigFile, "the config file path. Supports *.yaml")
gfs.StringSliceVar(&o.Set, "set", o.Set, "set a value in the config. Format: --set key=val")
gfs.StringVarP(&o.InventoryFile, "inventory", "i", o.InventoryFile, "the host list file path. Supports *.ini")
gfs.BoolVarP(&o.Debug, "debug", "d", o.Debug, "Debug mode: after a successful execution of a Pipeline, retain runtime data, including task execution status and parameters.")
return fss
}
func (o *CommonOptions) completeRef(pipeline *kubekeyv1.Pipeline) (*kubekeyv1.Config, *kubekeyv1.Inventory, error) {
if !filepath.IsAbs(o.WorkDir) {
wd, err := os.Getwd()
if err != nil {
return nil, nil, fmt.Errorf("get current dir error: %v", err)
}
o.WorkDir = filepath.Join(wd, o.WorkDir)
}
config, err := genConfig(o.ConfigFile)
if err != nil {
return nil, nil, fmt.Errorf("generate config error: %v", err)
}
if wd, err := config.GetValue("work_dir"); err == nil && wd != nil {
// if work_dir is defined in config, use it. otherwise use current dir.
o.WorkDir = wd.(string)
} else if err := config.SetValue("work_dir", o.WorkDir); err != nil {
return nil, nil, fmt.Errorf("work_dir to config error: %v", err)
}
for _, s := range o.Set {
ss := strings.Split(s, "=")
if len(ss) != 2 {
return nil, nil, fmt.Errorf("--set value should be k=v")
}
if err := setValue(config, ss[0], ss[1]); err != nil {
return nil, nil, fmt.Errorf("--set value to config error: %v", err)
}
}
pipeline.Spec.ConfigRef = &corev1.ObjectReference{
Kind: config.Kind,
Namespace: config.Namespace,
Name: config.Name,
UID: config.UID,
APIVersion: config.APIVersion,
ResourceVersion: config.ResourceVersion,
}
inventory, err := genInventory(o.InventoryFile)
if err != nil {
return nil, nil, fmt.Errorf("generate inventory error: %v", err)
}
pipeline.Spec.InventoryRef = &corev1.ObjectReference{
Kind: inventory.Kind,
Namespace: inventory.Namespace,
Name: inventory.Name,
UID: inventory.UID,
APIVersion: inventory.APIVersion,
ResourceVersion: inventory.ResourceVersion,
}
return config, inventory, nil
}
func genConfig(configFile string) (*kubekeyv1.Config, error) {
func InitProfiling() error {
var (
config = &kubekeyv1.Config{}
cdata []byte
err error
f *os.File
err error
)
if configFile != "" {
cdata, err = os.ReadFile(configFile)
} else {
cdata = builtin.DefaultConfig
}
if err != nil {
return nil, fmt.Errorf("read config file error: %v", err)
}
if err := yaml.Unmarshal(cdata, config); err != nil {
return nil, fmt.Errorf("unmarshal config file error: %v", err)
}
if config.Namespace == "" {
config.Namespace = corev1.NamespaceDefault
}
return config, nil
}
func genInventory(inventoryFile string) (*kubekeyv1.Inventory, error) {
var (
inventory = &kubekeyv1.Inventory{}
cdata []byte
err error
)
if inventoryFile != "" {
cdata, err = os.ReadFile(inventoryFile)
} else {
cdata = builtin.DefaultInventory
}
if err != nil {
klog.V(4).ErrorS(err, "read config file error")
return nil, err
}
if err := yaml.Unmarshal(cdata, inventory); err != nil {
klog.V(4).ErrorS(err, "unmarshal config file error")
return nil, err
}
if inventory.Namespace == "" {
inventory.Namespace = corev1.NamespaceDefault
}
return inventory, nil
}
func setValue(config *kubekeyv1.Config, key, val string) error {
switch {
case strings.HasPrefix(val, "{") && strings.HasSuffix(val, "}"):
var value map[string]any
err := json.Unmarshal([]byte(val), &value)
switch profileName {
case "none":
return nil
case "cpu":
f, err = os.Create(profileOutput)
if err != nil {
return err
}
return config.SetValue(key, value)
case strings.HasPrefix(val, "[") && strings.HasSuffix(val, "]"):
var value []any
err := json.Unmarshal([]byte(val), &value)
err = pprof.StartCPUProfile(f)
if err != nil {
return err
}
return config.SetValue(key, value)
// Block and mutex profiles need a call to Set{Block,Mutex}ProfileRate to
// output anything. We choose to sample all events.
case "block":
runtime.SetBlockProfileRate(1)
case "mutex":
runtime.SetMutexProfileFraction(1)
default:
return config.SetValue(key, val)
// Check the profile name is valid.
if profile := pprof.Lookup(profileName); profile == nil {
return fmt.Errorf("unknown profile '%s'", profileName)
}
}
// If the command is interrupted before the end (ctrl-c), flush the
// profiling files
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
<-c
f.Close()
FlushProfiling()
os.Exit(0)
}()
return nil
}
func FlushProfiling() error {
switch profileName {
case "none":
return nil
case "cpu":
pprof.StopCPUProfile()
case "heap":
runtime.GC()
fallthrough
default:
profile := pprof.Lookup(profileName)
if profile == nil {
return nil
}
f, err := os.Create(profileOutput)
if err != nil {
return err
}
defer f.Close()
profile.WriteTo(f, 0)
}
return nil
}
// ======================================================================================
// GOPS
// ======================================================================================
var gops bool
func AddGOPSFlags(flags *pflag.FlagSet) {
flags.BoolVar(&gops, "gops", false, "Whether to enable gops. When this option is enabled, "+
"controller-manager will listen on a random port on 127.0.0.1, and you can use the gops tool to list and diagnose the running controller-manager.")
}
func InitGOPS() error {
if gops {
// Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
// Bind to a random port on address 127.0.0.1
if err := agent.Listen(agent.Options{}); err != nil {
return err
}
}
return nil
}
// ======================================================================================
// KLOG
// ======================================================================================
func AddKlogFlags(fs *pflag.FlagSet) {
local := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(local)
local.VisitAll(func(fl *flag.Flag) {
fl.Name = strings.Replace(fl.Name, "_", "-", -1)
fs.AddGoFlag(fl)
})
}

View File

@ -19,10 +19,11 @@ package options
import (
"fmt"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cliflag "k8s.io/component-base/cli/flag"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
)
func NewCreateClusterOptions() *CreateClusterOptions {
@ -53,7 +54,7 @@ func (o *CreateClusterOptions) Complete(cmd *cobra.Command, args []string) (*kub
pipeline := &kubekeyv1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "create-cluster-",
Namespace: metav1.NamespaceDefault,
Namespace: o.Namespace,
Annotations: map[string]string{
kubekeyv1.BuiltinsProjectAnnotation: "",
},

View File

@ -0,0 +1,195 @@
/*
Copyright 2024 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog/v2"
"sigs.k8s.io/yaml"
"github.com/kubesphere/kubekey/v4/builtin"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
)
type CommonOptions struct {
// Playbook to execute.
Playbook string
// InventoryFile is the path of the host (inventory) file.
InventoryFile string
// ConfigFile is the path of the config file.
ConfigFile string
// Set holds values to set in the config.
Set []string
// WorkDir is the base directory in which the command looks for resources (project etc.).
WorkDir string
// Debug mode: after a successful execution of a Pipeline, retain runtime data, including task execution status and parameters.
Debug bool
// Namespace for all resources.
Namespace string
}
func newCommonOptions() CommonOptions {
o := CommonOptions{
Namespace: metav1.NamespaceDefault,
}
wd, err := os.Getwd()
if err != nil {
klog.ErrorS(err, "get current dir error")
o.WorkDir = "/tmp/kubekey"
} else {
o.WorkDir = wd
}
return o
}
func (o *CommonOptions) Flags() cliflag.NamedFlagSets {
fss := cliflag.NamedFlagSets{}
gfs := fss.FlagSet("generic")
gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base directory for kubekey. Defaults to the current directory.")
gfs.StringVarP(&o.ConfigFile, "config", "c", o.ConfigFile, "the config file path. Supports *.yaml")
gfs.StringSliceVar(&o.Set, "set", o.Set, "set a value in the config. Format: --set key=val")
gfs.StringVarP(&o.InventoryFile, "inventory", "i", o.InventoryFile, "the host list file path. Supports *.ini")
gfs.BoolVarP(&o.Debug, "debug", "d", o.Debug, "Debug mode: after a successful execution of a Pipeline, retain runtime data, including task execution status and parameters.")
gfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "the namespace in which the pipeline will be executed; all referenced resources (pipeline, config, inventory, task) should be in the same namespace")
return fss
}
func (o *CommonOptions) completeRef(pipeline *kubekeyv1.Pipeline) (*kubekeyv1.Config, *kubekeyv1.Inventory, error) {
if !filepath.IsAbs(o.WorkDir) {
wd, err := os.Getwd()
if err != nil {
return nil, nil, fmt.Errorf("get current dir error: %w", err)
}
o.WorkDir = filepath.Join(wd, o.WorkDir)
}
config, err := genConfig(o.ConfigFile)
if err != nil {
return nil, nil, fmt.Errorf("generate config error: %w", err)
}
config.Namespace = o.Namespace
if wd, err := config.GetValue("work_dir"); err == nil && wd != nil {
// if work_dir is defined in config, use it. otherwise use current dir.
o.WorkDir = wd.(string)
} else if err := config.SetValue("work_dir", o.WorkDir); err != nil {
return nil, nil, fmt.Errorf("work_dir to config error: %w", err)
}
for _, s := range o.Set {
ss := strings.Split(s, "=")
if len(ss) != 2 {
return nil, nil, fmt.Errorf("--set value should be k=v")
}
if err := setValue(config, ss[0], ss[1]); err != nil {
return nil, nil, fmt.Errorf("--set value to config error: %w", err)
}
}
pipeline.Spec.ConfigRef = &corev1.ObjectReference{
Kind: config.Kind,
Namespace: config.Namespace,
Name: config.Name,
UID: config.UID,
APIVersion: config.APIVersion,
ResourceVersion: config.ResourceVersion,
}
inventory, err := genInventory(o.InventoryFile)
if err != nil {
return nil, nil, fmt.Errorf("generate inventory error: %w", err)
}
inventory.Namespace = o.Namespace
pipeline.Spec.InventoryRef = &corev1.ObjectReference{
Kind: inventory.Kind,
Namespace: inventory.Namespace,
Name: inventory.Name,
UID: inventory.UID,
APIVersion: inventory.APIVersion,
ResourceVersion: inventory.ResourceVersion,
}
return config, inventory, nil
}
func genConfig(configFile string) (*kubekeyv1.Config, error) {
var (
config = &kubekeyv1.Config{}
cdata []byte
err error
)
if configFile != "" {
cdata, err = os.ReadFile(configFile)
} else {
cdata = builtin.DefaultConfig
}
if err != nil {
return nil, fmt.Errorf("read config file error: %w", err)
}
if err := yaml.Unmarshal(cdata, config); err != nil {
return nil, fmt.Errorf("unmarshal config file error: %w", err)
}
return config, nil
}
func genInventory(inventoryFile string) (*kubekeyv1.Inventory, error) {
var (
inventory = &kubekeyv1.Inventory{}
cdata []byte
err error
)
if inventoryFile != "" {
cdata, err = os.ReadFile(inventoryFile)
} else {
cdata = builtin.DefaultInventory
}
if err != nil {
klog.V(4).ErrorS(err, "read config file error")
return nil, err
}
if err := yaml.Unmarshal(cdata, inventory); err != nil {
klog.V(4).ErrorS(err, "unmarshal config file error")
return nil, err
}
return inventory, nil
}
func setValue(config *kubekeyv1.Config, key, val string) error {
switch {
case strings.HasPrefix(val, "{") && strings.HasSuffix(val, "{"):
var value map[string]any
err := json.Unmarshal([]byte(val), &value)
if err != nil {
return err
}
return config.SetValue(key, value)
case strings.HasPrefix(val, "[") && strings.HasSuffix(val, "]"):
var value []any
err := json.Unmarshal([]byte(val), &value)
if err != nil {
return err
}
return config.SetValue(key, value)
default:
return config.SetValue(key, val)
}
}
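A hedged usage sketch of how --set values flow through setValue (behavior read off the switch above: values shaped like JSON objects or arrays are decoded before being stored, everything else is kept as a plain string; the keys and values here are illustrative):
cfg := &kubekeyv1.Config{}
// plain scalars are stored as strings
_ = setValue(cfg, "replicas", "3")
// JSON arrays are decoded into []any before storage
_ = setValue(cfg, "hosts", `["node1","node2"]`)
// JSON objects are decoded into map[string]any
_ = setValue(cfg, "node", `{"cpu":"2"}`)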

View File

@ -0,0 +1,34 @@
package options
import (
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cliflag "k8s.io/component-base/cli/flag"
)
type PipelineOptions struct {
Name string
Namespace string
WorkDir string
}
func NewPipelineOption() *PipelineOptions {
return &PipelineOptions{
Namespace: metav1.NamespaceDefault,
WorkDir: "/kubekey",
}
}
func (o *PipelineOptions) Flags() cliflag.NamedFlagSets {
fss := cliflag.NamedFlagSets{}
pfs := fss.FlagSet("pipeline flags")
pfs.StringVar(&o.Name, "name", o.Name, "name of pipeline")
pfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "namespace of pipeline")
pfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base directory for kubekey")
return fss
}
func (o *PipelineOptions) Complete(cmd *cobra.Command, args []string) {
// do nothing
}

View File

@ -43,7 +43,7 @@ func (o *PreCheckOptions) Complete(cmd *cobra.Command, args []string) (*kubekeyv
pipeline := &kubekeyv1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "precheck-",
Namespace: metav1.NamespaceDefault,
Namespace: o.Namespace,
Annotations: map[string]string{
kubekeyv1.BuiltinsProjectAnnotation: "",
},

View File

@ -79,7 +79,7 @@ func (o *KubekeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kubeke
pipeline := &kubekeyv1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "run-",
Namespace: metav1.NamespaceDefault,
Namespace: o.Namespace,
Annotations: map[string]string{},
},
}

69
cmd/kk/app/pipeline.go Normal file
View File

@ -0,0 +1,69 @@
package app
import (
"fmt"
"github.com/spf13/cobra"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/manager"
"github.com/kubesphere/kubekey/v4/pkg/proxy"
)
func newPipelineCommand() *cobra.Command {
o := options.NewPipelineOption()
cmd := &cobra.Command{
Use: "pipeline",
Short: "Executor a pipeline in kubernetes",
RunE: func(cmd *cobra.Command, args []string) error {
_const.SetWorkDir(o.WorkDir)
restconfig, err := proxy.NewConfig()
if err != nil {
return fmt.Errorf("could not get rest config: %w", err)
}
client, err := ctrlclient.New(restconfig, ctrlclient.Options{
Scheme: _const.Scheme,
})
if err != nil {
return fmt.Errorf("could not create runtime client: %w", err)
}
ctx := signals.SetupSignalHandler()
var pipeline = new(kubekeyv1.Pipeline)
var config = new(kubekeyv1.Config)
var inventory = new(kubekeyv1.Inventory)
if err := client.Get(ctx, ctrlclient.ObjectKey{
Name: o.Name,
Namespace: o.Namespace,
}, pipeline); err != nil {
return err
}
if err := client.Get(ctx, ctrlclient.ObjectKey{
Name: pipeline.Spec.ConfigRef.Name,
Namespace: pipeline.Spec.ConfigRef.Namespace,
}, config); err != nil {
return err
}
if err := client.Get(ctx, ctrlclient.ObjectKey{
Name: pipeline.Spec.InventoryRef.Name,
Namespace: pipeline.Spec.InventoryRef.Namespace,
}, inventory); err != nil {
return err
}
return manager.NewCommandManager(manager.CommandManagerOptions{
Pipeline: pipeline,
Config: config,
Inventory: inventory,
Client: client,
}).Run(ctx)
},
}
fs := cmd.Flags()
for _, f := range o.Flags().FlagSets {
fs.AddFlagSet(f)
}
return cmd
}
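A minimal sketch of driving this command programmatically, e.g. from a test in the same package (the flag names are the ones registered by PipelineOptions above; the values are illustrative):
cmd := newPipelineCommand()
cmd.SetArgs([]string{"--name", "demo", "--namespace", "default", "--work-dir", "/kubekey"})
// runs RunE: fetches the pipeline, its config and inventory, then starts the command manager
err := cmd.Execute()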

View File

@ -50,7 +50,7 @@ func newPreCheckCommand() *cobra.Command {
return err
}
}
return run(signals.SetupSignalHandler(), pipeline, config, inventory, o.CommonOptions)
return run(signals.SetupSignalHandler(), pipeline, config, inventory)
},
}

View File

@ -17,12 +17,9 @@ limitations under the License.
package app
import (
"flag"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
)
var internalCommand = []*cobra.Command{}
@ -37,39 +34,31 @@ func registerInternalCommand(command *cobra.Command) {
internalCommand = append(internalCommand, command)
}
func NewKubeKeyCommand() *cobra.Command {
func NewRootCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "kk",
Long: "kubekey is a daemon that execute command in a node",
PersistentPreRunE: func(*cobra.Command, []string) error {
if err := initGOPS(); err != nil {
if err := options.InitGOPS(); err != nil {
return err
}
return initProfiling()
return options.InitProfiling()
},
PersistentPostRunE: func(*cobra.Command, []string) error {
return flushProfiling()
return options.FlushProfiling()
},
}
// add common flag
flags := cmd.PersistentFlags()
addProfilingFlags(flags)
addKlogFlags(flags)
addGOPSFlags(flags)
options.AddProfilingFlags(flags)
options.AddKlogFlags(flags)
options.AddGOPSFlags(flags)
cmd.AddCommand(newRunCommand())
cmd.AddCommand(newPipelineCommand())
cmd.AddCommand(newVersionCommand())
// internal command
cmd.AddCommand(internalCommand...)
return cmd
}
func addKlogFlags(fs *pflag.FlagSet) {
local := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(local)
local.VisitAll(func(fl *flag.Flag) {
fl.Name = strings.Replace(fl.Name, "_", "-", -1)
fs.AddGoFlag(fl)
})
}

View File

@ -18,11 +18,13 @@ package app
import (
"context"
"fmt"
"io/fs"
"os"
"github.com/spf13/cobra"
"k8s.io/klog/v2"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
@ -51,7 +53,7 @@ func newRunCommand() *cobra.Command {
return err
}
}
return run(signals.SetupSignalHandler(), kk, config, inventory, o.CommonOptions)
return run(signals.SetupSignalHandler(), kk, config, inventory)
},
}
@ -61,19 +63,36 @@ func newRunCommand() *cobra.Command {
return cmd
}
func run(ctx context.Context, kk *kubekeyv1.Pipeline, config *kubekeyv1.Config, inventory *kubekeyv1.Inventory, o options.CommonOptions) error {
if err := proxy.Init(); err != nil {
func run(ctx context.Context, pipeline *kubekeyv1.Pipeline, config *kubekeyv1.Config, inventory *kubekeyv1.Inventory) error {
restconfig, err := proxy.NewConfig()
if err != nil {
return fmt.Errorf("could not get rest config: %w", err)
}
client, err := ctrlclient.New(restconfig, ctrlclient.Options{
Scheme: _const.Scheme,
})
if err != nil {
return fmt.Errorf("could not get runtime-client: %w", err)
}
// create config, inventory and pipeline
if err := client.Create(ctx, config); err != nil {
klog.ErrorS(err, "Create config error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return err
}
if err := client.Create(ctx, inventory); err != nil {
klog.ErrorS(err, "Create inventory error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return err
}
if err := client.Create(ctx, pipeline); err != nil {
klog.ErrorS(err, "Create pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return err
}
mgr, err := manager.NewCommandManager(manager.CommandManagerOptions{
Pipeline: kk,
return manager.NewCommandManager(manager.CommandManagerOptions{
Pipeline: pipeline,
Config: config,
Inventory: inventory,
})
if err != nil {
klog.ErrorS(err, "Create command manager error")
return err
}
return mgr.Run(ctx)
Client: client,
}).Run(ctx)
}

View File

@ -25,7 +25,7 @@ import (
)
func main() {
command := app.NewKubeKeyCommand()
command := app.NewRootCommand()
code := cli.Run(command)
os.Exit(code)
}

View File

@ -7,9 +7,9 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.0
version: 1.0.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "v4.0.0"
appVersion: "dev"

View File

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
controller-gen.kubebuilder.io/version: v0.15.0
name: configs.kubekey.kubesphere.io
spec:
group: kubekey.kubesphere.io
@ -19,14 +19,19 @@ spec:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object

View File

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
controller-gen.kubebuilder.io/version: v0.15.0
name: inventories.kubekey.kubesphere.io
spec:
group: kubekey.kubesphere.io
@ -19,14 +19,19 @@ spec:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object

View File

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
controller-gen.kubebuilder.io/version: v0.15.0
name: pipelines.kubekey.kubesphere.io
spec:
group: kubekey.kubesphere.io
@ -27,19 +27,27 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .metadata.labels['kubekey\.kubesphere\.io/job']
name: Job
type: string
name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@ -52,33 +60,40 @@ spec:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
description: |-
If referring to a piece of an object instead of an entire object, this string
should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within a pod, this would take on a value like:
"spec.containers{name}" (where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]" (container with
index 2 in this pod). This syntax is chosen only to have some well-defined way of
referencing a part of an object.
TODO: this design is not final and this field is subject to change in the future.
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: |-
Kind of the referent.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
description: |-
Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
description: |-
Namespace of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
description: |-
Specific resourceVersion to which this reference is made, if any.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
description: |-
UID of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
type: string
type: object
x-kubernetes-map-type: atomic
@ -94,33 +109,40 @@ spec:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
description: |-
If referring to a piece of an object instead of an entire object, this string
should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within a pod, this would take on a value like:
"spec.containers{name}" (where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]" (container with
index 2 in this pod). This syntax is chosen only to have some well-defined way of
referencing a part of an object.
TODO: this design is not final and this field is subject to change in the future.
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: |-
Kind of the referent.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
description: |-
Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
description: |-
Namespace of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
description: |-
Specific resourceVersion to which this reference is made, if any.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
description: |-
UID of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
type: string
type: object
x-kubernetes-map-type: atomic
@ -131,10 +153,10 @@ spec:
description: Project is storage for executable packages
properties:
addr:
description: Addr is the storage for executable packages (in Ansible
file format). When starting with http or https, it will be obtained
from a Git repository. When starting with file path, it will
be obtained from the local path.
description: |-
Addr is the storage for executable packages (in Ansible file format).
When starting with http or https, it will be obtained from a Git repository.
When starting with file path, it will be obtained from the local path.
type: string
branch:
description: Branch is the git branch of the git Addr.

View File

@ -3,7 +3,6 @@ Common labels
*/}}
{{- define "common.labels" -}}
helm.sh/chart: {{ include "common.chart" . }}
{{ include "common.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
@ -17,19 +16,30 @@ Create chart name and version as used by the chart label.
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- define "operator.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.operator.image "global" .Values.global "chart" .Chart )}}
{{- end -}}
{{- define "executor.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.executor.image "global" .Values.global "chart" .Chart ) }}
{{- end -}}
{{- define "common.image" -}}
{{- $registryName := .Values.operator.image.registry -}}
{{- $repositoryName := .Values.operator.image.repository -}}
{{- define "common.images.image" -}}
{{- $registryName := .global.imageRegistry -}}
{{- $repositoryName := .imageRoot.repository -}}
{{- $separator := ":" -}}
{{- $termination := .Values.operator.image.tag | toString -}}
{{- if .Values.operator.image.digest }}
{{- $termination := .chart.AppVersion | toString -}}
{{- if .global.tag }}
{{- $termination = .global.tag | toString -}}
{{- end -}}
{{- if .imageRoot.registry }}
{{- $registryName = .imageRoot.registry -}}
{{- end -}}
{{- if .imageRoot.tag }}
{{- $termination = .imageRoot.tag | toString -}}
{{- end -}}
{{- if .imageRoot.digest }}
{{- $separator = "@" -}}
{{- $termination = .Values.operator.image.digest | toString -}}
{{- $termination = .imageRoot.digest | toString -}}
{{- end -}}
{{- if $registryName }}
{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
{{- else }}
{{- printf "%s%s%s" $repositoryName $separator $termination -}}
{{- end -}}
{{- end -}}
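For readers who don't parse Helm template dicts fluently, a hedged Go rendering of the precedence common.images.image implements (per-image registry/tag/digest override the global values, a digest switches the separator to "@", and the chart appVersion is the fallback tag):
import "fmt"
func imageRef(globalRegistry, globalTag, appVersion, registry, repo, tag, digest string) string {
sep, term := ":", appVersion
if globalTag != "" {
term = globalTag // global tag overrides the chart appVersion
}
if registry == "" {
registry = globalRegistry // fall back to the global registry
}
if tag != "" {
term = tag // per-image tag wins over the global tag
}
if digest != "" {
sep, term = "@", digest // digests are addressed with "@", not ":"
}
if registry != "" {
return fmt.Sprintf("%s/%s%s%s", registry, repo, sep, term)
}
return fmt.Sprintf("%s%s%s", repo, sep, term)
}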

View File

@ -22,7 +22,7 @@ spec:
labels: {{ include "common.labels" . | nindent 8 }}
app: kk-operator
spec:
serviceAccountName: {{ .Values.serviceAccount.name }}
serviceAccountName: kk-operator
{{- if .Values.operator.pullSecrets }}
imagePullSecrets: {{ .Values.operator.pullSecrets }}
{{- end }}
@ -41,7 +41,7 @@ spec:
terminationGracePeriodSeconds: {{ .Values.operator.terminationGracePeriodSeconds }}
containers:
- name: ks-controller-manager
image: {{ template "common.image" . }}
image: {{ template "operator.image" . }}
imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
{{- if .Values.operator.command }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.operator.command "context" $) | nindent 12 }}
@ -50,6 +50,12 @@ spec:
{{- if .Values.operator.extraEnvVars }}
{{- include "common.tplvalues.render" (dict "value" .Values.operator.extraEnvVars "context" $) | nindent 12 }}
{{- end }}
- name: EXECUTOR_IMAGE
value: {{ template "executor.image" . }}
- name: EXECUTOR_IMAGE_PULLPOLICY
value: {{ .Values.executor.image.pullPolicy }}
- name: EXECUTOR_SERVICEACCOUNT
value: kk-executor
{{- if .Values.operator.resources }}
resources: {{- toYaml .Values.operator.resources | nindent 12 }}
{{- end }}

View File

@ -2,35 +2,86 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ .Values.role }}
name: kk-operator
namespace: {{ .Release.Namespace }}
labels: {{- include "common.labels" . | nindent 4 }}
rules:
- apiGroups:
- kubekey.kubesphere.io
- kubekey.kubesphere.io
resources:
- configs
- inventories
- configs
- inventories
verbs:
- get
- list
- watch
- get
- list
- watch
- apiGroups:
- kubekey.kubesphere.io
- kubekey.kubesphere.io
resources:
- pipelines
- pipelines/status
- pipelines
- pipelines/status
verbs:
- "*"
- "*"
- apiGroups:
- coordination.k8s.io
- coordination.k8s.io
resources:
- leases
- leases
verbs:
- "*"
- "*"
- apiGroups:
- ""
- ""
resources:
- events
- events
verbs:
- "*"
- "*"
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list
- watch
- create
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- get
- list
- watch
- create
- apiGroups:
- "rbac.authorization.k8s.io"
resources:
- clusterrolebindings
verbs:
- get
- list
- watch
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kk-executor
namespace: {{ .Release.Namespace }}
labels: {{- include "common.labels" . | nindent 4 }}
rules:
- apiGroups:
- kubekey.kubesphere.io
resources:
- configs
- inventories
verbs:
- get
- list
- apiGroups:
- kubekey.kubesphere.io
resources:
- pipelines
- pipelines/status
verbs:
- "*"

View File

@ -1,27 +1,23 @@
{{- if .Values.serviceAccount.create -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.serviceAccount.name }}
name: kk-operator
namespace: {{ .Release.Namespace }}
labels: {{- include "common.labels" . | nindent 4}}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Values.serviceAccount.name }}
name: kk-operator
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Values.role }}
name: kk-operator
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.name }}
name: kk-operator
namespace: {{ .Release.Namespace }}

View File

@ -1,17 +1,9 @@
## @section Common parameters
##
# the role which operator pod need
role: "kk-operator"
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: "kk-operator"
global:
imageRegistry: hub.kubesphere.com.cn/kubekey
tag: ""
imagePullSecrets: []
operator:
# tolerations of operator pod
@ -51,13 +43,13 @@ operator:
pullSecrets: []
image:
registry: ""
repository: kubesphere/kubekey-operator
repository: controller-manager
tag: ""
digest: ""
pullPolicy: IfNotPresent
##
## @param resources.limits The resources limits for the haproxy containers
## @param resources.requests The requested resources for the haproxy containers
## @param resources.limits The resources limits for the operator containers
## @param resources.requests The requested resources for the operator containers
##
resources:
limits:
@ -72,7 +64,6 @@ operator:
- controller-manager
- --logtostderr=true
- --leader-election=true
- --controllers=*
## @param extraEnvVars Array with extra environment variables to add to the operator pod
##
extraEnvVars: []
@ -82,3 +73,11 @@ operator:
## @param extraVolumes Optionally specify extra list of additional volumes for the operator pod(s)
##
extraVolumes: []
executor:
image:
registry: ""
repository: executor
tag: ""
digest: ""
pullPolicy: IfNotPresent

4
go.mod
View File

@ -7,14 +7,12 @@ require (
github.com/fsnotify/fsnotify v1.7.0
github.com/go-git/go-git/v5 v5.11.0
github.com/google/gops v0.3.28
github.com/google/uuid v1.5.0
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.13.6
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.4
golang.org/x/crypto v0.18.0
golang.org/x/time v0.5.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.29.1
k8s.io/apimachinery v0.29.1
@ -61,6 +59,7 @@ require (
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.5.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
@ -106,6 +105,7 @@ require (
golang.org/x/sys v0.16.0 // indirect
golang.org/x/term v0.16.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.17.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/appengine v1.6.8 // indirect

View File

@ -17,11 +17,12 @@ limitations under the License.
package v1
import (
"reflect"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/json"
"reflect"
"strings"
)
// +genclient

View File

@ -17,9 +17,10 @@ limitations under the License.
package v1
import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/runtime"
"testing"
)
func TestSetValue(t *testing.T) {

View File

@ -33,8 +33,6 @@ const (
const (
// BuiltinsProjectAnnotation use builtins project of KubeKey
BuiltinsProjectAnnotation = "kubekey.kubesphere.io/builtins-project"
//// PauseAnnotation pause the pipeline
//PauseAnnotation = "kubekey.kubesphere.io/pause"
)
type PipelineSpec struct {
@ -130,6 +128,7 @@ type PipelineFailedDetailHost struct {
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase"
// +kubebuilder:printcolumn:name="Total",type="integer",JSONPath=".status.taskResult.total"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="Job",type="string",JSONPath=".metadata.labels['kubekey\\.kubesphere\\.io/job']"
type Pipeline struct {
metav1.TypeMeta `json:",inline"`

View File

@ -19,6 +19,8 @@ package _const
import (
"path/filepath"
"sync"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
)
var workDirOnce = &sync.Once{}
@ -39,3 +41,8 @@ func GetWorkDir() string {
func GetRuntimeDir() string {
return filepath.Join(workDir, RuntimeDir)
}
func RuntimeDirFromPipeline(obj kubekeyv1.Pipeline) string {
return filepath.Join(GetRuntimeDir(), kubekeyv1.SchemeGroupVersion.String(),
RuntimePipelineDir, obj.Namespace, obj.Name)
}
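A hedged example of the layout this helper yields (assuming RuntimePipelineDir is "pipelines" and writing <RuntimeDir> for the runtime subdirectory constant; neither value is shown in this hunk):
// with workDir /kubekey, namespace "default", name "demo", this resolves to roughly:
//   /kubekey/<RuntimeDir>/kubekey.kubesphere.io/v1/pipelines/default/demo
dir := RuntimeDirFromPipeline(kubekeyv1.Pipeline{
ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"},
})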

View File

@ -17,6 +17,9 @@ limitations under the License.
package _const
import (
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
@ -41,6 +44,9 @@ var (
func newScheme() *runtime.Scheme {
s := runtime.NewScheme()
batchv1.AddToScheme(s)
corev1.AddToScheme(s)
rbacv1.AddToScheme(s)
kubekeyv1.AddToScheme(s)
kubekeyv1alpha1.AddToScheme(s)
kubekeyv1alpha1.AddConversionFuncs(s)

View File

@ -18,26 +18,31 @@ package controllers
import (
"context"
"github.com/kubesphere/kubekey/v4/pkg/executor"
"k8s.io/apimachinery/pkg/runtime"
"os"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
ctrlfinalizer "sigs.k8s.io/controller-runtime/pkg/finalizer"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/variable"
)
const (
pipelineFinalizer = "kubekey.kubesphere.io/pipeline"
labelJob = "kubekey.kubesphere.io/job"
defaultExecutorImage = "hub.kubesphere.com.cn/kubekey/executor:latest"
defaultPullPolicy = "IfNotPresent"
defaultServiceAccount = "kk-executor"
)
type PipelineReconciler struct {
@ -46,55 +51,40 @@ type PipelineReconciler struct {
record.EventRecorder
ctrlfinalizer.Finalizers
MaxConcurrentReconciles int
}
func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
klog.V(5).InfoS("start pipeline reconcile", "pipeline", req.String())
defer klog.V(5).InfoS("finish pipeline reconcile", "pipeline", req.String())
// get pipeline
pipeline := &kubekeyv1.Pipeline{}
err := r.Client.Get(ctx, req.NamespacedName, pipeline)
if err != nil {
if errors.IsNotFound(err) {
klog.V(5).InfoS("pipeline not found", "pipeline", req.String())
klog.V(5).InfoS("pipeline not found", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
}
if pipeline.DeletionTimestamp != nil {
klog.V(5).InfoS("pipeline is deleting", "pipeline", req.String())
if controllerutil.ContainsFinalizer(pipeline, pipelineFinalizer) {
r.clean(ctx, pipeline)
// remove finalizer
}
klog.V(5).InfoS("pipeline is deleting", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(pipeline, pipelineFinalizer) {
excepted := pipeline.DeepCopy()
controllerutil.AddFinalizer(pipeline, pipelineFinalizer)
if err := r.Client.Patch(ctx, pipeline, ctrlclient.MergeFrom(excepted)); err != nil {
klog.V(5).ErrorS(err, "update pipeline error", "pipeline", req.String())
return ctrl.Result{}, err
}
}
switch pipeline.Status.Phase {
case "":
excepted := pipeline.DeepCopy()
pipeline.Status.Phase = kubekeyv1.PipelinePhasePending
if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(excepted)); err != nil {
klog.V(5).ErrorS(err, "update pipeline error", "pipeline", req.String())
klog.V(5).ErrorS(err, "update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return ctrl.Result{}, err
}
case kubekeyv1.PipelinePhasePending:
excepted := pipeline.DeepCopy()
pipeline.Status.Phase = kubekeyv1.PipelinePhaseRunning
if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(excepted)); err != nil {
klog.V(5).ErrorS(err, "update pipeline error", "pipeline", req.String())
klog.V(5).ErrorS(err, "update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return ctrl.Result{}, err
}
case kubekeyv1.PipelinePhaseRunning:
@ -102,20 +92,12 @@ func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
case kubekeyv1.PipelinePhaseFailed:
// do nothing
case kubekeyv1.PipelinePhaseSucceed:
if !pipeline.Spec.Debug {
r.clean(ctx, pipeline)
}
// do nothing
}
return ctrl.Result{}, nil
}
func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *kubekeyv1.Pipeline) (ctrl.Result, error) {
//if _, ok := pipeline.Annotations[kubekeyv1.PauseAnnotation]; ok {
// // if pipeline is paused, do nothing
// klog.V(5).InfoS("pipeline is paused", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
// return ctrl.Result{}, nil
//}
cp := pipeline.DeepCopy()
defer func() {
// update pipeline status
@ -124,43 +106,133 @@ func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *
}
}()
if err := executor.NewTaskExecutor(r.Scheme, r.Client, pipeline).Exec(ctx); err != nil {
klog.ErrorS(err, "Create task controller error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
// check if running executor exist
if jobName, ok := pipeline.Labels[labelJob]; ok {
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: pipeline.Namespace, Name: jobName}, &batchv1.Job{}); err == nil {
// the job has already been created
return ctrl.Result{}, nil
} else if !errors.IsNotFound(err) {
// get job failed
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
// get image from env
image, ok := os.LookupEnv("EXECUTOR_IMAGE")
if !ok {
image = defaultExecutorImage
}
// get image pull policy from env
imagePullPolicy, ok := os.LookupEnv("EXECUTOR_IMAGE_PULLPOLICY")
if !ok {
imagePullPolicy = defaultPullPolicy
}
// get ServiceAccount name for executor pod
saName, ok := os.LookupEnv("EXECUTOR_SERVICEACCOUNT")
if !ok {
saName = defaultServiceAccount
}
var sa = &corev1.ServiceAccount{}
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: pipeline.Namespace, Name: saName}, sa); err != nil {
if !errors.IsNotFound(err) {
klog.ErrorS(err, "get service account", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return ctrl.Result{}, err
}
// create sa
if err := r.Client.Create(ctx, &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{Name: saName, Namespace: pipeline.Namespace},
}); err != nil {
klog.ErrorS(err, "create service account error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return ctrl.Result{}, err
}
}
var rb = &rbacv1.ClusterRoleBinding{}
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: pipeline.Namespace, Name: saName}, rb); err != nil {
if !errors.IsNotFound(err) {
klog.ErrorS(err, "create role binding error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return ctrl.Result{}, err
}
// create the cluster role binding
if err := r.Client.Create(ctx, &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{Namespace: pipeline.Namespace, Name: saName},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: saName,
},
Subjects: []rbacv1.Subject{
{
APIGroup: corev1.GroupName,
Kind: "ServiceAccount",
Name: saName,
Namespace: pipeline.Namespace,
},
},
}); err != nil {
klog.ErrorS(err, "create role binding error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return ctrl.Result{}, err
}
}
// create a job to executor the pipeline
job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pipeline.Name + "-",
Namespace: pipeline.Namespace,
},
Spec: batchv1.JobSpec{
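// exactly one executor pod, one completion, and no retry on failure (BackoffLimit 0)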
Parallelism: ptr.To[int32](1),
Completions: ptr.To[int32](1),
BackoffLimit: ptr.To[int32](0),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{},
Spec: corev1.PodSpec{
ServiceAccountName: saName,
RestartPolicy: "Never",
Containers: []corev1.Container{
{
Name: "executor",
Image: image,
ImagePullPolicy: corev1.PullPolicy(imagePullPolicy),
Command: []string{"kk"},
Args: []string{"pipeline",
"--name", pipeline.Name,
"--namespace", pipeline.Namespace},
},
},
},
},
},
}
if err := controllerutil.SetOwnerReference(pipeline, job, r.Scheme); err != nil {
return ctrl.Result{}, err
}
err := r.Create(ctx, job)
if err != nil {
return ctrl.Result{}, err
}
// add job label to pipeline
cp = pipeline.DeepCopy()
metav1.SetMetaDataLabel(&pipeline.ObjectMeta, labelJob, job.Name)
// update pipeline labels
if err := r.Client.Patch(ctx, pipeline, ctrlclient.MergeFrom(cp)); err != nil {
klog.V(5).ErrorS(err, "update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
// clean runtime directory
func (r *PipelineReconciler) clean(ctx context.Context, pipeline *kubekeyv1.Pipeline) {
klog.V(5).InfoS("clean runtimeDir", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
// delete reference task
taskList := &kubekeyv1alpha1.TaskList{}
if err := r.Client.List(ctx, taskList, ctrlclient.InNamespace(pipeline.Namespace), ctrlclient.MatchingFields{
kubekeyv1alpha1.TaskOwnerField: ctrlclient.ObjectKeyFromObject(pipeline).String(),
}); err != nil {
klog.V(5).ErrorS(err, "list task error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return
}
// clean variable cache
variable.CleanVariable(pipeline)
if err := os.RemoveAll(_const.GetRuntimeDir()); err != nil {
klog.V(5).ErrorS(err, "clean runtime directory error", "runtime dir", _const.GetRuntimeDir(), "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
}
}
// SetupWithManager sets up the controller with the Manager.
func (r *PipelineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options Options) error {
if !options.IsControllerEnabled("pipeline") {
klog.V(5).InfoS("controller is disabled", "controller", "pipeline")
return nil
}
func (r *PipelineReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options.Options).
WithOptions(ctrlcontroller.Options{
MaxConcurrentReconciles: r.MaxConcurrentReconciles,
}).
For(&kubekeyv1.Pipeline{}).
Complete(r)
}

View File

@ -84,13 +84,13 @@ func GroupHostBySerial(hosts []string, serial []any) ([][]string, error) {
if strings.HasSuffix(a.(string), "%") {
b, err := strconv.ParseFloat(a.(string)[:len(a.(string))-1], 64)
if err != nil {
return nil, fmt.Errorf("convert serial %v to float error", a)
return nil, fmt.Errorf("convert serial %v to float error: %w", a, err)
}
sis[i] = int(math.Ceil(float64(len(hosts)) * b / 100.0))
} else {
b, err := strconv.Atoi(a.(string))
if err != nil {
return nil, fmt.Errorf("convert serial %v to int faiiled", a)
return nil, fmt.Errorf("convert serial %v to int error: %w", a, err)
}
sis[i] = b
}
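A worked example of the serial conversion this hunk touches (semantics read off the code above: percentage entries are rounded up against the host count, integer strings are taken as-is; how any remaining hosts are batched depends on code outside this hunk):
hosts := make([]string, 10) // ten placeholder hosts
// "30%" -> int(math.Ceil(10 * 30 / 100.0)) = 3 hosts in the first batch
// "2"   -> 2 hosts in the second batch
batches, err := GroupHostBySerial(hosts, []any{"30%", "2"})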

View File

@ -61,7 +61,7 @@ func filterVersion(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo
if err != nil {
return pongo2.AsValue(nil), &pongo2.Error{
Sender: "filter:version",
OrigError: fmt.Errorf("converter second param error: %v", err),
OrigError: fmt.Errorf("converter second param error: %w", err),
}
}
return pongo2.AsValue(ci >= 0), nil
@ -71,7 +71,7 @@ func filterVersion(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo
if err != nil {
return pongo2.AsValue(nil), &pongo2.Error{
Sender: "filter:version",
OrigError: fmt.Errorf("converter second param error: %v", err),
OrigError: fmt.Errorf("converter second param error: %w", err),
}
}
return pongo2.AsValue(ci <= 0), nil
@ -81,7 +81,7 @@ func filterVersion(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo
if err != nil {
return pongo2.AsValue(nil), &pongo2.Error{
Sender: "filter:version",
OrigError: fmt.Errorf("converter second param error: %v", err),
OrigError: fmt.Errorf("converter second param error: %w", err),
}
}
return pongo2.AsValue(ci == 0), nil
@ -91,7 +91,7 @@ func filterVersion(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo
if err != nil {
return pongo2.AsValue(nil), &pongo2.Error{
Sender: "filter:version",
OrigError: fmt.Errorf("converter second param error: %v", err),
OrigError: fmt.Errorf("converter second param error: %w", err),
}
}
return pongo2.AsValue(ci == 1), nil
@ -101,14 +101,14 @@ func filterVersion(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo
if err != nil {
return pongo2.AsValue(nil), &pongo2.Error{
Sender: "filter:version",
OrigError: fmt.Errorf("converter second param error: %v", err),
OrigError: fmt.Errorf("converter second param error: %w", err),
}
}
return pongo2.AsValue(ci == -1), nil
default:
return pongo2.AsValue(nil), &pongo2.Error{
Sender: "filter:version",
OrigError: fmt.Errorf("converter first param error: %v", err),
OrigError: fmt.Errorf("converter first param error: %w", err),
}
}
}
@ -130,7 +130,7 @@ func filterToJson(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2
if err != nil {
return pongo2.AsValue(nil), &pongo2.Error{
Sender: "to_json",
OrigError: fmt.Errorf("parse in to json: %v", err),
OrigError: fmt.Errorf("parse in to json: %w", err),
}
}
result := string(data)
@ -148,7 +148,7 @@ func filterToYaml(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2
if err != nil {
return pongo2.AsValue(nil), &pongo2.Error{
Sender: "to_yaml",
OrigError: fmt.Errorf("parse in to json: %v", err),
OrigError: fmt.Errorf("parse in to json: %w", err),
}
}
result := string(data)

View File

@ -32,12 +32,10 @@ import (
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/converter"
"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
"github.com/kubesphere/kubekey/v4/pkg/modules"
"github.com/kubesphere/kubekey/v4/pkg/project"
"github.com/kubesphere/kubekey/v4/pkg/proxy"
"github.com/kubesphere/kubekey/v4/pkg/variable"
)
@ -46,19 +44,7 @@ type TaskExecutor interface {
Exec(ctx context.Context) error
}
func NewTaskExecutor(schema *runtime.Scheme, client ctrlclient.Client, pipeline *kubekeyv1.Pipeline) TaskExecutor {
if schema == nil {
schema = _const.Scheme
}
if client == nil {
cli, err := proxy.NewLocalClient()
if err != nil {
return nil
}
client = cli
}
func NewTaskExecutor(client ctrlclient.Client, pipeline *kubekeyv1.Pipeline) TaskExecutor {
// get variable
v, err := variable.GetVariable(client, *pipeline)
if err != nil {
@ -67,7 +53,6 @@ func NewTaskExecutor(schema *runtime.Scheme, client ctrlclient.Client, pipeline
}
return &executor{
schema: schema,
client: client,
pipeline: pipeline,
variable: v,
@ -75,7 +60,6 @@ func NewTaskExecutor(schema *runtime.Scheme, client ctrlclient.Client, pipeline
}
type executor struct {
schema *runtime.Scheme
client ctrlclient.Client
pipeline *kubekeyv1.Pipeline
@ -287,7 +271,7 @@ func (e executor) execBlock(ctx context.Context, options execBlockOptions) error
// complete by pipeline
task.GenerateName = e.pipeline.Name + "-"
task.Namespace = e.pipeline.Namespace
if err := controllerutil.SetControllerReference(e.pipeline, task, e.schema); err != nil {
if err := controllerutil.SetControllerReference(e.pipeline, task, e.client.Scheme()); err != nil {
klog.V(4).ErrorS(err, "Set controller reference error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
return err
}
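
Dropping the injected *runtime.Scheme works because a controller-runtime client already carries the scheme it was built with; client.Scheme() hands it to SetControllerReference. A minimal sketch (setOwner is a hypothetical helper, not code from this commit):

```go
package sketch

import (
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// setOwner mirrors the pattern above: the scheme comes from the client itself,
// so constructors no longer need a separate *runtime.Scheme parameter.
func setOwner(cli ctrlclient.Client, owner, child ctrlclient.Object) error {
	return controllerutil.SetControllerReference(owner, child, cli.Scheme())
}
```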

View File

@ -20,7 +20,6 @@ import (
"context"
"os"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
@ -35,31 +34,17 @@ type commandManager struct {
*kubekeyv1.Inventory
ctrlclient.Client
*runtime.Scheme
}
func (m *commandManager) Run(ctx context.Context) error {
// create config, inventory and pipeline
if err := m.Client.Create(ctx, m.Config); err != nil {
klog.ErrorS(err, "Create config error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline))
return err
}
if err := m.Client.Create(ctx, m.Inventory); err != nil {
klog.ErrorS(err, "Create inventory error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline))
return err
}
if err := m.Client.Create(ctx, m.Pipeline); err != nil {
klog.ErrorS(err, "Create pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline))
return err
}
klog.Infof("[Pipeline %s] start", ctrlclient.ObjectKeyFromObject(m.Pipeline))
cp := m.Pipeline.DeepCopy()
defer func() {
klog.Infof("[Pipeline %s] finish. total: %v,success: %v,ignored: %v,failed: %v", ctrlclient.ObjectKeyFromObject(m.Pipeline),
m.Pipeline.Status.TaskResult.Total, m.Pipeline.Status.TaskResult.Success, m.Pipeline.Status.TaskResult.Ignored, m.Pipeline.Status.TaskResult.Failed)
// update pipeline status
if err := m.Client.Status().Update(ctx, m.Pipeline); err != nil {
klog.ErrorS(err, "Update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline))
if err := m.Client.Status().Patch(ctx, m.Pipeline, ctrlclient.MergeFrom(cp)); err != nil {
klog.V(5).ErrorS(err, "update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline))
}
if !m.Pipeline.Spec.Debug && m.Pipeline.Status.Phase == kubekeyv1.PipelinePhaseSucceed {
@ -72,7 +57,7 @@ func (m *commandManager) Run(ctx context.Context) error {
}()
klog.Infof("[Pipeline %s] start task controller", ctrlclient.ObjectKeyFromObject(m.Pipeline))
if err := executor.NewTaskExecutor(m.Scheme, m.Client, m.Pipeline).Exec(ctx); err != nil {
if err := executor.NewTaskExecutor(m.Client, m.Pipeline).Exec(ctx); err != nil {
klog.ErrorS(err, "Create task controller error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline))
return err
}
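
The defer block now snapshots the pipeline with DeepCopy before mutating it and issues a status Patch with MergeFrom instead of an unconditional Update; only the diff against the snapshot is sent, so concurrent writers are not clobbered. A sketch of the pattern (patchStatus is a hypothetical helper):

```go
package sketch

import (
	"context"

	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// patchStatus sketches the snapshot-then-patch pattern above: MergeFrom diffs
// the mutated object against the pre-mutation copy, so only the changed status
// fields are sent, instead of overwriting the whole object as Update would.
func patchStatus(ctx context.Context, cli ctrlclient.Client, obj ctrlclient.Object, mutate func()) error {
	before := obj.DeepCopyObject().(ctrlclient.Object) // snapshot before mutating
	mutate()                                           // e.g. set Status.Phase
	return cli.Status().Patch(ctx, obj, ctrlclient.MergeFrom(before))
}
```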

View File

@ -18,11 +18,10 @@ package manager
import (
"context"
"fmt"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/config"
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/controllers"
@ -30,7 +29,6 @@ import (
)
type controllerManager struct {
ControllerGates []string
MaxConcurrentReconciles int
LeaderElection bool
}
@ -38,33 +36,25 @@ type controllerManager struct {
func (c controllerManager) Run(ctx context.Context) error {
ctrl.SetLogger(klog.NewKlogr())
restconfig := config.GetConfigOrDie()
proxyTransport, err := proxy.NewProxyTransport(false)
restconfig, err := proxy.NewConfig()
if err != nil {
klog.ErrorS(err, "Create proxy transport error")
return err
return fmt.Errorf("could not get rest config: %w", err)
}
restconfig.Transport = proxyTransport
mgr, err := ctrl.NewManager(restconfig, ctrl.Options{
Scheme: _const.Scheme,
LeaderElection: c.LeaderElection,
LeaderElectionID: "controller-leader-election-kk",
})
if err != nil {
klog.ErrorS(err, "Create manager error")
return err
return fmt.Errorf("could not create controller manager: %w", err)
}
if err := (&controllers.PipelineReconciler{
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor("pipeline"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(ctx, mgr, controllers.Options{
ControllerGates: c.ControllerGates,
Options: ctrlcontroller.Options{
MaxConcurrentReconciles: c.MaxConcurrentReconciles,
},
}); err != nil {
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor("pipeline"),
Scheme: mgr.GetScheme(),
MaxConcurrentReconciles: c.MaxConcurrentReconciles,
}).SetupWithManager(mgr); err != nil {
klog.ErrorS(err, "create pipeline controller error")
return err
}
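
SetupWithManager now takes only the manager, with MaxConcurrentReconciles carried on the reconciler itself. A sketch of how a reconciler can apply that option internally (hypothetical shape; the real PipelineReconciler may wire this differently):

```go
package sketch

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"

	kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
)

// Reconciler is a hypothetical stand-in showing where the option now lives.
type Reconciler struct {
	MaxConcurrentReconciles int
}

func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	return ctrl.Result{}, nil // real reconcile logic lives in PipelineReconciler
}

func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
	// The reconciler applies its own concurrency option via the builder,
	// instead of receiving controller options from the caller.
	return ctrl.NewControllerManagedBy(mgr).
		For(&kubekeyv1.Pipeline{}).
		WithOptions(ctrlcontroller.Options{MaxConcurrentReconciles: r.MaxConcurrentReconciles}).
		Complete(r)
}
```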

View File

@ -19,11 +19,9 @@ package manager
import (
"context"
"k8s.io/klog/v2"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/proxy"
)
// Manager holds shared dependencies and provides them to Runnable.
@ -36,32 +34,26 @@ type CommandManagerOptions struct {
*kubekeyv1.Pipeline
*kubekeyv1.Config
*kubekeyv1.Inventory
ctrlclient.Client
}
func NewCommandManager(o CommandManagerOptions) (Manager, error) {
client, err := proxy.NewLocalClient()
if err != nil {
klog.V(4).ErrorS(err, "Failed to create local client")
return nil, err
}
func NewCommandManager(o CommandManagerOptions) Manager {
return &commandManager{
Pipeline: o.Pipeline,
Config: o.Config,
Inventory: o.Inventory,
Client: client,
Scheme: _const.Scheme,
}, nil
Client: o.Client,
}
}
type ControllerManagerOptions struct {
ControllerGates []string
MaxConcurrentReconciles int
LeaderElection bool
}
func NewControllerManager(o ControllerManagerOptions) Manager {
return &controllerManager{
ControllerGates: o.ControllerGates,
MaxConcurrentReconciles: o.MaxConcurrentReconciles,
LeaderElection: o.LeaderElection,
}

View File

@ -73,13 +73,13 @@ func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) {
return nil
}
if err != nil {
return fmt.Errorf("walk dir %s error: %v", srcParam, err)
return fmt.Errorf("walk dir %s error: %w", srcParam, err)
}
// get the file's original mode
info, err := d.Info()
if err != nil {
return fmt.Errorf("get file info error: %v", err)
return fmt.Errorf("get file info error: %w", err)
}
mode := info.Mode()
if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
@ -88,11 +88,11 @@ func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) {
// read file
data, err := os.ReadFile(path)
if err != nil {
return fmt.Errorf("read file error: %v", err)
return fmt.Errorf("read file error: %w", err)
}
// copy file to remote
if err := conn.CopyFile(ctx, data, path, mode); err != nil {
return fmt.Errorf("copy file error: %v", err)
return fmt.Errorf("copy file error: %w", err)
}
return nil
}); err != nil {
@ -130,7 +130,7 @@ func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) {
return nil
}
if err != nil {
return fmt.Errorf("walk dir %s error: %v", srcParam, err)
return fmt.Errorf("walk dir %s error: %w", srcParam, err)
}
info, err := d.Info()
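
Both copy hunks in this module share one shape: walk the source tree, skip directories, read each file, and wrap every failure with %w plus context. A condensed sketch, with copyFn standing in for the remote copy call:

```go
package sketch

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// walkAndCopy sketches the WalkDir pattern above: directories are skipped,
// each regular file is read, and every error is wrapped with %w so callers
// can unwrap the root cause. copyFn stands in for the remote copy call.
func walkAndCopy(src string, copyFn func(path string, data []byte, mode fs.FileMode) error) error {
	return filepath.WalkDir(src, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return fmt.Errorf("walk dir %s error: %w", src, err)
		}
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return fmt.Errorf("get file info error: %w", err)
		}
		data, err := os.ReadFile(path)
		if err != nil {
			return fmt.Errorf("read file error: %w", err)
		}
		return copyFn(path, data, info.Mode())
	})
}
```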

View File

@ -41,7 +41,7 @@ func TestCopy(t *testing.T) {
Host: "local",
Variable: &testVariable{},
},
ctx: context.WithValue(context.Background(), "connector", &testConnector{
ctx: context.WithValue(context.Background(), ConnKey, &testConnector{
output: []byte("success"),
}),
exceptStderr: "\"src\" or \"content\" in args should be string",
@ -55,7 +55,7 @@ func TestCopy(t *testing.T) {
Host: "local",
Variable: &testVariable{},
},
ctx: context.WithValue(context.Background(), "connector", &testConnector{
ctx: context.WithValue(context.Background(), ConnKey, &testConnector{
output: []byte("success"),
}),
exceptStderr: "\"dest\" in args should be string",
@ -69,7 +69,7 @@ func TestCopy(t *testing.T) {
Host: "local",
Variable: &testVariable{},
},
ctx: context.WithValue(context.Background(), "connector", &testConnector{
ctx: context.WithValue(context.Background(), ConnKey, &testConnector{
output: []byte("success"),
}),
exceptStderr: "\"content\" should copy to a file",
@ -83,7 +83,7 @@ func TestCopy(t *testing.T) {
Host: "local",
Variable: &testVariable{},
},
ctx: context.WithValue(context.Background(), "connector", &testConnector{
ctx: context.WithValue(context.Background(), ConnKey, &testConnector{
output: []byte("success"),
}),
exceptStdout: "success",
@ -97,7 +97,7 @@ func TestCopy(t *testing.T) {
Host: "local",
Variable: &testVariable{},
},
ctx: context.WithValue(context.Background(), "connector", &testConnector{
ctx: context.WithValue(context.Background(), ConnKey, &testConnector{
copyErr: fmt.Errorf("copy failed"),
}),
exceptStderr: "copy failed",
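
The fixtures now store the fake connector under the package's typed key instead of the string "connector", matching the lookup in getConnector (next file). A stand-alone sketch of the injection, with fakeConnector and connKey as hypothetical stand-ins:

```go
package sketch

import (
	"context"
	"testing"
)

// fakeConnector and connKey are hypothetical stand-ins for the test types above.
type fakeConnector struct{ output []byte }

type ctxKey struct{}

var connKey ctxKey

// TestInject shows the injection pattern used in the tests above: the fake
// connector travels in the context under the package's typed key, so the
// module code picks it up instead of dialing a real host.
func TestInject(t *testing.T) {
	ctx := context.WithValue(context.Background(), connKey, &fakeConnector{output: []byte("success")})
	v, ok := ctx.Value(connKey).(*fakeConnector)
	if !ok || string(v.output) != "success" {
		t.Fatal("connector not found in context")
	}
}
```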

View File

@ -19,13 +19,13 @@ package modules
import (
"context"
"fmt"
"github.com/kubesphere/kubekey/v4/pkg/connector"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/v4/pkg/connector"
"github.com/kubesphere/kubekey/v4/pkg/variable"
)
@ -70,10 +70,12 @@ func init() {
RegisterModule("gen_cert", ModuleGenCert)
}
var ConnKey = struct{}{}
func getConnector(ctx context.Context, host string, data map[string]any) (connector.Connector, error) {
var conn connector.Connector
var err error
if v := ctx.Value("connector"); v != nil {
if v := ctx.Value(ConnKey); v != nil {
conn = v.(connector.Connector)
} else {
conn, err = connector.NewConnector(host, data)

View File

@ -69,13 +69,13 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
return nil
}
if err != nil {
return fmt.Errorf("walk dir %s error: %v", srcParam, err)
return fmt.Errorf("walk dir %s error: %w", srcParam, err)
}
// get the file's original mode
info, err := d.Info()
if err != nil {
return fmt.Errorf("get file info error: %v", err)
return fmt.Errorf("get file info error: %w", err)
}
mode := info.Mode()
if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
@ -84,15 +84,15 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
// read file
data, err := os.ReadFile(path)
if err != nil {
return fmt.Errorf("read file error: %v", err)
return fmt.Errorf("read file error: %w", err)
}
result, err := tmpl.ParseFile(ha.(map[string]any), data)
if err != nil {
return fmt.Errorf("parse file error: %v", err)
return fmt.Errorf("parse file error: %w", err)
}
// copy file to remote
if err := conn.CopyFile(ctx, []byte(result), path, mode); err != nil {
return fmt.Errorf("copy file error: %v", err)
return fmt.Errorf("copy file error: %w", err)
}
return nil
}); err != nil {

View File

@ -54,12 +54,12 @@ func newGitProject(pipeline kubekeyv1.Pipeline, update bool) (Project, error) {
if _, err := os.Stat(p.projectDir); os.IsNotExist(err) {
// git clone
if err := p.gitClone(context.Background()); err != nil {
return nil, fmt.Errorf("clone git project error: %v", err)
return nil, fmt.Errorf("clone git project error: %w", err)
}
} else if update {
// git pull
if err := p.gitPull(context.Background()); err != nil {
return nil, fmt.Errorf("pull git project error: %v", err)
return nil, fmt.Errorf("pull git project error: %w", err)
}
}
return p, nil
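
For reference, the clone-if-missing-else-pull flow reads roughly as below, assuming the go-git library; cloneOrPull is a hypothetical helper and the real newGitProject carries more state:

```go
package sketch

import (
	"context"
	"errors"
	"fmt"
	"os"

	git "github.com/go-git/go-git/v5"
)

// cloneOrPull sketches the flow above: clone when the directory is missing,
// otherwise pull only when an update was requested.
func cloneOrPull(ctx context.Context, dir, url string, update bool) error {
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		if _, err := git.PlainCloneContext(ctx, dir, false, &git.CloneOptions{URL: url}); err != nil {
			return fmt.Errorf("clone git project error: %w", err)
		}
		return nil
	}
	if !update {
		return nil
	}
	repo, err := git.PlainOpen(dir)
	if err != nil {
		return fmt.Errorf("open git project error: %w", err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		return fmt.Errorf("get worktree error: %w", err)
	}
	// An already-up-to-date pull is not a failure.
	if err := wt.PullContext(ctx, &git.PullOptions{}); err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
		return fmt.Errorf("pull git project error: %w", err)
	}
	return nil
}
```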

View File

@ -167,7 +167,7 @@ func (s fileStorage) GetList(ctx context.Context, key string, opts apistorage.Li
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
return fmt.Errorf("need ptr to slice: %w", err)
}
// lastKey in result.
@ -190,7 +190,7 @@ func (s fileStorage) GetList(ctx context.Context, key string, opts apistorage.Li
continueKey, _, err := apistorage.DecodeContinue(opts.Predicate.Continue, key)
if err != nil {
klog.V(4).ErrorS(err, "failed to parse continueKey", "continueKey", opts.Predicate.Continue)
return fmt.Errorf("invalid continue token: %v", err)
return fmt.Errorf("invalid continue token: %w", err)
}
startReadOnce := sync.Once{}
continueKeyMatchRule = func(key string) bool {
@ -206,7 +206,7 @@ func (s fileStorage) GetList(ctx context.Context, key string, opts apistorage.Li
case len(opts.ResourceVersion) > 0:
parsedRV, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return fmt.Errorf("invalid resource version: %v", err)
return fmt.Errorf("invalid resource version: %w", err)
}
switch opts.ResourceVersionMatch {
case metav1.ResourceVersionMatchNotOlderThan:
@ -487,7 +487,7 @@ func (s fileStorage) RequestWatchProgress(ctx context.Context) error {
// On success, objPtr will be set to the decoded object.
func decode(codec runtime.Codec, value []byte, objPtr runtime.Object) error {
if _, err := conversion.EnforcePtr(objPtr); err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
return fmt.Errorf("unable to convert output object to pointer: %w", err)
}
_, _, err := codec.Decode(value, nil, objPtr)
if err != nil {
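
The guard in decode is a reusable pattern: reject non-pointer targets up front with a wrapped error, then let the codec fill the object in place. A minimal sketch restating it as a standalone helper:

```go
package sketch

import (
	"fmt"

	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/runtime"
)

// decodeInto mirrors the guard above: EnforcePtr rejects non-pointer targets
// with a wrapped error, then the codec decodes the bytes into the object.
func decodeInto(codec runtime.Codec, value []byte, objPtr runtime.Object) error {
	if _, err := conversion.EnforcePtr(objPtr); err != nil {
		return fmt.Errorf("unable to convert output object to pointer: %w", err)
	}
	_, _, err := codec.Decode(value, nil, objPtr)
	return err
}
```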

View File

@ -23,7 +23,6 @@ import (
"net/http"
"sort"
"strings"
"sync"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -41,7 +40,7 @@ import (
apirest "k8s.io/apiserver/pkg/registry/rest"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
@ -54,40 +53,40 @@ import (
"github.com/kubesphere/kubekey/v4/pkg/proxy/resources/task"
)
var proxyTransport http.RoundTripper
var initOnce sync.Once
func Init() error {
var err error
initOnce.Do(func() {
proxyTransport, err = NewProxyTransport(true)
if err != nil {
klog.V(4).ErrorS(err, "failed to create local transport")
return
}
})
return err
}
func NewLocalClient() (ctrlclient.Client, error) {
return ctrlclient.New(&rest.Config{
Transport: proxyTransport,
}, ctrlclient.Options{
Scheme: _const.Scheme,
})
func NewConfig() (*rest.Config, error) {
restconfig, err := ctrl.GetConfig()
if err != nil {
klog.Infof("kubeconfig in empty, store resources local")
restconfig = &rest.Config{}
}
restconfig.Transport, err = newProxyTransport(restconfig)
if err != nil {
return nil, fmt.Errorf("create proxy transport error: %w", err)
}
restconfig.TLSClientConfig = rest.TLSClientConfig{}
return restconfig, nil
}
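
From the caller's side, the replacement for NewLocalClient is NewConfig plus an ordinary ctrl client. The wiring below is an assumption for illustration, not code from this commit:

```go
package sketch

import (
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	_const "github.com/kubesphere/kubekey/v4/pkg/const"
	"github.com/kubesphere/kubekey/v4/pkg/proxy"
)

// newClient sketches the caller-side wiring: proxy.NewConfig decides local
// vs. remote storage via its transport, and the client is built on top.
func newClient() (ctrlclient.Client, error) {
	restcfg, err := proxy.NewConfig()
	if err != nil {
		return nil, err
	}
	return ctrlclient.New(restcfg, ctrlclient.Options{Scheme: _const.Scheme})
}
```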
// NewProxyTransport returns a new http.RoundTripper for use in the ctrl client.
// if the resource's group version is kubekey.kubesphere.io/v1alpha1, store it locally.
// if the resource's group version is kubekey.kubesphere.io/v1 and isLocal is true, store it locally.
// if the resource's group version is kubekey.kubesphere.io/v1 and isLocal is false, send a remote http request.
func NewProxyTransport(isLocal bool) (http.RoundTripper, error) {
// when restConfig is not empty: connect to a kubernetes cluster and store some resources there,
// such as pipeline.kubekey.kubesphere.io/v1, inventory.kubekey.kubesphere.io/v1 and config.kubekey.kubesphere.io/v1.
// when restConfig is empty: store all resources locally.
//
// SPECIFICALLY: since tasks are runtime data, reentrant and large in quantity,
// they are always stored locally.
func newProxyTransport(restConfig *rest.Config) (http.RoundTripper, error) {
lt := &transport{
isLocal: isLocal,
authz: authorizerfactory.NewAlwaysAllowAuthorizer(),
handlerChainFunc: defaultHandlerChain,
}
if restConfig.Host != "" {
clientFor, err := rest.HTTPClientFor(restConfig)
if err != nil {
return nil, err
}
lt.restClient = clientFor
}
// register kubekeyv1alpha1 resources
kkv1alpha1 := newApiIResources(kubekeyv1alpha1.SchemeGroupVersion)
storage, err := task.NewStorage(internal.NewFileRESTOptionsGetter(kubekeyv1alpha1.SchemeGroupVersion))
@ -113,7 +112,8 @@ func NewProxyTransport(isLocal bool) (http.RoundTripper, error) {
klog.V(4).ErrorS(err, "failed to register resources")
}
if isLocal {
// when restConfig is empty, store all resources locally
if restConfig.Host == "" {
// register kubekeyv1 resources
kkv1 := newApiIResources(kubekeyv1.SchemeGroupVersion)
// add config
@ -190,8 +190,8 @@ func (r *responseWriter) WriteHeader(statusCode int) {
}
type transport struct {
// isLocal indicates whether the transport uses the local file client or the http client
isLocal bool
// restClient is used to connect to the remote cluster
restClient *http.Client
authz authorizer.Authorizer
// routers is a list of routers
@ -202,10 +202,10 @@ type transport struct {
}
func (l *transport) RoundTrip(request *http.Request) (*http.Response, error) {
// kubekey.v1alpha1 resources always use the local client
if !l.isLocal && !strings.HasPrefix(request.URL.Path, "/apis/"+kubekeyv1alpha1.SchemeGroupVersion.String()+"/") {
return http.DefaultTransport.RoundTrip(request)
if l.restClient != nil && !strings.HasPrefix(request.URL.Path, "/apis/"+kubekeyv1alpha1.SchemeGroupVersion.String()) {
return l.restClient.Transport.RoundTrip(request)
}
response := &http.Response{
Proto: "local",
Header: make(http.Header),
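
The dispatch rule (v1alpha1 task traffic stays local; everything else goes to the cluster when a kubeconfig exists) is an ordinary path-routing RoundTripper. A minimal generic sketch:

```go
package sketch

import (
	"net/http"
	"strings"
)

// routingTransport sketches the dispatch above: requests under localPrefix are
// answered by the local handler; everything else goes to the remote transport.
type routingTransport struct {
	localPrefix string
	local       http.RoundTripper // serves from local file storage
	remote      http.RoundTripper // nil when no kubeconfig is available
}

func (t *routingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	if t.remote != nil && !strings.HasPrefix(req.URL.Path, t.localPrefix) {
		return t.remote.RoundTrip(req)
	}
	return t.local.RoundTrip(req)
}
```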

View File

@ -19,7 +19,6 @@ package variable
import (
"encoding/json"
"fmt"
"path/filepath"
"reflect"
"regexp"
"strconv"
@ -30,7 +29,6 @@ import (
"sigs.k8s.io/yaml"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
)
@ -251,11 +249,6 @@ func Extension2String(d map[string]any, ext runtime.RawExtension) (string, error
return result, nil
}
func RuntimeDirFromPipeline(obj kubekeyv1.Pipeline) string {
return filepath.Join(_const.GetRuntimeDir(), kubekeyv1.SchemeGroupVersion.String(),
_const.RuntimePipelineDir, obj.Namespace, obj.Name, _const.RuntimePipelineVariableDir)
}
// GetValue from VariableData by key path
func GetValue(value map[string]any, keys string) any {
switch {

View File

@ -29,6 +29,7 @@ import (
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/variable/source"
)
@ -45,9 +46,9 @@ type Variable interface {
// New creates a Variable: it generates values from the config args and renders them to the source.
func New(client ctrlclient.Client, pipeline kubekeyv1.Pipeline) (Variable, error) {
// new source
s, err := source.New(RuntimeDirFromPipeline(pipeline))
s, err := source.New(filepath.Join(_const.RuntimeDirFromPipeline(pipeline), _const.RuntimePipelineVariableDir))
if err != nil {
klog.V(4).ErrorS(err, "create file source failed", "path", filepath.Join(RuntimeDirFromPipeline(pipeline)), "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
klog.V(4).ErrorS(err, "create file source failed", "path", filepath.Join(_const.RuntimeDirFromPipeline(pipeline), _const.RuntimePipelineVariableDir), "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
return nil, err
}
// get config