Uninstall docker interface (#2478)

Signed-off-by: joyceliu <joyceliu@yunify.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
liujian 2025-03-05 18:55:12 +08:00 committed by GitHub
parent 3e56b095de
commit 86ff6371b6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7028 changed files with 19072 additions and 2036008 deletions

View File

@ -1,3 +1,35 @@
bin
example
exp
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib
*.tmp
.DS_Store
# Test binary, build with `go test -c`
*.test
# IntelliJ
.idea/
*.iml
# Vscode files
.vscode
# rbac and manager config for example provider
manager_image_patch.yaml-e
manager_pull_policy.yaml-e
# Sample config files auto-generated by kubebuilder
config/samples
# test results
_artifacts
# Used during parts of the build process. Files _should_ get cleaned up automatically.
# This is also a good location for any temporary manifests used during development
tmp
# Used by current object
/_output/
dist/

View File

@ -40,7 +40,6 @@ jobs:
- name: Set up Docker buildx
uses: docker/setup-buildx-action@v3
- name: Build and push Docker images
run: |
tag=${{ steps.prepare.outputs.version }}

74
.github/workflows/golangci-lint.yaml vendored Normal file
View File

@ -0,0 +1,74 @@
# CI workflow: lint, verify and test the module on every pull request.
# NOTE: the Makefile extracts GOLANGCI_LINT_VER by grepping the
# " version:" line of this file — keep that value unquoted and on its own line.
name: GolangCILint
on:
  pull_request:
    types: [opened, edited, synchronize, reopened]

# Remove all permissions from GITHUB_TOKEN except metadata.
permissions: {}

jobs:
  golangci:
    name: lint
    runs-on: ubuntu-latest
    # Only run in the upstream repository, not in forks.
    if: github.repository == 'kubesphere/kubekey'
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          # Full history is needed so version/verify tooling can see tags.
          fetch-depth: 0
      - name: Setup golang
        uses: actions/setup-go@v5
        with:
          # Quoted so YAML never retypes a version-looking scalar.
          go-version: "1.23.3"
      - name: Sync mod
        run: make generate-modules
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v6
        with:
          version: v1.63.3
  verify:
    name: verify
    runs-on: ubuntu-latest
    if: github.repository == 'kubesphere/kubekey'
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Setup golang
        uses: actions/setup-go@v5
        with:
          go-version: "1.23.3"
      - name: Sync mod
        run: make generate-modules
      - name: Verify
        run: ALL_VERIFY_CHECKS="goimports releaser" make verify
  test:
    name: test
    runs-on: ubuntu-latest
    if: github.repository == 'kubesphere/kubekey'
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Setup golang
        uses: actions/setup-go@v5
        with:
          go-version: "1.23.3"
      - name: Sync mod
        run: make generate-modules
      - name: Test
        run: make test

View File

@ -1,74 +0,0 @@
name: golangci-lint
on:
pull_request:
types: [opened, edited, synchronize, reopened]
# Remove all permissions from GITHUB_TOKEN except metadata.
permissions: {}
jobs:
golangci:
name: lint
runs-on: ubuntu-latest
if: github.repository == 'kubesphere/kubekey'
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup golang
uses: actions/setup-go@v5
with:
go-version: 1.22
- name: Sync mod
run: make generate-modules
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: v1.59.1
verify:
name: verify
runs-on: ubuntu-latest
if: github.repository == 'kubesphere/kubekey'
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup golang
uses: actions/setup-go@v5
with:
go-version: 1.22
- name: Sync mod
run: make generate-modules
- name: Verify
run: ALL_VERIFY_CHECKS="goimports releaser" make verify
test:
name: test
runs-on: ubuntu-latest
if: github.repository == 'kubesphere/kubekey'
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup golang
uses: actions/setup-go@v5
with:
go-version: 1.22
- name: Sync mod
run: make generate-modules
- name: Test
run: make test

View File

@ -1,40 +0,0 @@
name: goreleaser
on:
push:
tags:
- 'v4*'
permissions:
contents: write
jobs:
goreleaser:
runs-on: ubuntu-latest
if: github.repository == 'kubesphere/kubekey'
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Prepare
id: prepare
run: |
LDFLAGS=$(bash hack/version.sh)
echo "ldflags=${LDFLAGS}" >> "$GITHUB_OUTPUT"
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: 1.21
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v4
with:
distribution: goreleaser
version: ${{ env.GITHUB_REF_NAME }}
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
LDFLAGS: ${{ steps.prepare.outputs.ldflags }}

53
.github/workflows/releaser.yaml vendored Normal file
View File

@ -0,0 +1,53 @@
# Release workflow: runs goreleaser on v4* tags and uploads the generated
# CAPKK release manifests as extra release assets.
# NOTE: the Makefile extracts GORELEASER_VER by grepping the
# " version:" line of this file — keep that value unquoted and on its own line.
name: BuildReleaser
on:
  push:
    tags:
      - 'v4*'

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    # Only run in the upstream repository, not in forks.
    if: github.repository == 'kubesphere/kubekey'
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          # goreleaser needs full history and tags to compute versions.
          fetch-depth: 0
      - name: Prepare
        id: prepare
        # Export ldflags and the tag-derived version for later steps.
        run: |
          LDFLAGS=$(bash hack/version.sh)
          VERSION=latest
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF#refs/tags/}
          fi
          echo "ldflags=${LDFLAGS}" >> "$GITHUB_OUTPUT"
          echo "version=${VERSION}" >> "$GITHUB_OUTPUT"
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          # Quoted so YAML never retypes a version-looking scalar.
          go-version: "1.23.3"
      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v4
        with:
          distribution: goreleaser
          version: v2.5.1
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          LDFLAGS: ${{ steps.prepare.outputs.ldflags }}
      - name: Upload Extra File
        # Stable releases (vX.Y.Z) publish to docker.io/kubesphere;
        # anything else publishes to docker.io/kubespheredev.
        # Fix: the original set TAG but tested/used lowercase $tag, which is
        # never set, so the regex branch could not match and make ran with an
        # empty TAG.
        run: |
          TAG=${{ steps.prepare.outputs.version }}
          if [[ $TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            REGISTRY=docker.io/kubesphere TAG=$TAG make generate
          else
            REGISTRY=docker.io/kubespheredev TAG=$TAG make generate
          fi
          gh release upload "$TAG" config/capkk/release/* --clobber
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,42 +0,0 @@
name: SyncVendor
on:
push:
paths:
- go.mod
workflow_dispatch:
jobs:
vendor:
name: sync vendor
runs-on: self-runner-kubesphere
if: github.repository == 'kubesphere/kubekey'
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: '1.22.0'
- name: Run go mod vendor
run: go mod vendor
- name: Commit vendor directory
run: |
git config --global user.name 'ks-ci-bot'
git config --global user.email 'ci-bot@kubesphere.io'
git add vendor
git commit -m 'Add vendor directory'
- name: Create Pull Request
uses: peter-evans/create-pull-request@v3
with:
commit-message: "Add vendor directory"
branch: "add-vendor-${{ github.sha }}"
delete-branch: true
title: "[ci-bot] Add vendor directory"
body: |
This [PR](https://github.com/kubesphere/kubekey/pull/${{ github.sha }}) adds the vendor directory generated by go mod vendor.

6
.gitignore vendored
View File

@ -9,12 +9,6 @@
# Test binary, build with `go test -c`
*.test
# E2E test templates
test/e2e/data/infrastructure-kubekey/v1beta1/cluster-template*.yaml
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# IntelliJ
.idea/
*.iml

1
.go-version Normal file
View File

@ -0,0 +1 @@
1.23.3

View File

@ -21,9 +21,9 @@ linters:
- errchkjson
- errname
- errorlint
- exhaustive
# - exhaustive
# - exhaustruct
- exportloopref
- copyloopvar
- fatcontext
- forbidigo
- forcetypeassert
@ -39,12 +39,12 @@ linters:
- gocritic
- gocyclo
- godot
- godox
# - godox
- gofmt
# - gofumpt
- goheader
- goimports
- gomoddirectives
# - gomoddirectives
- gomodguard
- goprintffuncname
- gosec
@ -68,7 +68,7 @@ linters:
# - musttag
- nakedret
- nestif
- nilerr
# - nilerr
- nilnil
- nlreturn
- noctx
@ -152,11 +152,6 @@ linters-settings:
- k8s.io
- sigs.k8s.io
- github.com/kubesphere/kubekey
exhaustive:
# Enum types matching the supplied regex do not have to be listed in
# switch statements to satisfy exhaustiveness.
# Default: ""
ignore-enum-types: "fsnotify.Op|v1alpha1.TaskPhase|reflect.Kind"
forbidigo:
# Forbid the following identifiers (list of regexp).
# Default: ["^(fmt\\.Print(|f|ln)|print|println)$"]
@ -241,6 +236,8 @@ linters-settings:
- pkg: github.com/opencontainers/image-spec/specs-go/v1
alias: imagev1
# Kubernetes
- pkg: "k8s.io/api/coordination/v1"
alias: coordinationv1
- pkg: "k8s.io/api/core/v1"
alias: corev1
- pkg: "k8s.io/api/batch/v1"
@ -275,12 +272,21 @@ linters-settings:
# kubekey
- pkg: "github.com/kubesphere/kubekey/v4/pkg/const"
alias: _const
- pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
- pkg: "github.com/kubesphere/kubekey/api/core/v1"
alias: kkcorev1
- pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
- pkg: "github.com/kubesphere/kubekey/api/core/v1alpha1"
alias: kkcorev1alpha1
- pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
- pkg: "github.com/kubesphere/kubekey/api/project/v1"
alias: kkprojectv1
- pkg: "github.com/kubesphere/kubekey/api/capkk/infrastructure/v1beta1"
alias: capkkinfrav1beta1
# cluster-api
- pkg: "sigs.k8s.io/cluster-api/api/v1beta1"
alias: clusterv1beta1
- pkg: "sigs.k8s.io/cluster-api/util"
alias: clusterutil
- pkg: "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
alias: kubeadmcpv1beta1
nestif:
# Minimal complexity of if statements to report.
# Default: 5
@ -733,7 +739,7 @@ linters-settings:
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter
- name: unused-parameter
severity: warning
disabled: false
disabled: true
exclude: [""]
arguments:
- allowRegex: "^_"
@ -890,10 +896,8 @@ issues:
run:
timeout: 10m
go: "1.22"
go: "1.23"
build-tags:
- builtin
executor-dirs:
- .git
- vendor
- clusterapi
allow-parallel-runners: true

439
Makefile
View File

@ -6,7 +6,7 @@ SHELL:=/usr/bin/env bash
#
# Go.
#
GO_VERSION ?= 1.22
GO_VERSION ?= 1.23.3
GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION)
GOARCH ?= $(shell go env GOARCH)
GOOS ?= $(shell go env GOOS)
@ -56,17 +56,17 @@ export PATH := $(abspath $(OUTPUT_BIN_DIR)):$(abspath $(OUTPUT_TOOLS_DIR)):$(PAT
# Binaries.
#
# Note: Need to use abspath so we can invoke these from subdirectories
KUSTOMIZE_VER := v4.5.2
KUSTOMIZE_VER := v5.5.0
KUSTOMIZE_BIN := kustomize
KUSTOMIZE := $(abspath $(OUTPUT_TOOLS_DIR)/$(KUSTOMIZE_BIN)-$(KUSTOMIZE_VER))
KUSTOMIZE_PKG := sigs.k8s.io/kustomize/kustomize/v4
KUSTOMIZE_PKG := sigs.k8s.io/kustomize/kustomize/v5
SETUP_ENVTEST_VER := v0.0.0-20240521074430-fbb7d370bebc
SETUP_ENVTEST_BIN := setup-envtest
SETUP_ENVTEST := $(abspath $(OUTPUT_TOOLS_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER))
SETUP_ENVTEST_PKG := sigs.k8s.io/controller-runtime/tools/setup-envtest
CONTROLLER_GEN_VER := v0.15.0
CONTROLLER_GEN_VER := main
CONTROLLER_GEN_BIN := controller-gen
CONTROLLER_GEN := $(abspath $(OUTPUT_TOOLS_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER))
CONTROLLER_GEN_PKG := sigs.k8s.io/controller-tools/cmd/controller-gen
@ -79,15 +79,15 @@ GOTESTSUM_PKG := gotest.tools/gotestsum
HADOLINT_VER := v2.10.0
HADOLINT_FAILURE_THRESHOLD = warning
GOLANGCI_LINT_VER := $(shell cat .github/workflows/golangci-lint.yml | grep [[:space:]]version | sed 's/.*version: //')
GOLANGCI_LINT_VER := $(shell cat .github/workflows/golangci-lint.yaml | grep [[:space:]]version | sed 's/.*version: //')
GOLANGCI_LINT_BIN := golangci-lint
GOLANGCI_LINT := $(abspath $(OUTPUT_TOOLS_DIR)/$(GOLANGCI_LINT_BIN))
GOLANGCI_LINT := $(abspath $(OUTPUT_TOOLS_DIR)/$(GOLANGCI_LINT_BIN)-$(GOLANGCI_LINT_VER))
GOLANGCI_LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint
GORELEASER_VERSION := v2.0.1
GORELEASER_VER := $(shell cat .github/workflows/releaser.yaml | grep [[:space:]]version | sed 's/.*version: //')
GORELEASER_BIN := goreleaser
GORELEASER := $(abspath $(OUTPUT_TOOLS_DIR)/$(GORELEASER_BIN)-$(GORELEASER_VER))
GORELEASER_PKG := github.com/goreleaser/goreleaser/v2
GORELEASER := $(abspath $(OUTPUT_TOOLS_DIR)/$(GORELEASER_BIN))
#
# Docker.
@ -101,49 +101,26 @@ DOCKER_PUSH ?= $(DOCKER_BUILD) --platform $(PLATFORM) $(DOCKER_OUT_TYPE)
# Define Docker related variables. Releases should modify and double check these vars.
REGISTRY ?= docker.io/kubespheredev
#REGISTRY ?= docker.io/kubespheredev
#PROD_REGISTRY ?= docker.io/kubesphere
# capkk
#CAPKK_IMAGE_NAME ?= capkk-controller
#CAPKK_CONTROLLER_IMG ?= $(REGISTRY)/$(CAPKK_IMAGE_NAME)
CAPKK_CONTROLLER_IMG_NAME ?= capkk-controller-manager
CAPKK_CONTROLLER_IMG ?= $(REGISTRY)/$(CAPKK_CONTROLLER_IMG_NAME)
# controller-manager
OPERATOR_IMAGE_NAME ?= kk-controller-manager
OPERATOR_CONTROLLER_IMG ?= $(REGISTRY)/$(OPERATOR_IMAGE_NAME)
KK_CONTROLLER_IMG_NAME ?= kk-controller-manager
KK_CONTROLLER_IMG ?= $(REGISTRY)/$(KK_CONTROLLER_IMG_NAME)
# executor
EXECUTOR_IMAGE_NAME ?= kk-executor
EXECUTOR_CONTROLLER_IMG ?= $(REGISTRY)/$(EXECUTOR_IMAGE_NAME)
# bootstrap
#K3S_BOOTSTRAP_IMAGE_NAME ?= k3s-bootstrap-controller
#K3S_BOOTSTRAP_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_BOOTSTRAP_IMAGE_NAME)
# control plane
#K3S_CONTROL_PLANE_IMAGE_NAME ?= k3s-control-plane-controller
#K3S_CONTROL_PLANE_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME)
KK_EXECUTOR_IMG_NAME ?= kk-executor
KK_EXECUTOR_IMG ?= $(REGISTRY)/$(KK_EXECUTOR_IMG_NAME)
# It is set by Prow GIT_TAG, a git-based tag of the form vYYYYMMDD-hash, e.g., v20210120-v0.3.10-308-gc61521971
TAG ?= dev
#ALL_ARCH = amd64 arm arm64 ppc64le s390x
# Allow overriding the imagePullPolicy
#PULL_POLICY ?= Always
# Hosts running SELinux need :z added to volume mounts
#SELINUX_ENABLED := $(shell cat /sys/fs/selinux/enforce 2> /dev/null || echo 0)
#
#ifeq ($(SELINUX_ENABLED),1)
# DOCKER_VOL_OPTS?=:z
#endif
# Set build time variables including version details
LDFLAGS := $(shell hack/version.sh)
# Set kk build tags
#BUILDTAGS = exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
BUILDTAGS ?= builtin
#.PHONY: all
#all: test managers
@ -158,35 +135,44 @@ help: ## Display this help.
##@ generate:
.PHONY: generate
generate: ## Run all generate-manifests-*, generate-go-deepcopy-* targets
$(MAKE) generate-go-deepcopy-kubekey generate-manifests-kubekey generate-manifests-capkk generate-modules generate-goimports
generate: generate-go-deepcopy generate-manifests-kubekey generate-manifests-capkk generate-modules generate-goimports ## Run all generate-manifests-*, generate-go-deepcopy-* targets
.PHONY: generate-go-deepcopy-kubekey
generate-go-deepcopy-kubekey: $(CONTROLLER_GEN) ## Generate deepcopy object
$(MAKE) clean-generated-deepcopy SRC_DIRS="./pkg/apis/"
$(CONTROLLER_GEN) \
.PHONY: generate-go-deepcopy
generate-go-deepcopy: $(CONTROLLER_GEN) ## Generate deepcopy object
$(MAKE) clean-generated-deepcopy SRC_DIRS="./api/"
@$(CONTROLLER_GEN) \
object:headerFile=./hack/boilerplate.go.txt \
paths=./pkg/apis/...
paths=./api/...
.PHONY: generate-manifests-kubekey
generate-manifests-kubekey: $(CONTROLLER_GEN) ## Generate kubekey manifests e.g. CRD, RBAC etc.
$(CONTROLLER_GEN) \
paths=./pkg/apis/core/... \
crd \
output:crd:dir=./config/kubekey/crds/
generate-manifests-kubekey: $(CONTROLLER_GEN) clean-crds-kubekey ## Generate kubekey manifests e.g. CRD, RBAC etc.
@$(CONTROLLER_GEN) \
paths=./api/core/... \
crd output:crd:dir=./config/kubekey/crds/
@$(CONTROLLER_GEN) \
paths=./pkg/controllers/core/... \
rbac:roleName=kubekey output:rbac:dir=./config/kubekey/templates/
.PHONY: generate-manifests-capkk
generate-manifests-capkk: $(CONTROLLER_GEN) ## Generate capkk manifests e.g. CRD, RBAC etc.
$(CONTROLLER_GEN) \
paths=./pkg/apis/capkk/... \
crd webhook \
output:crd:dir=./config/capkk/crds/ \
output:webhook:dir=./config/capkk/webhook/
generate-manifests-capkk: $(CONTROLLER_GEN) $(KUSTOMIZE) clean-crds-capkk ## Generate capkk manifests e.g. CRD, RBAC etc.
@$(CONTROLLER_GEN) \
paths=./api/capkk/... \
crd \
output:crd:dir=./config/capkk/crds/
@$(CONTROLLER_GEN) \
paths=./pkg/controllers/... \
rbac:roleName=capkk output:rbac:dir=./config/capkk/rbac \
webhook output:webhook:dir=./config/capkk/webhook
@cp ./config/kubekey/crds/* ./config/capkk/crds/
@cd config/capkk && $(KUSTOMIZE) edit set image capkk-controller-manager-image=$(CAPKK_CONTROLLER_IMG):$(TAG) kk-controller-manager-image=$(KK_CONTROLLER_IMG):$(TAG)
@$(KUSTOMIZE) build config/capkk | \
yq eval '.metadata |= select(.name == "default-capkk") *+ {"annotations": {"cert-manager.io/inject-ca-from": "capkk-system/capkk-serving-cert"}}' | \
yq eval '.spec.template.spec.containers[] |= (select(.name == "controller-manager") | .env[] |= (select(.name == "EXECUTOR_IMAGE") | .value = "$(KK_EXECUTOR_IMG):$(TAG)"))' \
> config/capkk/release/infrastructure-components.yaml
.PHONY: generate-modules
generate-modules: ## Run go mod tidy to ensure modules are up to date
@go mod tidy && go mod vendor
@go mod tidy
.PHONY: generate-goimports
generate-goimports: ## Format all import, `goimports` is required.
@hack/update-goimports.sh
@ -199,12 +185,12 @@ generate-goimports: ## Format all import, `goimports` is required.
.PHONY: lint
lint: $(GOLANGCI_LINT) ## Lint the codebase
$(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
cd $(TEST_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
@$(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
@cd $(TEST_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
.PHONY: verify-dockerfiles
verify-dockerfiles:
./hack/ci-lint-dockerfiles.sh $(HADOLINT_VER) $(HADOLINT_FAILURE_THRESHOLD)
@./hack/ci-lint-dockerfiles.sh $(HADOLINT_VER) $(HADOLINT_FAILURE_THRESHOLD)
ALL_VERIFY_CHECKS ?= modules gen goimports releaser
@ -248,78 +234,35 @@ kk: ## build kk binary
@CGO_ENABLED=0 GOARCH=$(GOARCH) GOOS=$(GOOS) go build -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS)" -o $(OUTPUT_BIN_DIR)/kk cmd/kk/kubekey.go
.PHONY: kk-releaser
kk-releaser: $(GORELEASER_BIN)
LDFLAGS=$(bash ./hack/version.sh) $(GORELEASER_BIN) release --clean --skip validate --skip publish
kk-releaser: $(GORELEASER) ## build releaser in dist. it will show in https://github.com/kubesphere/kubekey/releases
@LDFLAGS=$(bash ./hack/version.sh) $(GORELEASER) release --clean --skip validate --skip publish
.PHONY: docker-build ## build and push all images
docker-build: docker-build-operator docker-build-kk
.PHONY: docker-push ## build and push all images
docker-push: docker-push-kk-executor docker-push-kk-controller-manager docker-push-capkk-controller-manager
.PHONY: docker-build-operator
docker-build-operator: ## Build the docker image for operator
.PHONY: docker-push-kk-executor
docker-push-kk-executor: ## Build the docker image for kk-executor
@$(DOCKER_PUSH) \
--build-arg builder_image=$(GO_CONTAINER_IMAGE) \
--build-arg goproxy=$(GOPROXY) \
--build-arg ldflags="$(LDFLAGS)" --build-arg build_tags="$(BUILDTAGS)" \
-f build/controller-manager/Dockerfile -t $(OPERATOR_CONTROLLER_IMG):$(TAG) .
--build-arg ldflags="$(LDFLAGS)" --build-arg build_tags="" \
-f build/kk/Dockerfile -t $(KK_EXECUTOR_IMG):$(TAG) .
.PHONY: docker-build-kk
docker-build-kk: ## Build the docker image for kk
.PHONY: docker-push-kk-controller-manager
docker-push-kk-controller-manager: ## Build the docker image for kk-controller-manager
@$(DOCKER_PUSH) \
--build-arg builder_image=$(GO_CONTAINER_IMAGE) \
--build-arg goproxy=$(GOPROXY) \
--build-arg ldflags="$(LDFLAGS)" --build-arg build_tags="$(BUILDTAGS)" \
-f build/kk/Dockerfile -t $(EXECUTOR_CONTROLLER_IMG):$(TAG) .
--build-arg ldflags="$(LDFLAGS)" --build-arg build_tags="builtin" \
-f build/controller-manager/Dockerfile -t $(KK_CONTROLLER_IMG):$(TAG) .
#ALL_MANAGERS = capkk k3s-bootstrap k3s-control-plane
#.PHONY: managers
#managers: $(addprefix manager-,$(ALL_MANAGERS)) ## Run all manager-* targets
#
#.PHONY: manager-capkk
#manager-capkk: ## Build the capkk manager binary into the ./bin folder
# go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/manager github.com/kubesphere/kubekey/v3
#
#.PHONY: manager-k3s-bootstrap
#manager-k3s-bootstrap: ## Build the k3s bootstrap manager binary into the ./bin folder
# go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-bootstrap-manager github.com/kubesphere/kubekey/v3/bootstrap/k3s
#
#.PHONY: manager-k3s-control-plane
#manager-k3s-control-plane: ## Build the k3s control plane manager binary into the ./bin folder
# go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-control-plane-manager github.com/kubesphere/kubekey/v3/controlplane/k3s
#
#.PHONY: docker-pull-prerequisites
#docker-pull-prerequisites:
# docker pull docker.io/docker/dockerfile:1.4
# docker pull $(GO_CONTAINER_IMAGE)
#
#.PHONY: docker-build-all
#docker-build-all: $(addprefix docker-build-,$(ALL_ARCH)) ## Build docker images for all architectures
#
#docker-build-%:
# $(MAKE) ARCH=$* docker-build
#
#ALL_DOCKER_BUILD = capkk k3s-bootstrap k3s-control-plane
#
#.PHONY: docker-build
#docker-build: docker-pull-prerequisites ## Run docker-build-* targets for all providers
# $(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(ALL_DOCKER_BUILD))
#
#.PHONY: docker-build-capkk
#docker-build-capkk: ## Build the docker image for capkk
# DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
#
#.PHONY: docker-build-k3s-bootstrap
#docker-build-k3s-bootstrap: ## Build the docker image for k3s bootstrap controller manager
# DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./bootstrap/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
#
#.PHONY: docker-build-k3s-control-plane
#docker-build-k3s-control-plane: ## Build the docker image for k3s control plane controller manager
# DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./controlplane/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
#
#.PHONY: docker-build-e2e
#docker-build-e2e: ## Build the docker image for capkk
# $(MAKE) docker-build REGISTRY=docker.io/kubespheredev PULL_POLICY=IfNotPresent TAG=e2e
.PHONY: docker-push-capkk-controller-manager
docker-push-capkk-controller-manager: ## Build the docker image for capkk-controller-manager
@$(DOCKER_PUSH) \
--build-arg builder_image=$(GO_CONTAINER_IMAGE) \
--build-arg goproxy=$(GOPROXY) \
--build-arg ldflags="$(LDFLAGS)" --build-arg build_tags="clusterapi" \
-f build/controller-manager/Dockerfile -t $(CAPKK_CONTROLLER_IMG):$(TAG) .
## --------------------------------------
## Deployment
@ -329,31 +272,7 @@ docker-build-kk: ## Build the docker image for kk
.PHONY: helm-package
helm-package: ## Helm-package.
helm package config/helm -d $(OUTPUT_DIR)
#ifndef ignore-not-found
# ignore-not-found = false
#endif
#
#.PHONY: install
#install: generate $(KUSTOMIZE) ## Install CRDs into the K8s cluster specified in ~/.kube/config.
# $(KUSTOMIZE) build config/crd | kubectl apply -f -
#
#.PHONY: uninstall
#uninstall: generate $(KUSTOMIZE) ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
# $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
#
#.PHONY: deploy
#deploy: generate $(KUSTOMIZE) ## Deploy controller to the K8s cluster specified in ~/.kube/config.
# $(MAKE) set-manifest-image \
# MANIFEST_IMG=$(REGISTRY)/$(CAPKK_IMAGE_NAME)-$(ARCH) MANIFEST_TAG=$(TAG) \
# TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
# cd config/manager
# $(KUSTOMIZE) build config/default | kubectl apply -f -
#
#.PHONY: undeploy
#undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
# $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
@helm package config/helm -d $(OUTPUT_DIR)
## --------------------------------------
## Testing
@ -361,186 +280,13 @@ helm-package: ## Helm-package.
##@ test:
#ifeq ($(shell go env GOOS),darwin) # Use the darwin/amd64 binary until an arm64 version is available
# KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path --arch amd64 $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))
#else
# KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION))
#endif
.PHONY: test
test: $(SETUP_ENVTEST) ## Run unit and integration tests
KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test ./... $(TEST_ARGS)
@KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test ./... $(TEST_ARGS)
.PHONY: test-verbose
test-verbose: ## Run unit and integration tests with verbose flag
$(MAKE) test TEST_ARGS="$(TEST_ARGS) -v"
#
#.PHONY: test-junit
#test-junit: $(SETUP_ENVTEST) $(GOTESTSUM) ## Run unit and integration tests and generate a junit report
# set +o errexit; (KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -json ./... $(TEST_ARGS); echo $$? > $(ARTIFACTS)/junit.exitcode) | tee $(ARTIFACTS)/junit.stdout
# $(GOTESTSUM) --junitfile $(ARTIFACTS)/junit.xml --raw-command cat $(ARTIFACTS)/junit.stdout
# exit $$(cat $(ARTIFACTS)/junit.exitcode)
#
#.PHONY: test-cover
#test-cover: ## Run unit and integration tests and generate a coverage report
# $(MAKE) test TEST_ARGS="$(TEST_ARGS) -coverprofile=out/coverage.out"
# go tool cover -func=out/coverage.out -o out/coverage.txt
# go tool cover -html=out/coverage.out -o out/coverage.html
#
#.PHONY: test-e2e
#test-e2e: ## Run e2e tests
# $(MAKE) -C $(TEST_DIR)/e2e run
#
#.PHONY: test-e2e-k3s
#test-e2e-k3s: ## Run e2e tests
# $(MAKE) -C $(TEST_DIR)/e2e run-k3s
## --------------------------------------
## Release
## --------------------------------------
##@ release:
## latest git tag for the commit, e.g., v0.3.10
#RELEASE_TAG ?= $(shell git describe --abbrev=0 2>/dev/null)
#ifneq (,$(findstring -,$(RELEASE_TAG)))
# PRE_RELEASE=true
#endif
## the previous release tag, e.g., v0.3.9, excluding pre-release tags
#PREVIOUS_TAG ?= $(shell git tag -l | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$$" | sort -V | grep -B1 $(RELEASE_TAG) | head -n 1 2>/dev/null)
#RELEASE_DIR := out
#
#$(RELEASE_DIR):
# mkdir -p $(RELEASE_DIR)/
#
#.PHONY: release
#release: clean-release ## Build and push container images using the latest git tag for the commit
# @if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi
# @if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi
# git checkout "${RELEASE_TAG}"
# ## Build binaries first.
# GIT_VERSION=$(RELEASE_TAG) $(MAKE) release-binaries
# # Set the manifest image to the production bucket.
# $(MAKE) manifest-modification REGISTRY=$(PROD_REGISTRY)
# ## Build the manifests
# $(MAKE) release-manifests
# ## Build the templates
# $(MAKE) release-templates
# ## Clean the git artifacts modified in the release process
# $(MAKE) clean-release-git
#
#release-binaries: ## Build the binaries to publish with a release
# RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=amd64 $(MAKE) release-binary
# RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=amd64 $(MAKE) release-archive
# RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=arm64 $(MAKE) release-binary
# RELEASE_BINARY=./cmd/kk GOOS=linux GOARCH=arm64 $(MAKE) release-archive
# RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=amd64 $(MAKE) release-binary
# RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=amd64 $(MAKE) release-archive
# RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=arm64 $(MAKE) release-binary
# RELEASE_BINARY=./cmd/kk GOOS=darwin GOARCH=arm64 $(MAKE) release-archive
#
#release-binary: $(RELEASE_DIR)
# docker run \
# --rm \
# -e CGO_ENABLED=0 \
# -e GOOS=$(GOOS) \
# -e GOARCH=$(GOARCH) \
# -e GOPROXY=$(GOPROXY) \
# -v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \
# -w /workspace \
# golang:$(GO_VERSION) \
# go build -a -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS) -extldflags '-static'" \
# -o $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY)) $(RELEASE_BINARY)
#
#release-archive: $(RELEASE_DIR)
# tar -czf $(RELEASE_DIR)/kubekey-$(RELEASE_TAG)-$(GOOS)-$(GOARCH).tar.gz -C $(RELEASE_DIR)/ $(notdir $(RELEASE_BINARY))
# rm -rf $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY))
#
#.PHONY: manifest-modification
#manifest-modification: # Set the manifest images to the staging/production bucket.
# $(MAKE) set-manifest-image \
# MANIFEST_IMG=$(REGISTRY)/$(CAPKK_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
# TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
# $(MAKE) set-manifest-image \
# MANIFEST_IMG=$(REGISTRY)/$(K3S_BOOTSTRAP_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
# TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_image_patch.yaml"
# $(MAKE) set-manifest-image \
# MANIFEST_IMG=$(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME) MANIFEST_TAG=$(RELEASE_TAG) \
# TARGET_RESOURCE="./controlplane/k3s/config/default/manager_image_patch.yaml"
# $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./config/default/manager_pull_policy.yaml"
# $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_pull_policy.yaml"
# $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./controlplane/k3s/config/default/manager_pull_policy.yaml"
#
#.PHONY: release-manifests
#release-manifests: $(RELEASE_DIR) $(KUSTOMIZE) ## Build the manifests to publish with a release
# # Build capkk-components.
# $(KUSTOMIZE) build config/default > $(RELEASE_DIR)/infrastructure-components.yaml
# # Build bootstrap-components.
# $(KUSTOMIZE) build bootstrap/k3s/config/default > $(RELEASE_DIR)/bootstrap-components.yaml
# # Build control-plane-components.
# $(KUSTOMIZE) build controlplane/k3s/config/default > $(RELEASE_DIR)/control-plane-components.yaml
#
# # Add metadata to the release artifacts
# cp metadata.yaml $(RELEASE_DIR)/metadata.yaml
#
#.PHONY: release-templates
#release-templates: $(RELEASE_DIR) ## Generate release templates
# cp templates/cluster-template*.yaml $(RELEASE_DIR)/
#
#.PHONY: release-prod
#release-prod: ## Build and push container images to the prod
# REGISTRY=$(PROD_REGISTRY) TAG=$(RELEASE_TAG) $(MAKE) docker-build-all docker-push-all
## --------------------------------------
## Docker
## --------------------------------------
#
#.PHONY: docker-push-all
#docker-push-all: $(addprefix docker-push-,$(ALL_ARCH)) ## Push the docker images to be included in the release for all architectures + related multiarch manifests
# $(MAKE) docker-push-manifest-capkk
# $(MAKE) docker-push-manifest-k3s-bootstrap
# $(MAKE) docker-push-manifest-k3s-control-plane
#
#docker-push-%:
# $(MAKE) ARCH=$* docker-push
#
#.PHONY: docker-push
#docker-push: ## Push the docker images
# docker push $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
# docker push $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
# docker push $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
#
#.PHONY: docker-push-manifest-capkk
#docker-push-manifest-capkk: ## Push the multiarch manifest for the capkk docker images
# ## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
# docker manifest create --amend $(CAPKK_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CAPKK_CONTROLLER_IMG)\-&:$(TAG)~g")
# @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${CAPKK_CONTROLLER_IMG}:${TAG} ${CAPKK_CONTROLLER_IMG}-$${arch}:${TAG}; done
# docker manifest push --purge $(CAPKK_CONTROLLER_IMG):$(TAG)
#
#.PHONY: docker-push-manifest-k3s-bootstrap
#docker-push-manifest-k3s-bootstrap: ## Push the multiarch manifest for the k3s bootstrap docker images
# ## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
# docker manifest create --amend $(K3S_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(K3S_BOOTSTRAP_CONTROLLER_IMG)\-&:$(TAG)~g")
# @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${K3S_BOOTSTRAP_CONTROLLER_IMG}:${TAG} ${K3S_BOOTSTRAP_CONTROLLER_IMG}-$${arch}:${TAG}; done
# docker manifest push --purge $(K3S_BOOTSTRAP_CONTROLLER_IMG):$(TAG)
#
#.PHONY: docker-push-manifest-k3s-control-plane
#docker-push-manifest-k3s-control-plane: ## Push the multiarch manifest for the k3s control plane docker images
# ## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
# docker manifest create --amend $(K3S_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(K3S_CONTROL_PLANE_CONTROLLER_IMG)\-&:$(TAG)~g")
# @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${K3S_CONTROL_PLANE_CONTROLLER_IMG}:${TAG} ${K3S_CONTROL_PLANE_CONTROLLER_IMG}-$${arch}:${TAG}; done
# docker manifest push --purge $(K3S_CONTROL_PLANE_CONTROLLER_IMG):$(TAG)
#
#.PHONY: set-manifest-pull-policy
#set-manifest-pull-policy:
# $(info Updating kustomize pull policy file for manager resources)
# sed -i'' -e 's@imagePullPolicy: .*@imagePullPolicy: '"$(PULL_POLICY)"'@' $(TARGET_RESOURCE)
#
#.PHONY: set-manifest-image
#set-manifest-image:
# $(info Updating kustomize image patch file for manager resource)
# sed -i'' -e 's@image: .*@image: '"${MANIFEST_IMG}:$(MANIFEST_TAG)"'@' $(TARGET_RESOURCE)
@$(MAKE) test TEST_ARGS="$(TEST_ARGS) -v"
## --------------------------------------
## Cleanup / Verification
@ -549,16 +295,19 @@ test-verbose: ## Run unit and integration tests with verbose flag
##@ clean:
.PHONY: clean
clean: ## Remove all generated files
$(MAKE) clean-output clean-generated-deepcopy
clean: clean-output clean-generated-deepcopy clean-crds-kubekey clean-crds-capkk ## Remove all generated files
.PHONY: clean-output
clean-output: ## Remove all generated binaries
rm -rf $(OUTPUT_DIR)
@rm -rf $(OUTPUT_DIR)
#.PHONY: clean-release
#clean-release: ## Remove the release folder
# rm -rf $(RELEASE_DIR)
.PHONY: clean-crds-kubekey
clean-crds-kubekey: ## Remove the generated crds for kubekey
@rm -rf ./config/kubekey/crds
.PHONY: clean-crds-capkk
clean-crds-capkk: ## Remove the generated crds for capkk
@rm -rf ./config/capkk/crds
#.PHONY: clean-release-git
#clean-release-git: ## Restores the git files usually modified during a release
@ -570,7 +319,7 @@ clean-output: ## Remove all generated binaries
#
.PHONY: clean-generated-deepcopy
clean-generated-deepcopy: ## Remove files generated by conversion-gen from the mentioned dirs. Example SRC_DIRS="./api/v1beta1"
(IFS=','; for i in $(SRC_DIRS); do find $$i -type f -name 'zz_generated.deepcopy*' -exec rm -f {} \;; done)
@(IFS=','; for i in $(SRC_DIRS); do find $$i -type f -name 'zz_generated.deepcopy*' -exec rm -f {} \;; done)
## --------------------------------------
## Hack / Tools
@ -578,50 +327,32 @@ clean-generated-deepcopy: ## Remove files generated by conversion-gen from the m
##@ hack/tools:
.PHONY: $(CONTROLLER_GEN_BIN)
$(CONTROLLER_GEN_BIN): $(CONTROLLER_GEN) ## Build a local copy of controller-gen.
.PHONY: $(GOTESTSUM_BIN)
$(GOTESTSUM_BIN): $(GOTESTSUM) ## Build a local copy of gotestsum.
.PHONY: $(KUSTOMIZE_BIN)
$(KUSTOMIZE_BIN): $(KUSTOMIZE) ## Build a local copy of kustomize.
.PHONY: $(SETUP_ENVTEST_BIN)
$(SETUP_ENVTEST_BIN): $(SETUP_ENVTEST) ## Build a local copy of setup-envtest.
.PHONY: $(GOLANGCI_LINT_BIN)
$(GOLANGCI_LINT_BIN): $(GOLANGCI_LINT) ## Build a local copy of golangci-lint
# Phony alias so `make goreleaser` always resolves to the versioned tool rule below.
# NOTE(fix): was `.PHONY: $(GORELEASER)` (the tool path) and the help text said
# "golangci-lint" — both copy-paste defects; siblings all declare the *_BIN name.
.PHONY: $(GORELEASER_BIN)
$(GORELEASER_BIN): $(GORELEASER) ## Build a local copy of goreleaser.
$(CONTROLLER_GEN): # Build controller-gen into tools folder.
@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(CONTROLLER_GEN_BIN) ]; then \
@if [ ! -f $(CONTROLLER_GEN) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER); \
fi
$(GOTESTSUM): # Build gotestsum into tools folder.
@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(GOTESTSUM_BIN) ]; then \
@if [ ! -f $(GOTESTSUM) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GOTESTSUM_PKG) $(GOTESTSUM_BIN) $(GOTESTSUM_VER); \
fi
$(KUSTOMIZE): # Build kustomize into tools folder.
@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(KUSTOMIZE_PKG) ]; then \
@if [ ! -f $(KUSTOMIZE) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(KUSTOMIZE_PKG) $(KUSTOMIZE_BIN) $(KUSTOMIZE_VER); \
fi
$(SETUP_ENVTEST): # Build setup-envtest into tools folder.
if [ ! -f $(OUTPUT_TOOLS_DIR)/$(SETUP_ENVTEST_BIN) ]; then \
if [ ! -f $(SETUP_ENVTEST) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(SETUP_ENVTEST_PKG) $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER); \
fi
$(GOLANGCI_LINT): # Build golangci-lint into tools folder.
@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(GOLANGCI_LINT_BIN) ]; then \
@if [ ! -f $(GOLANGCI_LINT) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GOLANGCI_LINT_PKG) $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER); \
fi
$(GORELEASER): # Build goreleaser into tools folder.
@if [ ! -f $(OUTPUT_TOOLS_DIR)/$(GOLANGCI_LINT_BIN) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GORELEASER_PKG) $(GORELEASER_BIN) $(GORELEASER_VERSION); \
@if [ ! -f $(GORELEASER) ]; then \
CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GORELEASER_PKG) $(GORELEASER_BIN) $(GORELEASER_VER); \
fi

View File

@ -0,0 +1,183 @@
/*
Copyright 2024 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
const (
	// KKClusterFinalizer allows ReconcileKKCluster to clean up KK resources associated with a KKCluster
	// before removing it from the API server.
	KKClusterFinalizer = "kkcluster.infrastructure.cluster.x-k8s.io"

	// KKClusterSSHPrivateKeyAnnotation is the annotation for the secret used for SSH authentication.
	// It contains the SSH private key and will be mounted in the executor pod.
	KKClusterSSHPrivateKeyAnnotation = "kkcluster.infrastructure.cluster.x-k8s.io/ssh-auth"
)

const (
	// KKClusterNodeReachedCondition represents the condition type indicating whether the hosts
	// defined in the inventory are reachable.
	KKClusterNodeReachedCondition clusterv1beta1.ConditionType = "NodeReached"
	// KKClusterNodeReachedConditionReasonWaiting indicates that the node reachability check is pending.
	// This check is triggered when the corresponding inventory host's configuration changes.
	KKClusterNodeReachedConditionReasonWaiting = "waiting for node status check"
	// KKClusterNodeReachedConditionReasonUnreached indicates that the node reachability check has failed.
	// This means the node is currently offline or inaccessible.
	KKClusterNodeReachedConditionReasonUnreached = "node is unreachable"

	// KKClusterKKMachineConditionReady represents the condition type indicating whether the associated inventory
	// has been successfully marked as ready.
	KKClusterKKMachineConditionReady clusterv1beta1.ConditionType = "KKClusterMachineReady"
	// KKClusterKKMachineConditionReadyReasonWaiting indicates that the associated inventory is still being synchronized.
	KKClusterKKMachineConditionReadyReasonWaiting = "waiting for kkmachine sync"
	// KKMachineKKMachineConditionReasonSyncing indicates that the associated kkmachines are being synchronized.
	KKMachineKKMachineConditionReasonSyncing = "syncing for kkmachine"
	// KKMachineKKMachineConditionReasonFailed indicates that the kkmachine synchronization process has failed.
	KKMachineKKMachineConditionReasonFailed = "kkmachine run failed"
)

// KKClusterFailedReason enumerates the terminal failure reasons recorded in KKClusterStatus.FailureReason.
type KKClusterFailedReason string

const (
	// KKClusterFailedUnknown covers unclassified failures, e.g. a resource cannot be fetched from kubernetes.
	KKClusterFailedUnknown KKClusterFailedReason = "unknown"
	// KKClusterFailedInvalidHosts indicates the hosts defined in the kkcluster are invalid.
	KKClusterFailedInvalidHosts KKClusterFailedReason = "hosts defined in kkcluster is invalid."
	// KKClusterFailedSyncInventory indicates the inventory failed to sync.
	KKClusterFailedSyncInventory KKClusterFailedReason = "failed to sync inventory"
	// KKClusterFailedSyncCPKKMachine indicates the control_plane kkmachine failed to sync.
	KKClusterFailedSyncCPKKMachine KKClusterFailedReason = "sync control_plane kkmachine failed."
	// KKClusterFailedSyncWorkerKKMachine indicates the worker kkmachine failed to sync.
	KKClusterFailedSyncWorkerKKMachine KKClusterFailedReason = "sync worker kkmachine failed."
)

// ControlPlaneEndpointType defines the type of control plane endpoint used for communication with the cluster.
type ControlPlaneEndpointType string

const (
	// ControlPlaneEndpointTypeDNS indicates the control plane endpoint is a globally resolvable DNS entry,
	// ensuring that the configuration always points to the control plane nodes.
	ControlPlaneEndpointTypeDNS ControlPlaneEndpointType = "dns"
	// ControlPlaneEndpointTypeVIP (DEFAULT) indicates the control plane endpoint is a Virtual IP (VIP).
	// - ARP Mode: Requires the management cluster and worker cluster nodes to be in the same network segment.
	// - BGP Mode: Requires a network environment that supports BGP, with proper configuration in both
	//   the management and worker clusters.
	ControlPlaneEndpointTypeVIP ControlPlaneEndpointType = "vip"
)
// InventoryHostConnector describes how to connect to a single inventory host.
type InventoryHostConnector struct {
	// Type of the connector used to connect the host.
	Type string `json:"type,omitempty"`
	// Host address. default use host.name.
	Host string `json:"host,omitempty"`
	// User is the user name of the host. default is root.
	// +optional
	User string `json:"user,omitempty"`
	// Password is the password of the host.
	// +optional
	Password string `json:"password,omitempty"`
	// PrivateKey is the private key of the host. default is ~/.ssh/id_rsa.
	// +optional
	PrivateKey string `json:"privateKey,omitempty"`
}
// InventoryHost defines a single host in the cluster inventory.
type InventoryHost struct {
	// Name of the host.
	Name string `json:"name,omitempty"`
	// Connector describes how to connect the host.
	Connector InventoryHostConnector `json:"connector,omitempty"`
	// Vars holds arbitrary per-host variables (free-form; unknown fields are preserved).
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	Vars runtime.RawExtension `json:"vars,omitempty"`
}
// KKClusterSpec defines the desired state of KKCluster.
type KKClusterSpec struct {
	// InventoryHosts contains all hosts of the cluster.
	InventoryHosts []InventoryHost `json:"inventory,omitempty"`
	// HostCheckGroup selects which group defined in the Inventory will be checked.
	// There are some default groups provided by the system:
	// - all: contains all hosts
	// - ungrouped: contains hosts which do not belong to any groups.
	// If the value is empty, "ungrouped" will be used.
	HostCheckGroup string `json:"hostCheckGroup,omitempty"`
	// Tolerate defines whether to tolerate a failed host check.
	Tolerate bool `json:"tolerate,omitempty"`
	// ControlPlaneEndpointType defines the type of control plane endpoint, such as dns or vip.
	// When vip is used, kube-vip is deployed on each control_plane node. The default value is vip.
	ControlPlaneEndpointType ControlPlaneEndpointType `json:"controlPlaneEndpointType,omitempty"`
}
// KKClusterStatus defines the observed state of KKCluster.
type KKClusterStatus struct {
	// Ready indicates whether it is ready to create the cluster. Usage after inventory is ready.
	Ready bool `json:"ready,omitempty"`
	// FailureReason classifies a terminal failure of the KKCluster (see KKClusterFailedReason).
	FailureReason KKClusterFailedReason `json:"failureReason,omitempty"`
	// FailureMessage is a human-readable description accompanying FailureReason.
	FailureMessage string `json:"failureMessage,omitempty"`
	// Conditions defines current service state of the KKCluster.
	// +optional
	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced,categories=cluster-api,shortName=kkc
// +kubebuilder:subresource:status
// +kubebuilder:metadata:labels="cluster.x-k8s.io/v1beta1=v1beta1"
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this KKClusters belongs"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Cluster infrastructure is ready for SSH instances"
// +kubebuilder:printcolumn:name="ControlPlaneEndpointType",type="string",JSONPath=".spec.controlPlaneEndpointType",description="the ControlPlaneEndpointType to connect workload cluster"

// KKCluster resource maps a kubernetes cluster, manage and reconcile cluster status.
type KKCluster struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the desired state of the KKCluster.
	Spec KKClusterSpec `json:"spec,omitempty"`
	// Status is the last observed state of the KKCluster.
	Status KKClusterStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// KKClusterList of KKCluster
type KKClusterList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the list of KKClusters.
	Items []KKCluster `json:"items"`
}

// init registers the KKCluster types with the scheme builder.
func init() {
	SchemeBuilder.Register(&KKCluster{}, &KKClusterList{})
}
// GetConditions returns the observations of the operational state of the KKCluster resource.
func (k *KKCluster) GetConditions() clusterv1beta1.Conditions {
	return k.Status.Conditions
}

// SetConditions sets the underlying service state of the KKCluster to the given clusterv1beta1.Conditions.
func (k *KKCluster) SetConditions(conditions clusterv1beta1.Conditions) {
	k.Status.Conditions = conditions
}

View File

@ -0,0 +1,139 @@
/*
Copyright 2024 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
const (
	// KKMachineFinalizer allows ReconcileKKMachine to clean up KubeKey resources associated with KKMachine before
	// removing it from the apiserver.
	KKMachineFinalizer = "kkmachine.infrastructure.cluster.x-k8s.io"
	// KKMachineBelongGroupLabel defines which group the kkmachine belongs to.
	KKMachineBelongGroupLabel = "kkmachine.infrastructure.cluster.x-k8s.io/group"
	// AddNodePipelineAnnotation is the annotation used when adding a node to the cluster.
	AddNodePipelineAnnotation = "pipeline.kubekey.kubesphere.io/add-node"
	// DeleteNodePipelineAnnotation is the annotation used when removing a node from the cluster.
	DeleteNodePipelineAnnotation = "pipeline.kubekey.kubesphere.io/delete-node"
)
// KKMachineFailedReason enumerates the terminal failure reasons recorded in KKMachineStatus.FailureReason.
type KKMachineFailedReason string

const (
	// KKMachineFailedReasonAddNodeFailed indicates that adding the node to the cluster failed.
	KKMachineFailedReasonAddNodeFailed KKMachineFailedReason = "add node failed"
	// KKMachineFailedReasonDeleteNodeFailed indicates that deleting the node from the cluster failed.
	// NOTE(fix): this constant was mistyped as clusterv1beta1.ConditionType with the garbled
	// message "delete failed failed"; it is a failure reason (assigned to
	// KKMachineStatus.FailureReason, which is a KKMachineFailedReason), not a condition type.
	KKMachineFailedReasonDeleteNodeFailed KKMachineFailedReason = "delete node failed"
)
// KKMachineSpec defines the desired state of KKMachine.
type KKMachineSpec struct {
	// Roles defines the roles assigned to the Kubernetes cluster node, such as "worker" or "control-plane".
	// A KKMachine created by ControlPlane will automatically have the "control-plane" role.
	// A KKMachine created by MachineDeployment will automatically have the "worker" role.
	// Additional custom roles can also be specified in this field as needed.
	Roles []string `json:"roles,omitempty"`

	// providerID is the identification ID of the machine provided by the provider.
	// This field must match the provider ID as seen on the node object corresponding to this machine.
	// This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler
	// with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out
	// machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a
	// generic out-of-tree provider for autoscaler, this field is required by autoscaler to be
	// able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver
	// and then a comparison is done to find out unregistered machines and are marked for delete.
	// This field will be set by the actuators and consumed by higher level entities like autoscaler that will
	// be interfacing with cluster-api as generic provider.
	// +optional
	ProviderID *string `json:"providerID,omitempty"`

	// version defines the desired Kubernetes version.
	// This field is meant to be optionally used by bootstrap providers.
	// +optional
	Version *string `json:"version,omitempty"`

	// failureDomain is the failure domain the machine will be created in.
	// Must match a key in the FailureDomains map stored on the cluster object.
	// +optional
	FailureDomain *string `json:"failureDomain,omitempty"`

	// Config for machine. contains cluster version, binary version, etc.
	// NOTE(fix): the marker below was written as "// + optional" (with a space), which
	// kubebuilder does not recognize as a marker.
	// +optional
	Config runtime.RawExtension `json:"config,omitempty"`
}
// KKMachineStatus defines the observed state of KKMachine.
type KKMachineStatus struct {
	// Ready is true when the provider resource is ready.
	// +optional
	Ready bool `json:"ready,omitempty"`
	// FailureReason will be set in the event that there is a terminal problem (see KKMachineFailedReason).
	// +optional
	FailureReason KKMachineFailedReason `json:"failureReason,omitempty"`
	// FailureMessage will be set in the event that there is a terminal problem; human-readable detail for FailureReason.
	// +optional
	FailureMessage string `json:"failureMessage,omitempty"`
	// certificatesExpiryDate is the expiry date of the machine certificates.
	// This value is only set for control plane machines.
	// +optional
	CertificatesExpiryDate *metav1.Time `json:"certificatesExpiryDate,omitempty"`
	// Conditions defines current service state of the KKMachine.
	// +optional
	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced,categories=cluster-api,shortName=kkm
// +kubebuilder:subresource:status
// +kubebuilder:metadata:labels="cluster.x-k8s.io/v1beta1=v1beta1"
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this KKMachine belongs"
// +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="the providerID for the machine"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status"
// +kubebuilder:printcolumn:name="Machine",type="string",JSONPath=".metadata.ownerReferences[?(@.kind==\"Machine\")].name",description="Machine object which owns with this KKMachine"

// KKMachine resource maps a machine instance, manage and reconcile machine status.
type KKMachine struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the desired state of the KKMachine.
	Spec KKMachineSpec `json:"spec,omitempty"`
	// Status is the last observed state of the KKMachine.
	Status KKMachineStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// KKMachineList of KKMachine
type KKMachineList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the list of KKMachines.
	Items []KKMachine `json:"items"`
}

// init registers the KKMachine types with the scheme builder.
func init() {
	SchemeBuilder.Register(&KKMachine{}, &KKMachineList{})
}

View File

@ -17,20 +17,10 @@ limitations under the License.
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
clusterv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
// KKMachineTemplateStatus defines a status for a KKMachineTemplate.
type KKMachineTemplateStatus struct {
	// Capacity defines the resource capacity for this machine.
	// This value is used for autoscaling from zero operations as defined in:
	// https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md
	// +optional
	Capacity corev1.ResourceList `json:"capacity,omitempty"`
}
// KKMachineTemplateSpec defines the desired state of KKMachineTemplate.
type KKMachineTemplateSpec struct {
Template KKMachineTemplateResource `json:"template"`
@ -41,7 +31,7 @@ type KKMachineTemplateResource struct {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"`
ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"`
// Spec is the specification of the desired behavior of the machine.
Spec KKMachineSpec `json:"spec"`
@ -60,7 +50,6 @@ type KKMachineTemplate struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec KKMachineTemplateSpec `json:"spec,omitempty"`
Status KKMachineTemplateStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true

View File

@ -0,0 +1,373 @@
//go:build !ignore_autogenerated
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InventoryHost) DeepCopyInto(out *InventoryHost) {
*out = *in
out.Connector = in.Connector
in.Vars.DeepCopyInto(&out.Vars)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryHost.
func (in *InventoryHost) DeepCopy() *InventoryHost {
if in == nil {
return nil
}
out := new(InventoryHost)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InventoryHostConnector) DeepCopyInto(out *InventoryHostConnector) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryHostConnector.
func (in *InventoryHostConnector) DeepCopy() *InventoryHostConnector {
if in == nil {
return nil
}
out := new(InventoryHostConnector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KKCluster) DeepCopyInto(out *KKCluster) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KKCluster.
func (in *KKCluster) DeepCopy() *KKCluster {
if in == nil {
return nil
}
out := new(KKCluster)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KKCluster) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KKClusterList) DeepCopyInto(out *KKClusterList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]KKCluster, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KKClusterList.
func (in *KKClusterList) DeepCopy() *KKClusterList {
if in == nil {
return nil
}
out := new(KKClusterList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KKClusterList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KKClusterSpec) DeepCopyInto(out *KKClusterSpec) {
*out = *in
if in.InventoryHosts != nil {
in, out := &in.InventoryHosts, &out.InventoryHosts
*out = make([]InventoryHost, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KKClusterSpec.
func (in *KKClusterSpec) DeepCopy() *KKClusterSpec {
if in == nil {
return nil
}
out := new(KKClusterSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KKClusterStatus) DeepCopyInto(out *KKClusterStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make(apiv1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KKClusterStatus.
func (in *KKClusterStatus) DeepCopy() *KKClusterStatus {
if in == nil {
return nil
}
out := new(KKClusterStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KKMachine) DeepCopyInto(out *KKMachine) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KKMachine.
func (in *KKMachine) DeepCopy() *KKMachine {
if in == nil {
return nil
}
out := new(KKMachine)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KKMachine) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KKMachineList) DeepCopyInto(out *KKMachineList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]KKMachine, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KKMachineList.
func (in *KKMachineList) DeepCopy() *KKMachineList {
if in == nil {
return nil
}
out := new(KKMachineList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KKMachineList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KKMachineSpec) DeepCopyInto(out *KKMachineSpec) {
*out = *in
if in.Roles != nil {
in, out := &in.Roles, &out.Roles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ProviderID != nil {
in, out := &in.ProviderID, &out.ProviderID
*out = new(string)
**out = **in
}
if in.Version != nil {
in, out := &in.Version, &out.Version
*out = new(string)
**out = **in
}
if in.FailureDomain != nil {
in, out := &in.FailureDomain, &out.FailureDomain
*out = new(string)
**out = **in
}
in.Config.DeepCopyInto(&out.Config)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KKMachineSpec.
func (in *KKMachineSpec) DeepCopy() *KKMachineSpec {
if in == nil {
return nil
}
out := new(KKMachineSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KKMachineStatus) DeepCopyInto(out *KKMachineStatus) {
*out = *in
if in.CertificatesExpiryDate != nil {
in, out := &in.CertificatesExpiryDate, &out.CertificatesExpiryDate
*out = (*in).DeepCopy()
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make(apiv1beta1.Conditions, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KKMachineStatus.
func (in *KKMachineStatus) DeepCopy() *KKMachineStatus {
if in == nil {
return nil
}
out := new(KKMachineStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KKMachineTemplate) DeepCopyInto(out *KKMachineTemplate) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KKMachineTemplate.
func (in *KKMachineTemplate) DeepCopy() *KKMachineTemplate {
if in == nil {
return nil
}
out := new(KKMachineTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KKMachineTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KKMachineTemplateList) DeepCopyInto(out *KKMachineTemplateList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]KKMachineTemplate, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KKMachineTemplateList.
func (in *KKMachineTemplateList) DeepCopy() *KKMachineTemplateList {
if in == nil {
return nil
}
out := new(KKMachineTemplateList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KKMachineTemplateList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KKMachineTemplateResource) DeepCopyInto(out *KKMachineTemplateResource) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KKMachineTemplateResource.
func (in *KKMachineTemplateResource) DeepCopy() *KKMachineTemplateResource {
if in == nil {
return nil
}
out := new(KKMachineTemplateResource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KKMachineTemplateSpec) DeepCopyInto(out *KKMachineTemplateSpec) {
*out = *in
in.Template.DeepCopyInto(&out.Template)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KKMachineTemplateSpec.
func (in *KKMachineTemplateSpec) DeepCopy() *KKMachineTemplateSpec {
if in == nil {
return nil
}
out := new(KKMachineTemplateSpec)
in.DeepCopyInto(out)
return out
}

View File

@ -26,31 +26,13 @@ import (
"k8s.io/apimachinery/pkg/util/json"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced
// Config stores global variables for a playbook. Spec is a raw extension, so
// arbitrary user-supplied structured data is accepted unchanged.
type Config struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec runtime.RawExtension `json:"spec,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigList is a list of Config resources.
type ConfigList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []Config `json:"items"`
}

// init registers Config and ConfigList with the scheme builder.
func init() {
	SchemeBuilder.Register(&Config{}, &ConfigList{})
}
// SetValue to config
// if key contains "." (a.b), will convert map and set value (a:b:value)
func (c *Config) SetValue(key string, value any) error {

24
api/core/v1/conversion.go Normal file
View File

@ -0,0 +1,24 @@
package v1
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
)
const PipelineFieldPlaybook = "spec.playbook"
// AddConversionFuncs adds the conversion functions to the given scheme.
// Only the spec.playbook field label is convertible for Pipeline; any other
// label is rejected with an error.
// NOTE: ownerReferences:pipeline is valid in proxy client.
func AddConversionFuncs(scheme *runtime.Scheme) error {
	convert := func(label, value string) (string, string, error) {
		switch label {
		case PipelineFieldPlaybook:
			return label, value, nil
		default:
			return "", "", fmt.Errorf("field label %q not supported for Pipeline", label)
		}
	}

	return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Pipeline"), convert)
}

View File

@ -0,0 +1,131 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
const (
	// InventoryCAPKKFinalizer is used to wait for the referenced pipeline to complete when the inventory is deleted.
	InventoryCAPKKFinalizer = "inventory.kubekey.kubesphere.io/capkk"
	// HostCheckPipelineAnnotation stores which pipeline is used to check hosts.
	HostCheckPipelineAnnotation = "pipeline.kubekey.kubesphere.io/host-check"
)

// InventoryPhase of inventory. It is used in capkk to judge whether the hosts have been checked.
type InventoryPhase string

const (
	// InventoryPhasePending inventory has been created but has never been checked once.
	InventoryPhasePending InventoryPhase = "Pending"
	// InventoryPhaseRunning inventory host_check pipeline is running.
	InventoryPhaseRunning InventoryPhase = "Running"
	// InventoryPhaseSucceeded inventory host_check pipeline ran successfully.
	InventoryPhaseSucceeded InventoryPhase = "Succeeded"
	// InventoryPhaseFailed inventory host_check pipeline check failed.
	InventoryPhaseFailed InventoryPhase = "Failed"
)
// InventoryHost maps a host name to its host-level variables, stored as a raw extension.
type InventoryHost map[string]runtime.RawExtension

// InventoryGroup of Inventory. A group may contain nested groups and carries group-level vars.
type InventoryGroup struct {
	// Groups are the names of child groups nested in this group.
	Groups []string `json:"groups,omitempty"`
	// Hosts are the names of hosts in this group; GetHostsFromGroup only returns
	// names that are also declared in InventorySpec.Hosts.
	Hosts []string `json:"hosts,omitempty"`
	// Vars are group-level variables.
	Vars runtime.RawExtension `json:"vars,omitempty"`
}

// InventorySpec of Inventory
type InventorySpec struct {
	// Hosts is all nodes
	Hosts InventoryHost `json:"hosts,omitempty"`
	// Vars for all hosts. The priority for vars is: host vars > group vars > inventory vars
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	Vars runtime.RawExtension `json:"vars,omitempty"`
	// Groups nodes. A group may contain repeated nodes.
	// +optional
	Groups map[string]InventoryGroup `json:"groups,omitempty"`
}
// InventoryStatus of Inventory
type InventoryStatus struct {
	// Ready reports whether the inventory is ready to be used.
	Ready bool `json:"ready,omitempty"`
	// Phase is the inventory phase.
	Phase InventoryPhase `json:"phase,omitempty"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of inventory"
// Inventory stores hosts and vars for a playbook.
type Inventory struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   InventorySpec   `json:"spec,omitempty"`
	Status InventoryStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// InventoryList is a list of Inventory resources.
type InventoryList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []Inventory `json:"items"`
}
// GetHostsFromGroup flattens the named group of inv into a de-duplicated list
// of host names, recursing through child groups first. The unavailableHosts and
// unavailableGroups sets record names already emitted (or groups already
// visited) and are mutated in place, which also makes group cycles terminate.
func GetHostsFromGroup(inv *Inventory, groupName string, unavailableHosts, unavailableGroups map[string]struct{}) []string {
	group, found := inv.Spec.Groups[groupName]
	if !found {
		return make([]string, 0)
	}
	// Mark this group as visited before descending so cycles cannot recurse forever.
	unavailableGroups[groupName] = struct{}{}

	result := make([]string, 0)
	// Child groups contribute their hosts first, in declaration order.
	for _, child := range group.Groups {
		if _, seen := unavailableGroups[child]; seen {
			continue
		}
		unavailableGroups[child] = struct{}{}
		result = append(result, GetHostsFromGroup(inv, child, unavailableHosts, unavailableGroups)...)
	}
	// Then this group's own hosts: keep only names declared in spec.hosts that
	// have not been emitted yet.
	for _, name := range group.Hosts {
		if _, declared := inv.Spec.Hosts[name]; !declared {
			continue
		}
		if _, seen := unavailableHosts[name]; seen {
			continue
		}
		unavailableHosts[name] = struct{}{}
		result = append(result, name)
	}

	return result
}
// init registers Inventory and InventoryList with the scheme builder.
func init() {
	SchemeBuilder.Register(&Inventory{}, &InventoryList{})
}

View File

@ -0,0 +1,201 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
	// BuiltinsProjectAnnotation marks use of the builtin project of KubeKey.
	BuiltinsProjectAnnotation = "kubekey.kubesphere.io/builtins-project"
	// PipelineCompletedFinalizer will be removed after the Pipeline is completed.
	PipelineCompletedFinalizer = "kubekey.kubesphere.io/pipeline-completed"
)

// PipelinePhase of Pipeline
type PipelinePhase string

const (
	// PipelinePhasePending of Pipeline. Pipeline has been created but not yet processed.
	PipelinePhasePending PipelinePhase = "Pending"
	// PipelinePhaseRunning of Pipeline. The Pipeline is being processed.
	PipelinePhaseRunning PipelinePhase = "Running"
	// PipelinePhaseFailed of Pipeline. Set once any Task runs failed.
	PipelinePhaseFailed PipelinePhase = "Failed"
	// PipelinePhaseSucceeded of Pipeline. Set when all Tasks run successfully.
	PipelinePhaseSucceeded PipelinePhase = "Succeeded"
)

// PipelineFailedReason describes why a Pipeline failed.
type PipelineFailedReason string

const (
	// PipelineFailedReasonUnknown is the default failed reason.
	PipelineFailedReasonUnknown PipelineFailedReason = "unknown"
	// PipelineFailedReasonPodFailed pod exec failed.
	PipelineFailedReasonPodFailed PipelineFailedReason = "pod executor failed"
	// PipelineFailedReasonTaskFailed task exec failed.
	PipelineFailedReasonTaskFailed PipelineFailedReason = "task executor failed"
)
// PipelineSpec of pipeline.
type PipelineSpec struct {
	// Project is the storage for executable packages.
	// +optional
	Project PipelineProject `json:"project,omitempty"`
	// Playbook is the playbook to execute.
	Playbook string `json:"playbook"`
	// InventoryRef is the node configuration for the playbook.
	// +optional
	InventoryRef *corev1.ObjectReference `json:"inventoryRef,omitempty"`
	// Config is the global variable configuration for the playbook.
	// +optional
	Config Config `json:"config,omitempty"`
	// Tags are the tags of the playbook to execute.
	// +optional
	Tags []string `json:"tags,omitempty"`
	// SkipTags are the tags of the playbook to skip.
	// +optional
	SkipTags []string `json:"skipTags,omitempty"`
	// If Debug mode is true, it will retain runtime data after a successful execution of Pipeline,
	// which includes task execution status and parameters.
	// +optional
	Debug bool `json:"debug,omitempty"`
	// Volumes in job pod.
	// NOTE(review): the json tag is "workVolume" (singular), not "volumes" — confirm intentional.
	// +optional
	Volumes []corev1.Volume `json:"workVolume,omitempty"`
	// VolumeMounts in job pod.
	// +optional
	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
	// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
	// +optional
	ServiceAccountName string `json:"serviceAccountName,omitempty"`
}
// PipelineProject describes where the playbook to execute is stored.
type PipelineProject struct {
	// Addr is the storage for executable packages (in Ansible file format).
	// When starting with http or https, it will be obtained from a Git repository.
	// When starting with file path, it will be obtained from the local path.
	// +optional
	Addr string `json:"addr,omitempty"`
	// Name is the name of the project.
	// +optional
	Name string `json:"name,omitempty"`
	// Branch is the git branch of the git Addr.
	// +optional
	Branch string `json:"branch,omitempty"`
	// Tag is the git tag of the git Addr.
	// +optional
	Tag string `json:"tag,omitempty"`
	// InsecureSkipTLS controls whether TLS verification is skipped when the git addr is https.
	// +optional
	InsecureSkipTLS bool `json:"insecureSkipTLS,omitempty"`
	// Token of Authorization for the http request.
	// +optional
	Token string `json:"token,omitempty"`
}
// PipelineStatus of Pipeline
type PipelineStatus struct {
	// TaskResult is the aggregate execution result of all related tasks.
	TaskResult PipelineTaskResult `json:"taskResult,omitempty"`
	// Phase of pipeline.
	Phase PipelinePhase `json:"phase,omitempty"`
	// FailureReason will be set in the event that there is a terminal problem.
	// +optional
	FailureReason PipelineFailedReason `json:"failureReason,omitempty"`
	// FailureMessage will be set in the event that there is a terminal problem.
	// +optional
	FailureMessage string `json:"failureMessage,omitempty"`
	// FailedDetail records the failed tasks.
	FailedDetail []PipelineFailedDetail `json:"failedDetail,omitempty"`
}

// PipelineTaskResult of Pipeline
type PipelineTaskResult struct {
	// Total number of tasks.
	Total int `json:"total,omitempty"`
	// Success number of tasks.
	Success int `json:"success,omitempty"`
	// Failed number of tasks.
	Failed int `json:"failed,omitempty"`
	// Ignored number of tasks.
	Ignored int `json:"ignored,omitempty"`
}

// PipelineFailedDetail stores the failure details of one failed task.
type PipelineFailedDetail struct {
	// Task name of the failed task.
	Task string `json:"task,omitempty"`
	// Hosts holds the per-host result of the failed task.
	Hosts []PipelineFailedDetailHost `json:"hosts,omitempty"`
}

// PipelineFailedDetailHost is the detailed failure message for a single host.
type PipelineFailedDetailHost struct {
	// Host name of the failed task.
	Host string `json:"host,omitempty"`
	// Stdout of the failed task.
	Stdout string `json:"stdout,omitempty"`
	// StdErr of the failed task.
	StdErr string `json:"stdErr,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Playbook",type="string",JSONPath=".spec.playbook"
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase"
// +kubebuilder:printcolumn:name="Total",type="integer",JSONPath=".status.taskResult.total"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// Pipeline resource executes a playbook.
type Pipeline struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   PipelineSpec   `json:"spec,omitempty"`
	Status PipelineStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PipelineList is a list of Pipeline resources.
type PipelineList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []Pipeline `json:"items"`
}

// init registers Pipeline and PipelineList with the scheme builder.
func init() {
	SchemeBuilder.Register(&Pipeline{}, &PipelineList{})
}
// //+kubebuilder:webhook:path=/mutate-kubekey-kubesphere-io-v1beta1-pipeline,mutating=true,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=kkmachines,verbs=create;update,versions=v1beta1,name=default.kkmachine.infrastructure.cluster.x-k8s.io,admissionReviewVersions=v1
// var _ webhook.Defaulter = &Pipeline{}
// // Default implements webhook.Defaulter so a webhook will be registered for the type
// func (k *Pipeline) Default() {
// }

View File

@ -0,0 +1,387 @@
//go:build !ignore_autogenerated
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Config) DeepCopyInto(out *Config) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	// FIX: ObjectMeta holds maps/slices (labels, annotations, finalizers,
	// ownerReferences); the plain struct assignment above aliases them between
	// in and out. Deep-copy it explicitly, matching what controller-gen emits
	// for every other object type in this file (cf. Inventory.DeepCopyInto).
	// This file is generated — prefer re-running controller-gen to keep it in sync.
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
// A nil receiver yields nil.
func (in *Config) DeepCopy() *Config {
	if in == nil {
		return nil
	}
	out := new(Config)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Inventory) DeepCopyInto(out *Inventory) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	// InventoryStatus holds only value fields (bool, string), so assignment is a full copy.
	out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Inventory.
// A nil receiver yields nil.
func (in *Inventory) DeepCopy() *Inventory {
	if in == nil {
		return nil
	}
	out := new(Inventory)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Inventory) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InventoryGroup) DeepCopyInto(out *InventoryGroup) {
	*out = *in
	// String slices need fresh backing arrays; copy() suffices for value elements.
	if in.Groups != nil {
		in, out := &in.Groups, &out.Groups
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Hosts != nil {
		in, out := &in.Hosts, &out.Hosts
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	in.Vars.DeepCopyInto(&out.Vars)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryGroup.
// A nil receiver yields nil.
func (in *InventoryGroup) DeepCopy() *InventoryGroup {
	if in == nil {
		return nil
	}
	out := new(InventoryGroup)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// The receiver is a map type, so it is taken by value; each entry's RawExtension is deep-copied.
func (in InventoryHost) DeepCopyInto(out *InventoryHost) {
	{
		in := &in
		*out = make(InventoryHost, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryHost.
// A nil map yields nil.
func (in InventoryHost) DeepCopy() InventoryHost {
	if in == nil {
		return nil
	}
	out := new(InventoryHost)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InventoryList) DeepCopyInto(out *InventoryList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Inventory, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryList.
// A nil receiver yields nil.
func (in *InventoryList) DeepCopy() *InventoryList {
	if in == nil {
		return nil
	}
	out := new(InventoryList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *InventoryList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InventorySpec) DeepCopyInto(out *InventorySpec) {
	*out = *in
	if in.Hosts != nil {
		in, out := &in.Hosts, &out.Hosts
		*out = make(InventoryHost, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	in.Vars.DeepCopyInto(&out.Vars)
	if in.Groups != nil {
		in, out := &in.Groups, &out.Groups
		*out = make(map[string]InventoryGroup, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventorySpec.
// A nil receiver yields nil.
func (in *InventorySpec) DeepCopy() *InventorySpec {
	if in == nil {
		return nil
	}
	out := new(InventorySpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// InventoryStatus holds only value fields (bool, string), so a shallow copy is a full copy.
func (in *InventoryStatus) DeepCopyInto(out *InventoryStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryStatus.
// A nil receiver yields nil.
func (in *InventoryStatus) DeepCopy() *InventoryStatus {
	if in == nil {
		return nil
	}
	out := new(InventoryStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Pipeline) DeepCopyInto(out *Pipeline) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline.
// A nil receiver yields nil.
func (in *Pipeline) DeepCopy() *Pipeline {
	if in == nil {
		return nil
	}
	out := new(Pipeline)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Pipeline) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineFailedDetail) DeepCopyInto(out *PipelineFailedDetail) {
	*out = *in
	if in.Hosts != nil {
		in, out := &in.Hosts, &out.Hosts
		*out = make([]PipelineFailedDetailHost, len(*in))
		// PipelineFailedDetailHost contains only string fields, so a shallow
		// element copy via copy() is a full copy.
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineFailedDetail.
// A nil receiver yields nil.
func (in *PipelineFailedDetail) DeepCopy() *PipelineFailedDetail {
	if in == nil {
		return nil
	}
	out := new(PipelineFailedDetail)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// All fields are strings, so a shallow copy is a full copy.
func (in *PipelineFailedDetailHost) DeepCopyInto(out *PipelineFailedDetailHost) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineFailedDetailHost.
// A nil receiver yields nil.
func (in *PipelineFailedDetailHost) DeepCopy() *PipelineFailedDetailHost {
	if in == nil {
		return nil
	}
	out := new(PipelineFailedDetailHost)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineList) DeepCopyInto(out *PipelineList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Pipeline, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineList.
// A nil receiver yields nil.
func (in *PipelineList) DeepCopy() *PipelineList {
	if in == nil {
		return nil
	}
	out := new(PipelineList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PipelineList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// PipelineProject contains only value fields (strings, bool), so a shallow copy is a full copy.
func (in *PipelineProject) DeepCopyInto(out *PipelineProject) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineProject.
// A nil receiver yields nil.
func (in *PipelineProject) DeepCopy() *PipelineProject {
	if in == nil {
		return nil
	}
	out := new(PipelineProject)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
	*out = *in
	out.Project = in.Project
	// Pointer field: allocate a new ObjectReference and copy its (value-only) contents.
	if in.InventoryRef != nil {
		in, out := &in.InventoryRef, &out.InventoryRef
		*out = new(corev1.ObjectReference)
		**out = **in
	}
	in.Config.DeepCopyInto(&out.Config)
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.SkipTags != nil {
		in, out := &in.SkipTags, &out.SkipTags
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make([]corev1.Volume, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.VolumeMounts != nil {
		in, out := &in.VolumeMounts, &out.VolumeMounts
		*out = make([]corev1.VolumeMount, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec.
// A nil receiver yields nil.
func (in *PipelineSpec) DeepCopy() *PipelineSpec {
	if in == nil {
		return nil
	}
	out := new(PipelineSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineStatus) DeepCopyInto(out *PipelineStatus) {
	*out = *in
	// PipelineTaskResult holds only int fields, so assignment is a full copy.
	out.TaskResult = in.TaskResult
	if in.FailedDetail != nil {
		in, out := &in.FailedDetail, &out.FailedDetail
		*out = make([]PipelineFailedDetail, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStatus.
// A nil receiver yields nil.
func (in *PipelineStatus) DeepCopy() *PipelineStatus {
	if in == nil {
		return nil
	}
	out := new(PipelineStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// All fields are ints, so a shallow copy is a full copy.
func (in *PipelineTaskResult) DeepCopyInto(out *PipelineTaskResult) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskResult.
// A nil receiver yields nil.
func (in *PipelineTaskResult) DeepCopy() *PipelineTaskResult {
	if in == nil {
		return nil
	}
	out := new(PipelineTaskResult)
	in.DeepCopyInto(out)
	return out
}

45
api/go.mod Normal file
View File

@ -0,0 +1,45 @@
module github.com/kubesphere/kubekey/api
go 1.23.3
require (
github.com/stretchr/testify v1.9.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.31.3
k8s.io/apimachinery v0.31.3
k8s.io/klog/v2 v2.130.1
sigs.k8s.io/cluster-api v1.9.2
sigs.k8s.io/controller-runtime v0.19.3
)
require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/x448/float16 v0.8.4 // indirect
golang.org/x/net v0.32.0 // indirect
golang.org/x/text v0.21.0 // indirect
google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiextensions-apiserver v0.31.3 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)

148
api/go.sum Normal file
View File

@ -0,0 +1,148 @@
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y=
github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8=
k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE=
k8s.io/apiextensions-apiserver v0.31.3 h1:+GFGj2qFiU7rGCsA5o+p/rul1OQIq6oYpQw4+u+nciE=
k8s.io/apiextensions-apiserver v0.31.3/go.mod h1:2DSpFhUZZJmn/cr/RweH1cEVVbzFw9YBu4T+U3mf1e4=
k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4=
k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/cluster-api v1.9.2 h1:4nUcIg/nOccn7/O1FF1IJaxQqjOxl+gH4ejQ9D/P+l8=
sigs.k8s.io/cluster-api v1.9.2/go.mod h1:pkFqVPq0ELlJgyDjgqpb4MU1XnWEi98B2q3DbEjC4ww=
sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw=
sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

153
api/project/v1/block.go Normal file
View File

@ -0,0 +1,153 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
"reflect"
"strings"
"gopkg.in/yaml.v3"
)
// Block is a single entry in a play's task list. It is exactly one of:
// a concrete Task, an include_tasks file reference, or a container of
// nested blocks (BlockInfo) — see Block.UnmarshalYAML for how the three
// forms are distinguished during decoding.
type Block struct {
	BlockBase
	// If it has Block, Task should be empty
	Task
	// IncludeTasks references an external task file to include.
	IncludeTasks string `yaml:"include_tasks,omitempty"`
	BlockInfo
}

// BlockBase groups the attributes shared by every block form:
// base metadata, conditionals, collection search, tags, notification
// and delegation settings (all inlined into the block's YAML mapping).
type BlockBase struct {
	Base `yaml:",inline"`
	Conditional `yaml:",inline"`
	CollectionSearch `yaml:",inline"`
	Taggable `yaml:",inline"`
	Notifiable `yaml:",inline"`
	Delegable `yaml:",inline"`
}

// BlockInfo holds the nested task lists of a container block:
// the "block", "rescue" and "always" sections.
type BlockInfo struct {
	Block []Block `yaml:"block,omitempty"`
	Rescue []Block `yaml:"rescue,omitempty"`
	Always []Block `yaml:"always,omitempty"`
}

// Task describes an executable task's control attributes: async
// execution, looping, retries/delay/until, result registration, and
// changed/failed conditions.
type Task struct {
	AsyncVal int `yaml:"async,omitempty"`
	ChangedWhen When `yaml:"changed_when,omitempty"`
	Delay int `yaml:"delay,omitempty"`
	FailedWhen When `yaml:"failed_when,omitempty"`
	Loop any `yaml:"loop,omitempty"`
	LoopControl LoopControl `yaml:"loop_control,omitempty"`
	Poll int `yaml:"poll,omitempty"`
	Register string `yaml:"register,omitempty"`
	Retries int `yaml:"retries,omitempty"`
	Until When `yaml:"until,omitempty"`
	// deprecated, used to be loop and loop_args but loop has been repurposed
	//LoopWith string `yaml:"loop_with"`
	// UnknownField store undefined field
	UnknownField map[string]any `yaml:"-"`
}
// UnmarshalYAML decodes a YAML mapping node into the Block.
//
// Decoding order matters:
//  1. The shared BlockBase fields are decoded first.
//  2. If an "include_tasks" key is present, only its value is recorded
//     and decoding stops immediately.
//  3. If a "block" key is present, the node is decoded as nested
//     block/rescue/always sections instead of a task.
//  4. Otherwise the node is decoded as a Task, and every key not
//     declared on BlockBase or Task is preserved in Task.UnknownField.
func (b *Block) UnmarshalYAML(node *yaml.Node) error {
	// fill baseInfo
	if err := node.Decode(&b.BlockBase); err != nil {
		return fmt.Errorf("failed to decode block, error: %w", err)
	}
	// For a mapping node, node.Content alternates key and value nodes.
	for i := 0; i < len(node.Content); i += 2 {
		keyNode := node.Content[i]
		valueNode := node.Content[i+1]
		switch keyNode.Value {
		case "include_tasks":
			b.IncludeTasks = valueNode.Value
			return nil
		case "block":
			return node.Decode(&b.BlockInfo)
		}
	}
	if err := node.Decode(&b.Task); err != nil {
		return fmt.Errorf("failed to decode task: %w", err)
	}
	// Whatever is not a declared BlockBase/Task field is kept verbatim.
	b.UnknownField = collectUnknownFields(node, append(getFieldNames(reflect.TypeOf(BlockBase{})), getFieldNames(reflect.TypeOf(Task{}))...))
	return nil
}
// collectUnknownFields walks the key/value pairs of a YAML mapping node
// and returns every entry whose key does not appear in excludeFields.
// A value that cannot be decoded is kept as a descriptive error string
// rather than aborting the whole collection.
func collectUnknownFields(node *yaml.Node, excludeFields []string) map[string]any {
	// Build a set for O(1) exclusion lookups.
	excluded := make(map[string]struct{}, len(excludeFields))
	for _, name := range excludeFields {
		excluded[name] = struct{}{}
	}

	// For a mapping node, node.Content alternates key and value nodes.
	unknown := make(map[string]any)
	for idx := 0; idx < len(node.Content); idx += 2 {
		key, val := node.Content[idx], node.Content[idx+1]
		if _, skip := excluded[key.Value]; skip {
			continue
		}

		var decoded any
		if err := val.Decode(&decoded); err != nil {
			unknown[key.Value] = fmt.Sprintf("failed to decode: %v", err)
		} else {
			unknown[key.Value] = decoded
		}
	}

	return unknown
}
// getFieldNames returns a slice of field names for a given struct type.
// It inspects the struct fields and extracts the names from the "yaml" tags.
// If a field has an "inline" tag, it recursively processes the fields of the embedded struct.
func getFieldNames(t reflect.Type) []string {
var fields []string
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
yamlTag := field.Tag.Get("yaml")
if yamlTag != "" {
if strings.Contains(yamlTag, "inline") {
inlineFields := getFieldNames(field.Type)
fields = append(fields, inlineFields...)
continue
}
tagName := strings.Split(yamlTag, ",")[0]
if tagName != "" && tagName != "-" {
fields = append(fields, tagName)
}
}
}
return fields
}

View File

@ -0,0 +1,47 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"errors"
"gopkg.in/yaml.v3"
)
// Conditional carries the optional `when` guard shared by plays, roles
// and blocks (inlined into their YAML mappings).
type Conditional struct {
	When When `yaml:"when,omitempty"`
}

// When holds one or more condition expressions. A scalar YAML value is
// normalized into a single-element slice by UnmarshalYAML.
type When struct {
	Data []string
}
// UnmarshalYAML decodes a `when` clause written either as a single scalar
// expression or as a sequence of expressions; both forms are normalized
// into w.Data. Any other node kind (e.g. a mapping) is rejected.
func (w *When) UnmarshalYAML(node *yaml.Node) error {
	switch node.Kind {
	case yaml.ScalarNode:
		w.Data = []string{node.Value}
		return nil
	case yaml.SequenceNode:
		return node.Decode(&w.Data)
	default:
		// Fixed typo in the error message: "excepted" -> "expected".
		return errors.New("unsupported type, expected string or array of strings")
	}
}

View File

@ -0,0 +1,45 @@
package v1
import (
"testing"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
)
// TestUnmarshalWhen verifies that a `when` clause decodes from both a
// single scalar expression and a sequence of expressions into When.Data.
func TestUnmarshalWhen(t *testing.T) {
	testcases := []struct {
		name    string
		content string
		except  []string
	}{
		{
			name: "test single string",
			content: `
when: .a | eq "b"`,
			except: []string{
				".a | eq \"b\"",
			},
		},
		{
			name: "test multi string",
			content: `
when:
- .a | eq "b"
- .b | ne "c"`,
			except: []string{
				".a | eq \"b\"",
				".b | ne \"c\"",
			},
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			var when Conditional
			err := yaml.Unmarshal([]byte(tc.content), &when)
			assert.NoError(t, err)
			assert.Equal(t, tc.except, when.When.Data)
		})
	}
}

View File

@ -16,8 +16,8 @@ limitations under the License.
package v1
// Delegatable defined in project.
type Delegatable struct {
// Delegable defined in project.
type Delegable struct {
DelegateTo string `yaml:"delegate_to,omitempty"`
DelegateFacts bool `yaml:"delegate_facts,omitempty"`
}

View File

@ -18,6 +18,8 @@ package v1
import (
"errors"
"gopkg.in/yaml.v3"
)
// Play defined in project.
@ -65,23 +67,18 @@ type PlaySerial struct {
}
// UnmarshalYAML yaml string to serial.
func (s *PlaySerial) UnmarshalYAML(unmarshal func(any) error) error {
var as []any
if err := unmarshal(&as); err == nil {
s.Data = as
func (s *PlaySerial) UnmarshalYAML(node *yaml.Node) error {
switch node.Kind {
case yaml.ScalarNode:
s.Data = []any{node.Value}
return nil
}
var a any
if err := unmarshal(&a); err == nil {
s.Data = []any{a}
return nil
}
case yaml.SequenceNode:
return node.Decode(&s.Data)
default:
return errors.New("unsupported type, excepted any or array")
}
}
// PlayHost defined in project.
type PlayHost struct {
@ -89,20 +86,15 @@ type PlayHost struct {
}
// UnmarshalYAML yaml string to play
func (p *PlayHost) UnmarshalYAML(unmarshal func(any) error) error {
var hs []string
if err := unmarshal(&hs); err == nil {
p.Hosts = hs
func (p *PlayHost) UnmarshalYAML(node *yaml.Node) error {
switch node.Kind {
case yaml.ScalarNode:
p.Hosts = []string{node.Value}
return nil
}
var h string
if err := unmarshal(&h); err == nil {
p.Hosts = []string{h}
return nil
}
case yaml.SequenceNode:
return node.Decode(&p.Hosts)
default:
return errors.New("unsupported type, excepted string or string array")
}
}

View File

@ -0,0 +1,78 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"testing"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
)
// TestUnmarshalSerial verifies PlaySerial decoding: scalar values
// (hostnames, numbers, percentages) become a single-element slice, while
// a YAML sequence decodes element-by-element with native scalar types.
func TestUnmarshalSerial(t *testing.T) {
	testcases := []struct {
		name    string
		content string
		except  []any
	}{
		{
			name: "test single string",
			content: `
host1`,
			except: []any{
				"host1",
			},
		},
		{
			// NOTE(review): a scalar keeps its raw string form ("1"),
			// whereas the same value inside a sequence decodes as int 1.
			name: "test single number",
			content: `
1`,
			except: []any{
				"1",
			},
		},
		{
			name: "test single percent",
			content: `
10%`,
			except: []any{
				"10%",
			},
		},
		{
			name: "test multi value",
			content: `
- host1
- 1
- 10%`,
			except: []any{
				"host1",
				1,
				"10%",
			},
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			var serial PlaySerial
			err := yaml.Unmarshal([]byte(tc.content), &serial)
			assert.NoError(t, err)
			assert.Equal(t, tc.except, serial.Data)
		})
	}
}

View File

@ -20,6 +20,10 @@ import (
"errors"
)
// NOTE:
// To unmarshal into a specific field of a struct, the field name must be explicitly specified.
// Otherwise, the UnmarshalYAML method associated with the field's struct will be treated as a method of the parent struct, rather than of the field itself.
// Playbook defined in project.
type Playbook struct {
Play []Play

View File

@ -0,0 +1,267 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"testing"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
)
// TestValidate checks that Playbook.Validate rejects invalid playbooks,
// e.g. a play that declares no hosts.
func TestValidate(t *testing.T) {
	testcases := []struct {
		name     string
		playbook Playbook
	}{
		{
			name: "host is empty",
			playbook: Playbook{Play: []Play{
				{
					Base: Base{
						Name: "test",
					},
				},
			}},
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Error(t, tc.playbook.Validate())
		})
	}
}
// TestUnmarshalYamlPlaybook covers the custom YAML decoding of plays:
// scalar vs. sequence hosts, scalar vs. mapping roles, scalar vs. sequence
// when clauses, single- and multi-level blocks, preservation of unknown
// module fields in Task.UnknownField, and include_tasks references.
func TestUnmarshalYamlPlaybook(t *testing.T) {
	testcases := []struct {
		name     string
		data     []byte
		excepted []Play
	}{
		{
			name: "Unmarshal hosts with single value",
			data: []byte(`---
- name: test play
hosts: localhost
`),
			excepted: []Play{
				{
					Base:     Base{Name: "test play"},
					PlayHost: PlayHost{[]string{"localhost"}},
				},
			},
		},
		{
			name: "Unmarshal hosts with multiple value",
			data: []byte(`---
- name: test play
hosts: ["control-plane", "worker"]
`),
			excepted: []Play{
				{
					Base: Base{
						Name: "test play",
					},
					PlayHost: PlayHost{[]string{"control-plane", "worker"}},
				},
			},
		},
		{
			name: "Unmarshal role with single value",
			data: []byte(`---
- name: test play
hosts: localhost
roles:
- test
`),
			excepted: []Play{
				{
					Base: Base{Name: "test play"},
					PlayHost: PlayHost{
						[]string{"localhost"},
					},
					Roles: []Role{
						{
							RoleInfo{
								Role: "test",
							},
						},
					},
				},
			},
		},
		{
			name: "Unmarshal role with map value",
			data: []byte(`---
- name: test play
hosts: localhost
roles:
- role: test
`),
			excepted: []Play{
				{
					Base: Base{Name: "test play"},
					PlayHost: PlayHost{
						[]string{"localhost"},
					},
					Roles: []Role{
						{
							RoleInfo{
								Role: "test",
							},
						},
					},
				},
			},
		},
		{
			name: "Unmarshal when with single value",
			data: []byte(`---
- name: test play
hosts: localhost
roles:
- role: test
when: "true"
`),
			excepted: []Play{
				{
					Base: Base{Name: "test play"},
					PlayHost: PlayHost{
						[]string{"localhost"},
					},
					Roles: []Role{
						{
							RoleInfo{
								Conditional: Conditional{When: When{Data: []string{"true"}}},
								Role:        "test",
							},
						},
					},
				},
			},
		},
		{
			name: "Unmarshal when with multiple value",
			data: []byte(`---
- name: test play
hosts: localhost
roles:
- role: test
when: ["true","false"]
`),
			excepted: []Play{
				{
					Base: Base{Name: "test play"},
					PlayHost: PlayHost{
						[]string{"localhost"},
					},
					Roles: []Role{
						{
							RoleInfo{
								Conditional: Conditional{When: When{Data: []string{"true", "false"}}},
								Role:        "test",
							},
						},
					},
				},
			},
		},
		{
			name: "Unmarshal single level block",
			data: []byte(`---
- name: test play
hosts: localhost
tasks:
- name: test
custom-module: abc
`),
			excepted: []Play{
				{
					Base:     Base{Name: "test play"},
					PlayHost: PlayHost{Hosts: []string{"localhost"}},
					Tasks: []Block{
						{
							BlockBase: BlockBase{Base: Base{Name: "test"}},
							Task:      Task{UnknownField: map[string]any{"custom-module": "abc"}},
						},
					},
				},
			},
		},
		{
			name: "Unmarshal multi level block",
			data: []byte(`---
- name: test play
hosts: localhost
tasks:
- name: test
block:
- name: test | test
custom-module: abc
`),
			excepted: []Play{
				{
					Base:     Base{Name: "test play"},
					PlayHost: PlayHost{Hosts: []string{"localhost"}},
					Tasks: []Block{
						{
							BlockBase: BlockBase{Base: Base{Name: "test"}},
							BlockInfo: BlockInfo{
								Block: []Block{{
									BlockBase: BlockBase{Base: Base{Name: "test | test"}},
									Task:      Task{UnknownField: map[string]any{"custom-module": "abc"}},
								}},
							},
						},
					},
				},
			},
		},
		{
			name: "Unmarshal include_tasks block",
			data: []byte(`---
- name: test play
hosts: localhost
tasks:
- include_tasks: task.yaml
`),
			excepted: []Play{
				{
					Base:     Base{Name: "test play"},
					PlayHost: PlayHost{Hosts: []string{"localhost"}},
					Tasks: []Block{
						{
							IncludeTasks: "task.yaml",
						},
					},
				},
			},
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			var pb Playbook
			err := yaml.Unmarshal(tc.data, &pb.Play)
			if err != nil {
				t.Fatal(err)
			}
			assert.Equal(t, tc.excepted, pb.Play)
		})
	}
}

51
api/project/v1/role.go Normal file
View File

@ -0,0 +1,51 @@
/*
Copyright 2023 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"gopkg.in/yaml.v3"
)
// Role is a playbook's reference to a role. In YAML it may be written as
// a bare role name or as a mapping with the full RoleInfo attributes —
// see Role.UnmarshalYAML.
type Role struct {
	RoleInfo
}

// RoleInfo holds a role reference's attributes: shared base metadata,
// conditionals, tags and collection search, plus the role name itself.
type RoleInfo struct {
	Base `yaml:",inline"`
	Conditional `yaml:",inline"`
	Taggable `yaml:",inline"`
	CollectionSearch `yaml:",inline"`
	// Role ref in playbook
	Role string `yaml:"role,omitempty"`
	// NOTE(review): Block carries no explicit yaml tag, so yaml.v3 would
	// match it against a lowercased "block" key — confirm that is intended.
	Block []Block
}
// UnmarshalYAML accepts a role reference written either as a bare string
// (just the role name) or as a mapping carrying the full RoleInfo fields.
// NOTE(review): any other node kind is silently ignored, leaving the Role
// zero-valued — confirm that is intended rather than an error case.
func (r *Role) UnmarshalYAML(node *yaml.Node) error {
	if node.Kind == yaml.MappingNode {
		return node.Decode(&r.RoleInfo)
	}
	if node.Kind == yaml.ScalarNode {
		r.Role = node.Value
	}

	return nil
}

View File

@ -7,6 +7,8 @@ ENV GOPROXY ${goproxy}
WORKDIR /workspace
COPY api/go.mod api/go.mod
COPY api/go.sum api/go.sum
COPY go.mod go.mod
COPY go.sum go.sum
@ -20,13 +22,10 @@ COPY ./ ./
ARG ldflags
ARG build_tags
ENV LDFLAGS ${ldflags}
ENV BUILDTAGS ${build_tags}
# Cache the go build in Go's compiler cache folder so we benefit from compiler caching across docker build calls
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
CGO_ENABLED=0 go build -trimpath -tags "$BUILDTAGS" -ldflags "$LDFLAGS" -o controller-manager cmd/controller-manager/controller_manager.go
CGO_ENABLED=0 go build -trimpath -tags "${build_tags}" -ldflags "${ldflags}" -o controller-manager cmd/controller-manager/controller_manager.go
FROM alpine:3.19.0
@ -34,4 +33,4 @@ WORKDIR /kubekey
COPY --from=builder /workspace/controller-manager /usr/local/bin/controller-manager
ENTRYPOINT ["sh"]
ENTRYPOINT ["sh","-c"]

View File

@ -7,6 +7,8 @@ ENV GOPROXY ${goproxy}
WORKDIR /workspace
COPY api/go.mod api/go.mod
COPY api/go.sum api/go.sum
COPY go.mod go.mod
COPY go.sum go.sum
@ -20,13 +22,10 @@ COPY ./ ./
ARG ldflags
ARG build_tags
ENV LDFLAGS ${ldflags}
ENV BUILDTAGS ${build_tags}
# Cache the go build in Go's compiler cache folder so we benefit from compiler caching across docker build calls
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
CGO_ENABLED=0 go build -trimpath -tags "$BUILDTAGS" -ldflags "$LDFLAGS" -o kk cmd/kk/kubekey.go
CGO_ENABLED=0 go build -trimpath -tags "${build_tags}" -ldflags "${ldflags}" -o kk cmd/kk/kubekey.go
FROM alpine:3.19.0
@ -40,5 +39,7 @@ RUN curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/s
COPY --from=ghcr.io/oras-project/oras:v1.1.0 /bin/oras /usr/local/bin/oras
COPY --from=builder /workspace/kk /usr/local/bin/kk
# add builtin capkk project
COPY --from=builder /workspace/builtin/capkk /capkk/project
ENTRYPOINT ["sh"]

View File

@ -1,22 +1,22 @@
.PHONY: create-role
create-role: ## create a role necessary file in roles
@echo "Creating role $(role)..."
@mkdir -p roles/$(role)/tasks
@echo "---" > roles/$(role)/tasks/main.yaml
@mkdir -p roles/$(role)/defaults
@echo "" > roles/$(role)/defaults/main.yaml
@echo "Creating role $(role) in ${base} ..."
@mkdir -p ${base}/roles/$(role)/tasks
@echo "---" > ${base}/roles/$(role)/tasks/main.yaml
@mkdir -p ${base}/roles/$(role)/defaults
@echo "" > ${base}/roles/$(role)/defaults/main.yaml
ifeq ($(VARIABLE_NAME),"full")
@mkdir -p roles/$(role)/handlers
@mkdir -p roles/$(role)/templates
@mkdir -p roles/$(role)/files
@mkdir -p roles/$(role)/vars
@mkdir -p roles/$(role)/meta
@echo "---" > roles/$(role)/handlers/main.yaml
@echo "---" > roles/$(role)/templates/main.yaml
@echo "---" > roles/$(role)/files/main.yaml
@echo "---" > roles/$(role)/vars/main.yaml
@echo "---" > roles/$(role)/defaults/main.yaml
@echo "---" > roles/$(role)/meta/main.yaml
@mkdir -p ${base}/roles/$(role)/handlers
@mkdir -p ${base}/roles/$(role)/templates
@mkdir -p ${base}/roles/$(role)/files
@mkdir -p ${base}/roles/$(role)/vars
@mkdir -p ${base}/roles/$(role)/meta
@echo "---" > ${base}/roles/$(role)/handlers/main.yaml
@echo "---" > ${base}/roles/$(role)/templates/main.yaml
@echo "---" > ${base}/roles/$(role)/files/main.yaml
@echo "---" > ${base}/roles/$(role)/vars/main.yaml
@echo "---" > ${base}/roles/$(role)/defaults/main.yaml
@echo "---" > ${base}/roles/$(role)/meta/main.yaml
endif
@echo "Role $(role) created successfully"

View File

@ -0,0 +1,51 @@
---
- hosts:
- "{{ .node_name }}"
gather_facts: true
vars_files:
- vars/main.yaml
roles:
- precheck/env_check
- name: init cluster
hosts:
- localhost
vars_files:
- vars/main.yaml
tasks:
# https://cluster-api.sigs.k8s.io/tasks/bootstrap/kubeadm-bootstrap/index.html?highlight=ntp#additional-features
- name: init cloud-config
when: .kubernetes_installed | default false | eq false
block:
- name: get cloud-config value
command: |
cat {{ .cloud_config_dir }}/cloud-config/value
register: cloud_config_out
- name: set_fact of cloud-config value
set_fact:
cloud_config: "{{ .cloud_config_out.stdout | toJson }}"
roles:
- role: init/init-artifacts
when: .kubernetes_installed | default false | eq false
- hosts:
- "{{ .node_name }}"
roles:
- role: init/init-os
when: .kubernetes_installed | default false | eq false
- name: install cluster
hosts:
- "{{ .node_name }}"
gather_facts: true
roles:
- role: install/cri
when: .kubernetes_installed | default false | eq false
- role: install/kubernetes
when: .kubernetes_installed | default false | eq false
- role: install/cloud-config
when: .kubernetes_installed | default false | eq false
- role: install/cni
when: .kubernetes_installed | default false | eq false
- role: install/storageclass
when: .kubernetes_installed | default false | eq false

View File

@ -0,0 +1,11 @@
---
- name: uninstall node
hosts:
- "{{ .node_name }}"
vars_files:
- vars/main.yaml
gather_facts: true
roles:
- uninstall/kubernetes
- uninstall/cri
- uninstall/cloud-config

View File

@ -0,0 +1,8 @@
---
- name: Check Connect
hosts:
- |
{{ .check_group | default "ungrouped" }}
tasks:
- name: Check Connect for Hosts
command: echo success

View File

@ -0,0 +1,38 @@
# the pod's WORKDIR which set by image. store the runtime files.
work_dir: /kubekey
# binary_dir may mount by pipeline. usage it shouldn't be changed.
binary_dir: /capkk/kubekey
# cloud_config_dir may mount by pipeline. usage it shouldn't be changed.
cloud_config_dir: /capkk/cloud
# tmp_dir for kubekey in remote node. it will store file like binary package, iso file etc.
tmp_dir: /tmp/kubekey
# image registry
global_registry: ""
dockerio_registry: |
{{- if ne .global_registry "" -}}
{{ .global_registry }}
{{- else -}}
docker.io
{{- end -}}
quayio_registry: |
{{- if ne .global_registry "" -}}
{{ .global_registry }}
{{- else -}}
quay.io
{{- end -}}
ghcrio_registry: |
{{- if ne .global_registry "" -}}
{{ .global_registry }}
{{- else -}}
ghcr.io
{{- end -}}
k8s_registry: |
{{- if ne .global_registry "" -}}
{{ .global_registry }}
{{- else -}}
registry.k8s.io
{{- end -}}
cri:
container_manager: docker

View File

@ -0,0 +1,185 @@
artifact:
arch: [ "amd64" ]
# offline artifact package for kk.
artifact_file: ""
# the md5_file of artifact_file.
artifact_md5: ""
artifact_url:
etcd:
amd64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-amd64.tar.gz
{{- else }}
https://github.com/etcd-io/etcd/releases/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-amd64.tar.gz
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-arm64.tar.gz
{{- else }}
https://github.com/etcd-io/etcd/releases/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-arm64.tar.gz
{{- end }}
kubeadm:
amd64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/amd64/kubeadm
{{- else }}
https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/amd64/kubeadm
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/arm64/kubeadm
{{- else }}
https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/arm64/kubeadm
{{- end }}
kubelet:
amd64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/amd64/kubelet
{{- else }}
https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/amd64/kubelet
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/arm64/kubelet
{{- else }}
https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/arm64/kubelet
{{- end }}
kubectl:
amd64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/amd64/kubectl
{{- else }}
https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/amd64/kubectl
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/arm64/kubectl
{{- else }}
https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/arm64/kubectl
{{- end }}
cni_plugins:
# NOTE: cn zone must use the qingstor mirror and the default must be upstream
# GitHub — the two branches were swapped relative to every sibling entry.
amd64: |
{{- if .kkzone | eq "cn" }}
https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ .cni_plugins_version }}/cni-plugins-linux-amd64-{{ .cni_plugins_version }}.tgz
{{- else }}
https://github.com/containernetworking/plugins/releases/download/{{ .cni_plugins_version }}/cni-plugins-linux-amd64-{{ .cni_plugins_version }}.tgz
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ .cni_plugins_version }}/cni-plugins-linux-arm64-{{ .cni_plugins_version }}.tgz
{{- else }}
https://github.com/containernetworking/plugins/releases/download/{{ .cni_plugins_version }}/cni-plugins-linux-arm64-{{ .cni_plugins_version }}.tgz
{{- end }}
helm:
amd64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-helm.pek3b.qingstor.com/helm-{{ .helm_version }}-linux-amd64.tar.gz
{{- else }}
https://get.helm.sh/helm-{{ .helm_version }}-linux-amd64.tar.gz
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-helm.pek3b.qingstor.com/helm-{{ .helm_version }}-linux-arm64.tar.gz
{{- else }}
https://get.helm.sh/helm-{{ .helm_version }}-linux-arm64.tar.gz
{{- end }}
crictl:
amd64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-amd64.tar.gz
{{- else }}
https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-amd64.tar.gz
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-arm64.tar.gz
{{- else }}
https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-arm64.tar.gz
{{- end }}
docker:
amd64: |
{{- if .kkzone | eq "cn" }}
https://mirrors.aliyun.com/docker-ce/linux/static/stable/x86_64/docker-{{ .docker_version }}.tgz
{{- else }}
https://download.docker.com/linux/static/stable/x86_64/docker-{{ .docker_version }}.tgz
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://mirrors.aliyun.com/docker-ce/linux/static/stable/aarch64/docker-{{ .docker_version }}.tgz
{{- else }}
https://download.docker.com/linux/static/stable/aarch64/docker-{{ .docker_version }}.tgz
{{- end }}
cridockerd:
amd64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.amd64.tgz
{{- else }}
https://github.com/Mirantis/cri-dockerd/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.amd64.tgz
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.arm64.tgz
{{- else }}
https://github.com/Mirantis/cri-dockerd/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.arm64.tgz
{{- end }}
containerd:
amd64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-amd64.tar.gz
{{- else }}
https://github.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-amd64.tar.gz
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-arm64.tar.gz
{{- else }}
https://github.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-arm64.tar.gz
{{- end }}
runc:
amd64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.amd64
{{- else }}
https://github.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.amd64
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.arm64
{{- else }}
https://github.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.arm64
{{- end }}
dockercompose:
amd64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-x86_64
{{- else }}
https://github.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-x86_64
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-aarch64
{{- else }}
https://github.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-aarch64
{{- end }}
# Notice: In the early calico helm chart, appVersion is not same as version(eg. v3.17.4)
calico: https://github.com/projectcalico/calico/releases/download/{{ .calico_version }}/tigera-operator-{{ .calico_version }}.tgz
calicoctl:
amd64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-amd64
{{- else }}
https://github.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-amd64
{{- end }}
arm64: |
{{- if .kkzone | eq "cn" }}
https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-arm64
{{- else }}
https://github.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-arm64
{{- end }}
# fixed typo: .fannel_version -> .flannel_version (matches the download task's `when: .flannel_version` guard)
flannel: https://github.com/flannel-io/flannel/releases/download/{{ .flannel_version }}/flannel.tgz
cilium: https://helm.cilium.io/cilium-{{ .cilium_version }}.tgz
ciliumcli:
amd64: |
https://github.com/cilium/cilium-cli/releases/download/{{ .ciliumcli_version }}/cilium-linux-amd64.tar.gz
arm64: |
https://github.com/cilium/cilium-cli/releases/download/{{ .ciliumcli_version }}/cilium-linux-arm64.tar.gz

View File

@ -0,0 +1,191 @@
---
- name: Check binaries for kube
command: |
kube_path={{ .binary_dir }}/kube/{{ .kube_version }}/{{ .item }}
if [ ! -f $kube_path/kubelet ]; then
mkdir -p $kube_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubelet .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $kube_path/kubelet {{ get .artifact.artifact_url.kubelet .item }}
fi
if [ ! -f $kube_path/kubeadm ]; then
mkdir -p $kube_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubeadm .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $kube_path/kubeadm {{ get .artifact.artifact_url.kubeadm .item }}
fi
if [ ! -f $kube_path/kubectl ]; then
mkdir -p $kube_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubectl .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $kube_path/kubectl {{ get .artifact.artifact_url.kubectl .item }}
fi
loop: "{{ .artifact.arch | toJson }}"
when: and .kube_version (ne .kube_version "")
- name: Check binaries for cni_plugins
command: |
artifact_name={{ get .artifact.artifact_url.cni_plugins .item | splitList "/" | last }}
artifact_path={{ .binary_dir }}/cni/plugins/{{ .cni_plugins_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.cni_plugins .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.cni_plugins .item }}
fi
loop: "{{ .artifact.arch | toJson }}"
when: and .cni_plugins_version (ne .cni_plugins_version "")
- name: Check binaries for ciliumcli
command: |
artifact_name={{ get .artifact.artifact_url.ciliumcli .item | splitList "/" | last }}
artifact_path={{ .binary_dir }}/cni/cilium/ciliumcli-{{ .ciliumcli_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online (copy-paste fix: probe and fetch the ciliumcli URL, not the helm URL)
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.ciliumcli .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.ciliumcli .item }}
fi
loop: "{{ .artifact.arch | toJson }}"
when:
- and .cilium_version (ne .cilium_version "")
- and .ciliumcli_version (ne .ciliumcli_version "")
- name: Check binaries for helm
command: |
artifact_name={{ get .artifact.artifact_url.helm .item | splitList "/" | last }}
artifact_path={{ .binary_dir }}/helm/{{ .helm_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.helm .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.helm .item }}
fi
loop: "{{ .artifact.arch | toJson }}"
when: and .helm_version (ne .helm_version "")
- name: Check binaries for crictl
command: |
artifact_name={{ get .artifact.artifact_url.crictl .item | splitList "/" | last }}
artifact_path={{ .binary_dir }}/crictl/{{ .crictl_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.crictl .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.crictl .item }}
fi
loop: "{{ .artifact.arch | toJson }}"
when: and .crictl_version (ne .crictl_version "")
- name: Check binaries for docker
command: |
artifact_name={{ get .artifact.artifact_url.docker .item | splitList "/" | last }}
artifact_path={{ .binary_dir }}/docker/{{ .docker_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.docker .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.docker .item }}
fi
loop: "{{ .artifact.arch | toJson }}"
when: and .docker_version (ne .docker_version "")
- name: Check binaries for cridockerd
command: |
artifact_name={{ get .artifact.artifact_url.cridockerd .item | splitList "/" | last }}
artifact_path={{ .binary_dir }}/cri-dockerd/{{ .cridockerd_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.cridockerd .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.cridockerd .item }}
fi
loop: "{{ .artifact.arch | toJson }}"
# fixed guard: previously tested .docker_version, so this task ran whenever docker_version was set
when: and .cridockerd_version (ne .cridockerd_version "")
- name: Check binaries for containerd
command: |
artifact_name={{ get .artifact.artifact_url.containerd .item | splitList "/" | last }}
artifact_path={{ .binary_dir }}/containerd/{{ .containerd_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.containerd .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.containerd .item }}
fi
loop: "{{ .artifact.arch | toJson }}"
when: and .containerd_version (ne .containerd_version "")
- name: Check binaries for runc
command: |
artifact_name={{ get .artifact.artifact_url.runc .item | splitList "/" | last }}
artifact_path={{ .binary_dir }}/runc/{{ .runc_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.runc .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.runc .item }}
fi
loop: "{{ .artifact.arch | toJson }}"
when: and .runc_version (ne .runc_version "")
- name: Check binaries for calicoctl
command: |
artifact_name=calicoctl
artifact_path={{ .binary_dir }}/cni/calico/{{ .calico_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.calicoctl .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.calicoctl .item }}
fi
loop: "{{ .artifact.arch | toJson }}"
when: and .calico_version (ne .calico_version "")

View File

@ -0,0 +1,33 @@
---
- name: Check binaries for calico
command: |
artifact_name={{ .artifact.artifact_url.calico | splitList "/" | last }}
artifact_path={{ .binary_dir }}/cni/calico
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.calico }}
fi
when: and .calico_version (ne .calico_version "")
- name: Check binaries for cilium
command: |
artifact_name={{ .artifact.artifact_url.cilium | splitList "/" | last }}
artifact_path={{ .binary_dir }}/cni/cilium
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.cilium }}
fi
when: and .cilium_version (ne .cilium_version "")
- name: Check binaries for flannel
command: |
artifact_name={{ .artifact.artifact_url.flannel | splitList "/" | last }}
artifact_path={{ .binary_dir }}/cni/flannel
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
curl -Lo $artifact_path/$artifact_name {{ .artifact.artifact_url.flannel }}
fi
when: and .flannel_version (ne .flannel_version "")

View File

@ -0,0 +1,17 @@
---
- name: Create binaries dir
command: |
mkdir -p {{ .binary_dir }}
- name: Download binaries
block:
# the binaries which download binary
- include_tasks: download_binary.yaml
# the binaries which download helm
- include_tasks: download_helm.yaml
# download remote images to local
- name: Download images
image:
pull:
manifests: "{{ .image_manifests | toJson }}"
when: .image_manifests | default list | len | lt 0

View File

@ -0,0 +1,12 @@
ntp:
servers: |
{{- with .cloud_config.ntp.servers }}
{{ . | toJson }}
{{- else }}
[ "cn.pool.ntp.org" ]
{{- end }}
enabled: |
{{ .cloud_config.ntp.enabled | default true }}
timezone: Asia/Shanghai
# set hostname by inventory_host's name which defined in inventory.yaml
set_hostname: true

View File

@ -0,0 +1,51 @@
---
- name: Configure ntp server
command: |
chronyConfigFile={{ if or (.os.release.ID | eq "ubuntu") (.os.release.ID_LIKE | eq "debian") }}"/etc/chrony/chrony.conf"{{ else }}"/etc/chrony.conf"{{ end }}
# clear old server
sed -i '/^server/d' $chronyConfigFile
sed -i 's/^pool /#pool /g' $chronyConfigFile
sed -i '/^allow/d' $chronyConfigFile
sed -i '/^local/d' $chronyConfigFile
# add base config
echo "allow 0.0.0.0/0" >> $chronyConfigFile
echo "allow ::/0" >> $chronyConfigFile
echo "local stratum 10" >> $chronyConfigFile
# add server config
{{- range $server := (.ntp.servers | fromJson) }}
{{- $internalIPv4 := "" }}
{{- $internalIPv6 := "" }}
{{- range $.inventory_hosts }}
{{- if eq .hostname $server }}
{{- $internalIPv4 = .internal_ipv4 | default "" }}
{{- $internalIPv6 = .internal_ipv6 | default "" }}
{{- end }}
{{- end }}
# add ntp server: {{ $server }}
{{- if $internalIPv4 }}
grep -q '^server {{ $internalIPv4 }} iburst' $chronyConfigFile || sed '1a server {{ $internalIPv4 }} iburst' -i $chronyConfigFile
{{- end }}
{{- if $internalIPv6 }}
grep -q '^server {{ $internalIPv6 }} iburst' $chronyConfigFile || sed '1a server [{{ $internalIPv6 }}] iburst' -i $chronyConfigFile
{{- end }}
{{- if and (eq $internalIPv4 "") (eq $internalIPv6 "") }}
grep -q '^server {{ $server }} iburst' $chronyConfigFile || sed '1a server {{ $server }} iburst' -i $chronyConfigFile
{{- end }}
{{- end }}
when:
- .ntp.enabled
- .ntp.servers | fromJson | len | lt 0
- name: Set timezone
command: |
timedatectl set-timezone {{ .timezone }}
timedatectl set-ntp {{ and .ntp.enabled (.ntp.servers | fromJson | len | lt 0) }}
when: or (and .ntp.enabled (.ntp.servers | fromJson | len | lt 0)) (.timezone | ne "")
- name: Restart ntp server
# debian-family ships the unit as chrony.service, rhel-family as chronyd.service;
# previously chronyd.service was restarted unconditionally and failed on debian/ubuntu.
command: |
{{- if or (.os.release.ID | eq "ubuntu") (.os.release.ID_LIKE | eq "debian") }}
systemctl restart chrony.service
{{- else }}
systemctl restart chronyd.service
{{- end }}
when: or (and .ntp.enabled (.ntp.servers | fromJson | len | lt 0)) (.timezone | ne "")

View File

@ -0,0 +1,79 @@
---
- name: Sync repository
block:
- name: Sync repository file
ignore_errors: true
copy:
src: |
{{ .binary_dir }}/repository/{{ .os.release.ID_LIKE }}-{{ .os.release.VERSION_ID }}-{{ .binary_type.stdout }}.iso
dest: |
{{ .tmp_dir }}/repository.iso
- name: Mount iso file
command: |
if [ -f "{{ .tmp_dir }}/repository.iso" ]; then
mount -t iso9660 -o loop {{ .tmp_dir }}/repository.iso {{ .tmp_dir }}/iso
fi
rescue:
- name: Unmount iso file
command: |
if [ -f "{{ .tmp_dir }}/repository.iso" ]; then
umount {{ .tmp_dir }}/iso
fi
- name: Init repository
block:
- name: Init debian repository
command: |
# timestamp without spaces: the previous "%Y-%m-%d %H:%M:%S" format broke the unquoted mv targets
now=$(date +"%Y-%m-%d_%H-%M-%S")
if [ -f "{{ .tmp_dir }}/repository.iso" ];then
# backup
mv /etc/apt/sources.list /etc/apt/sources.list.kubekey-$now.bak
mv /etc/apt/sources.list.d /etc/apt/sources.list.d.kubekey-$now.bak
mkdir -p /etc/apt/sources.list.d
# add repository (the iso is mounted at {{ .tmp_dir }}/iso by the previous block)
rm -rf /etc/apt/sources.list.d/*
echo 'deb [trusted=yes] file://{{ .tmp_dir }}/iso /' > /etc/apt/sources.list.d/kubekey.list
# update repository
apt-get update
# install
apt install -y socat conntrack ipset ebtables chrony ipvsadm
# reset repository: restore exactly the files backed up above
# (previously restored from "*.kubekey.bak-$now", which never matched the backup names)
rm -rf /etc/apt/sources.list.d
mv /etc/apt/sources.list.kubekey-$now.bak /etc/apt/sources.list
mv /etc/apt/sources.list.d.kubekey-$now.bak /etc/apt/sources.list.d
else
apt-get update && apt install -y socat conntrack ipset ebtables chrony ipvsadm
fi
when: .os.release.ID_LIKE | eq "debian"
- name: Init rhel repository
command: |
now=$(date +"%Y-%m-%d_%H-%M-%S")
if [ -f "{{ .tmp_dir }}/repository.iso" ];then
# backup
mv /etc/yum.repos.d /etc/yum.repos.d.kubekey-$now.bak
mkdir -p /etc/yum.repos.d
# add repository
rm -rf /etc/yum.repos.d/*
cat << EOF > /etc/yum.repos.d/CentOS-local.repo
[base-local]
name=rpms-local
baseurl=file://{{ .tmp_dir }}/iso
enabled=1
gpgcheck=0
EOF
# update repository
yum clean all && yum makecache
# install
yum install -y openssl socat conntrack ipset ebtables chrony ipvsadm
# reset repository: restore the directory backed up above
rm -rf /etc/yum.repos.d
mv /etc/yum.repos.d.kubekey-$now.bak /etc/yum.repos.d
else
# install
yum install -y openssl socat conntrack ipset ebtables chrony ipvsadm
fi
when: .os.release.ID_LIKE | eq "\"rhel fedora\""

View File

@ -0,0 +1,23 @@
---
- include_tasks: init_repository.yaml
- include_tasks: init_ntpserver.yaml
when: .ntp.enabled
- name: Set hostname
command: |
hostnamectl set-hostname {{ .inventory_name }} \
&& sed -i '/^127.0.1.1/s/.*/127.0.1.1 {{ .inventory_name }}/g' /etc/hosts
when:
- .set_hostname
- .inventory_name | ne "localhost"
- name: Sync init os to remote
template:
src: init-os.sh
dest: /etc/kubekey/scripts/init-os.sh
mode: 0755
- name: Execute init os script
command: |
/etc/kubekey/scripts/init-os.sh

View File

@ -0,0 +1,264 @@
#!/usr/bin/env bash
# Copyright 2020 The KubeSphere Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------ 1. Disable Swap and SELinux -----------------------
swapoff -a
sed -i /^[^#]*swap*/s/^/\#/g /etc/fstab
# See https://github.com/kubernetes/website/issues/14457
if [ -f /etc/selinux/config ]; then
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
fi
# for ubuntu: sudo apt install selinux-utils
# for centos: yum install selinux-policy
if command -v setenforce &> /dev/null
then
setenforce 0
getenforce
fi
# ------------------------ 2. Network Settings (Sysctl) ------------------------
echo 'net.core.netdev_max_backlog = 65535' >> /etc/sysctl.conf
echo 'net.core.rmem_max = 33554432' >> /etc/sysctl.conf
echo 'net.core.wmem_max = 33554432' >> /etc/sysctl.conf
echo 'net.core.somaxconn = 32768' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-arptables = 1' >> /etc/sysctl.conf
echo 'vm.max_map_count = 262144' >> /etc/sysctl.conf
echo 'vm.swappiness = 0' >> /etc/sysctl.conf
echo 'vm.overcommit_memory = 1' >> /etc/sysctl.conf
echo 'fs.inotify.max_user_instances = 524288' >> /etc/sysctl.conf
echo 'fs.inotify.max_user_watches = 10240001' >> /etc/sysctl.conf
echo 'fs.pipe-max-size = 4194304' >> /etc/sysctl.conf
echo 'fs.aio-max-nr = 262144' >> /etc/sysctl.conf
echo 'kernel.pid_max = 65535' >> /etc/sysctl.conf
echo 'kernel.watchdog_thresh = 5' >> /etc/sysctl.conf
echo 'kernel.hung_task_timeout_secs = 5' >> /etc/sysctl.conf
{{- if and .internal_ipv4 (.internal_ipv4 | ne "") }}
# add for ipv4
echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
echo 'net.ipv4.ip_local_reserved_ports = 30000-32767' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_max_syn_backlog = 1048576' >> /etc/sysctl.conf
echo 'net.ipv4.neigh.default.gc_thresh1 = 512' >> /etc/sysctl.conf
echo 'net.ipv4.neigh.default.gc_thresh2 = 2048' >> /etc/sysctl.conf
echo 'net.ipv4.neigh.default.gc_thresh3 = 4096' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_retries2 = 15' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_max_tw_buckets = 1048576' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_max_orphans = 65535' >> /etc/sysctl.conf
echo 'net.ipv4.udp_rmem_min = 131072' >> /etc/sysctl.conf
echo 'net.ipv4.udp_wmem_min = 131072' >> /etc/sysctl.conf
echo 'net.ipv4.conf.all.rp_filter = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.default.rp_filter = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.all.arp_accept = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.default.arp_accept = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.all.arp_ignore = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.default.arp_ignore = 1' >> /etc/sysctl.conf
{{- end }}
{{- if and .internal_ipv6 (.internal_ipv6 | ne "") }}
# add for ipv6
echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.conf
echo 'net.ipv6.conf.all.disable_ipv6 = 0' >> /etc/sysctl.conf
echo 'net.ipv6.conf.default.disable_ipv6 = 0' >> /etc/sysctl.conf
echo 'net.ipv6.conf.lo.disable_ipv6 = 0' >> /etc/sysctl.conf
echo 'net.ipv6.conf.all.forwarding=1' >> /etc/sysctl.conf
echo 'net.ipv6.conf.default.accept_dad=0' >> /etc/sysctl.conf
echo 'net.ipv6.route.max_size=65536' >> /etc/sysctl.conf
echo 'net.ipv6.neigh.default.retrans_time_ms=1000' >> /etc/sysctl.conf
{{- end }}
# ------------------------ 3. Tweaks for Specific Networking Configurations -----
#See https://help.aliyun.com/document_detail/118806.html#uicontrol-e50-ddj-w0y
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-arptables ?= ?(0|1)@net.bridge.bridge-nf-call-arptables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?vm.max_map_count ?= ?([0-9]{1,})@vm.max_map_count = 262144@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?vm.swappiness ?= ?([0-9]{1,})@vm.swappiness = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?fs.inotify.max_user_instances ?= ?([0-9]{1,})@fs.inotify.max_user_instances = 524288@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?kernel.pid_max ?= ?([0-9]{1,})@kernel.pid_max = 65535@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?vm.overcommit_memory ?= ?(0|1|2)@vm.overcommit_memory = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?fs.inotify.max_user_watches ?= ?([0-9]{1,})@fs.inotify.max_user_watches = 524288@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?fs.pipe-max-size ?= ?([0-9]{1,})@fs.pipe-max-size = 4194304@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.core.netdev_max_backlog ?= ?([0-9]{1,})@net.core.netdev_max_backlog = 65535@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.core.rmem_max ?= ?([0-9]{1,})@net.core.rmem_max = 33554432@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.core.wmem_max ?= ?([0-9]{1,})@net.core.wmem_max = 33554432@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.core.somaxconn ?= ?([0-9]{1,})@net.core.somaxconn = 32768@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?fs.aio-max-nr ?= ?([0-9]{1,})@fs.aio-max-nr = 262144@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?kernel.watchdog_thresh ?= ?([0-9]{1,})@kernel.watchdog_thresh = 5@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?kernel.hung_task_timeout_secs ?= ?([0-9]{1,})@kernel.hung_task_timeout_secs = 5@g" /etc/sysctl.conf
{{- if and .internal_ipv4 (.internal_ipv4 | ne "") }}
sed -r -i "s@#{0,}?net.ipv4.tcp_tw_recycle ?= ?(0|1|2)@net.ipv4.tcp_tw_recycle = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.tcp_tw_reuse ?= ?(0|1)@net.ipv4.tcp_tw_reuse = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.all.rp_filter ?= ?(0|1|2)@net.ipv4.conf.all.rp_filter = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.default.rp_filter ?= ?(0|1|2)@net.ipv4.conf.default.rp_filter = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.ip_forward ?= ?(0|1)@net.ipv4.ip_forward = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-iptables ?= ?(0|1)@net.bridge.bridge-nf-call-iptables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.ip_local_reserved_ports ?= ?([0-9]{1,}-{0,1},{0,1}){1,}@net.ipv4.ip_local_reserved_ports = 30000-32767@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.tcp_max_syn_backlog ?= ?([0-9]{1,})@net.ipv4.tcp_max_syn_backlog = 1048576@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.neigh.default.gc_thresh1 ?= ?([0-9]{1,})@net.ipv4.neigh.default.gc_thresh1 = 512@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.neigh.default.gc_thresh2 ?= ?([0-9]{1,})@net.ipv4.neigh.default.gc_thresh2 = 2048@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.neigh.default.gc_thresh3 ?= ?([0-9]{1,})@net.ipv4.neigh.default.gc_thresh3 = 4096@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.eth0.arp_accept ?= ?(0|1)@net.ipv4.conf.eth0.arp_accept = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.tcp_retries2 ?= ?([0-9]{1,})@net.ipv4.tcp_retries2 = 15@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.tcp_max_tw_buckets ?= ?([0-9]{1,})@net.ipv4.tcp_max_tw_buckets = 1048576@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.tcp_max_orphans ?= ?([0-9]{1,})@net.ipv4.tcp_max_orphans = 65535@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.udp_rmem_min ?= ?([0-9]{1,})@net.ipv4.udp_rmem_min = 131072@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.udp_wmem_min ?= ?([0-9]{1,})@net.ipv4.udp_wmem_min = 131072@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.all.arp_ignore ?= ??(0|1|2)@net.ipv4.conf.all.arp_ignore = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.default.arp_ignore ?= ??(0|1|2)@net.ipv4.conf.default.arp_ignore = 1@g" /etc/sysctl.conf
{{- end }}
{{- if and .internal_ipv6 (.internal_ipv6 | ne "") }}
#add for ipv6
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-ip6tables ?= ?(0|1)@net.bridge.bridge-nf-call-ip6tables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv6.conf.all.disable_ipv6 ?= ?([0-9]{1,})@net.ipv6.conf.all.disable_ipv6 = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv6.conf.default.disable_ipv6 ?= ?([0-9]{1,})@net.ipv6.conf.default.disable_ipv6 = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv6.conf.lo.disable_ipv6 ?= ?([0-9]{1,})@net.ipv6.conf.lo.disable_ipv6 = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv6.conf.all.forwarding ?= ?([0-9]{1,})@net.ipv6.conf.all.forwarding = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv6.conf.default.accept_dad ?= ?([0-9]{1,})@net.ipv6.conf.default.accept_dad = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv6.route.max_size ?= ?([0-9]{1,})@net.ipv6.route.max_size = 65536@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv6.neigh.default.retrans_time_ms ?= ?([0-9]{1,})@net.ipv6.neigh.default.retrans_time_ms = 1000@g" /etc/sysctl.conf
{{- end }}
tmpfile="$$.tmp"
awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/sysctl.conf
mv $tmpfile /etc/sysctl.conf
# ------------------------ 4. Security Limit ------------------------------------
# ulimit
echo "* soft nofile 1048576" >> /etc/security/limits.conf
echo "* hard nofile 1048576" >> /etc/security/limits.conf
echo "* soft nproc 65536" >> /etc/security/limits.conf
echo "* hard nproc 65536" >> /etc/security/limits.conf
echo "* soft memlock unlimited" >> /etc/security/limits.conf
echo "* hard memlock unlimited" >> /etc/security/limits.conf
sed -r -i "s@#{0,}?\* soft nofile ?([0-9]{1,})@\* soft nofile 1048576@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* hard nofile ?([0-9]{1,})@\* hard nofile 1048576@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* soft nproc ?([0-9]{1,})@\* soft nproc 65536@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* hard nproc ?([0-9]{1,})@\* hard nproc 65536@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* soft memlock ?([0-9]{1,}([TGKM]B){0,1}|unlimited)@\* soft memlock unlimited@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* hard memlock ?([0-9]{1,}([TGKM]B){0,1}|unlimited)@\* hard memlock unlimited@g" /etc/security/limits.conf
tmpfile="$$.tmp"
awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/security/limits.conf
mv $tmpfile /etc/security/limits.conf
# ------------------------ 5. Firewall Configurations ---------------------------
systemctl stop firewalld 1>/dev/null 2>/dev/null
systemctl disable firewalld 1>/dev/null 2>/dev/null
systemctl stop ufw 1>/dev/null 2>/dev/null
systemctl disable ufw 1>/dev/null 2>/dev/null
# ------------------------ 6. System Module Settings ----------------------------
modinfo br_netfilter > /dev/null 2>&1
if [ $? -eq 0 ]; then
modprobe br_netfilter
mkdir -p /etc/modules-load.d
echo 'br_netfilter' > /etc/modules-load.d/kubekey-br_netfilter.conf
fi
# Load the overlay kernel module (used by containerd's overlayfs snapshotter)
# and persist it across reboots via modules-load.d.
modinfo overlay > /dev/null 2>&1
if [ $? -eq 0 ]; then
   modprobe overlay
   # The directory is only created in the br_netfilter branch above, which may
   # have been skipped; ensure it exists before appending.
   mkdir -p /etc/modules-load.d
   echo 'overlay' >> /etc/modules-load.d/kubekey-br_netfilter.conf
fi
# ------------------------ 7. IPTables and Connection Tracking -----------------
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
cat > /etc/modules-load.d/kube_proxy-ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
EOF
# Prefer the legacy nf_conntrack_ipv4 module; on newer kernels (>= 4.19) it
# was merged into nf_conntrack, so fall back to that.
modprobe nf_conntrack_ipv4 1>/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
   # Append (not truncate): the ip_vs modules were already written to this
   # file by the heredoc above.
   echo 'nf_conntrack_ipv4' >> /etc/modules-load.d/kube_proxy-ipvs.conf
else
   modprobe nf_conntrack
   echo 'nf_conntrack' >> /etc/modules-load.d/kube_proxy-ipvs.conf
fi
sysctl -p
# ------------------------ 8. Local Host DNS Configuration ---------------------
sed -i ':a;$!{N;ba};s@# kubekey hosts BEGIN.*# kubekey hosts END@@' /etc/hosts
sed -i '/^$/N;/\n$/N;//D' /etc/hosts
cat >>/etc/hosts<<EOF
# kubekey hosts BEGIN
# kubernetes hosts
{{- range .groups.k8s_cluster | default list }}
{{- $hostname := index $.inventory_hosts . "hostname" -}}
{{- $clusterName := $.kubernetes.cluster_name | default "kubekey" -}}
{{- $dnsDomain := $.kubernetes.networking.dns_domain | default "cluster.local" -}}
{{- if and (index $.inventory_hosts . "internal_ipv4") (ne (index $.inventory_hosts . "internal_ipv4") "") }}
{{ index $.inventory_hosts . "internal_ipv4" }} {{ $hostname }} {{ printf "%s.%s" $hostname $clusterName }} {{ printf "%s.%s.%s" $hostname $clusterName $dnsDomain }}
{{- end }}
{{- if and (index $.inventory_hosts . "internal_ipv6") (ne (index $.inventory_hosts . "internal_ipv6") "") }}
{{ index $.inventory_hosts . "internal_ipv6" }} {{ $hostname }} {{ printf "%s.%s" $hostname $clusterName }} {{ printf "%s.%s.%s" $hostname $clusterName $dnsDomain }}
{{- end }}
{{- end }}
# etcd hosts
{{- range .groups.etcd | default list }}
{{- if and (index $.inventory_hosts . "internal_ipv4") (ne (index $.inventory_hosts . "internal_ipv4") "") }}
{{ index $.inventory_hosts . "internal_ipv4" }} {{ index $.inventory_hosts . "hostname" }}
{{- end }}
{{- if and (index $.inventory_hosts . "internal_ipv6") (ne (index $.inventory_hosts . "internal_ipv6") "") }}
{{ index $.inventory_hosts . "internal_ipv6" }} {{ index $.inventory_hosts . "hostname" }}
{{- end }}
{{- end }}
# image registry hosts
{{- range .groups.image_registry | default list }}
{{- if and (index $.inventory_hosts . "internal_ipv4") (ne (index $.inventory_hosts . "internal_ipv4") "") }}
{{ index $.inventory_hosts . "internal_ipv4" }} {{ index $.inventory_hosts . "hostname" }}
{{- end }}
{{- if and (index $.inventory_hosts . "internal_ipv6") (ne (index $.inventory_hosts . "internal_ipv6") "") }}
{{ index $.inventory_hosts . "internal_ipv6" }} {{ index $.inventory_hosts . "hostname" }}
{{- end }}
{{- end }}
# nfs hosts
{{- range .groups.nfs | default list }}
{{- if and (index $.inventory_hosts . "internal_ipv4") (ne (index $.inventory_hosts . "internal_ipv4") "") }}
{{ index $.inventory_hosts . "internal_ipv4" }} {{ index $.inventory_hosts . "hostname" }}
{{- end }}
{{- if and (index $.inventory_hosts . "internal_ipv6") (ne (index $.inventory_hosts . "internal_ipv6") "") }}
{{ index $.inventory_hosts . "internal_ipv6" }} {{ index $.inventory_hosts . "hostname" }}
{{- end }}
{{- end }}
# kubekey hosts END
EOF
sync
echo 3 > /proc/sys/vm/drop_caches
# Make sure the iptables utility doesn't use the nftables backend.
{{- if and .internal_ipv4 (.internal_ipv4 | ne "") }}
update-alternatives --set iptables /usr/sbin/iptables-legacy >/dev/null 2>&1 || true
{{- end }}
{{- if and .internal_ipv6 (.internal_ipv6 | ne "") }}
update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy >/dev/null 2>&1 || true
{{- end }}
update-alternatives --set arptables /usr/sbin/arptables-legacy >/dev/null 2>&1 || true
update-alternatives --set ebtables /usr/sbin/ebtables-legacy >/dev/null 2>&1 || true

View File

@ -0,0 +1,5 @@
kubernetes:
control_plane_endpoint:
kube_vip:
image: |
{{ .dockerio_registry }}/plndr/kube-vip:v0.7.2

View File

@ -0,0 +1,143 @@
---
- name: Install kube-vip
when:
- eq .kubernetes.control_plane_endpoint.type "vip"
- or (.kubernetes.roles | has "master") (.kubernetes.roles | has "control-plane")
template:
src: kube-vip.yaml
dest: /etc/kubernetes/manifests/kube-vip.yaml
# not support "encoding","append","owner".
- name: Deal cloud-config write_files
loop: "{{ .cloud_config.write_files | toJson }}"
copy:
content: "{{ .item.content }}"
dest: "{{ .item.path }}"
mode: "{{ .item.permissions }}"
- name: Deal cloud-config users
loop: "{{ .cloud_config.users | toJson }}"
command: |
#!/bin/bash
if id "{{ .item.name }}" &>/dev/null; then
echo "User '{{ .item.name }}' already exists"
exit 0
fi
# Create user '{{ .item.name }}' with bash shell and home directory
useradd "{{ .item.name }}"
{{- if .item.passwd }}
# Set password
echo "{{ .item.name }}:{{ .item.passwd }}" | chpasswd
{{- end }}
{{- if .item.gecos }}
# Set gecos information
usermod -c "{{ .item.gecos }}" "{{ .item.name }}"
{{- end }}
{{- if .item.home_dir }}
# Set home directory
usermod -d "{{ .item.home_dir }}" "{{ .item.name }}"
{{- end }}
{{- if .item.shell }}
# Set shell
usermod -s "{{ .item.shell }}" "{{ .item.name }}"
{{- end }}
{{- if .item.primary_group }}
# Set primary group
usermod -g "{{ .item.primary_group }}" "{{ .item.name }}"
{{- end }}
{{- if .item.lock_passwd }}
# Lock password
usermod -L "{{ .item.name }}"
{{- end }}
{{- if .item.groups }}
# Add user to groups
usermod -aG "{{ .item.groups }}" "{{ .item.name }}"
{{- end }}
{{- if .item.sudo }}
# Add user to sudoers
echo "{{ .item.name }} {{ .item.sudo }}" > "/etc/sudoers.d/{{ .item.name }}"
{{- end }}
{{- if .item.ssh_authorized_keys }}
# Add SSH authorized keys
mkdir -p "{{ .item.home_dir }}/.ssh"
echo "{{ .item.ssh_authorized_keys }}" > "{{ .item.home_dir }}/.ssh/authorized_keys"
chown -R "{{ .item.name }}" "{{ .item.home_dir }}/.ssh"
chmod 700 "{{ .item.home_dir }}/.ssh"
chmod 600 "{{ .item.home_dir }}/.ssh/authorized_keys"
{{- end }}
# Create partition tables (and optionally a single full-disk partition) for
# each entry in cloud_config.disk_setup. Skips disks that already appear in
# lsblk output.
- name: Deal cloud-config disk_setup
  when: .cloud_config.disk_setup
  command: |
    #!/bin/bash
    {{- range $_, $disk := .cloud_config.disk_setup }}
    if lsblk | grep -q "{{ $disk.device }}"; then
      echo "Disk {{ $disk.device }} already configured, skipping."
    else
      echo "Configuring disk {{ $disk.device }}"
      # create the partition table on '{{ $disk.device }}'
      {{- if eq $disk.table_type "gpt" }}
      parted "{{ $disk.device }}" mklabel gpt
      {{- else if eq $disk.table_type "mbr" }}
      parted "{{ $disk.device }}" mklabel msdos
      {{- end }}
      {{- if $disk.layout }}
      # create a single partition for the entire disk
      parted -a optimal "{{ $disk.device }}" mkpart primary ext4 0% 100%
      {{- end }}
    fi
    {{- end }}
# Create a filesystem on each cloud_config.fs_setup device. If the device (or
# the given partition) already carries a filesystem it is only overwritten
# when `overwrite` is set.
- name: Deal cloud-config fs_setup
  loop: "{{ .cloud_config.fs_setup | toJson }}"
  command: |
    #!/bin/bash
    DEVICE="{{ .item.device }}"
    {{- if .item.partition }}
    DEVICE="${DEVICE}{{ .item.partition | atoi }}"
    {{- end }}
    if blkid "$DEVICE" &>/dev/null; then
      echo "Filesystem already exists on $DEVICE"
      {{- if .item.overwrite }}
      # overwrite requested: recreate the filesystem in place
      echo "Overwriting filesystem on $DEVICE..."
      mkfs -t "{{ .item.filesystem }}"{{ if .item.label }} -L "{{ .item.label }}"{{ end }}{{ range .item.extra_opts }} "{{ . }}"{{ end }} "$DEVICE"
      {{- end }}
    else
      echo "Creating filesystem on $DEVICE..."
      mkfs -t "{{ .item.filesystem }}"{{ if .item.label }} -L "{{ .item.label }}"{{ end }}{{ range .item.extra_opts }} "{{ . }}"{{ end }} "$DEVICE"
    fi
- name: Deal cloud-config mount
loop: "{{ .cloud_config.mounts | toJson }}"
command: |
#!/bin/bash
MOUNT_POINT="{{ last .item }}"
if mountpoint -q "$MOUNT_POINT"; then
echo "Mount point $MOUNT_POINT already mounted, skipping."
else
echo "Mounting {{ first .item }} to $MOUNT_POINT..."
mount -L "{{ first .item }}" "$MOUNT_POINT"
echo "LABEL={{ first .item }} $MOUNT_POINT ext4 defaults 0 0" >> /etc/fstab
fi
- name: Deal runcmd
loop: "{{ .cloud_config.runcmd | toJson }}"
command: "{{ .item }}"
- name: Sync kubeconfig
copy:
src: |
{{ .cloud_config_dir }}/kubeconfig/value
dest: /root/.kube/config
mode: 0600
- name: Label kubernetes role
loop: "{{ .kubernetes.roles | toJson }}"
command: |
#!/bin/bash
kubectl label node "{{ .hostname }}" node-role.kubernetes.io/{{ .item }}="" --overwrite

View File

@ -0,0 +1,62 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: address
value: {{ .kubernetes.control_plane_endpoint.kube_vip.address }}
- name: vip_interface
value: ""
- name: vip_arp
value: "true"
- name: port
value: "6443"
- name: vip_cidr
value: "32"
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_ddns
value: "false"
- name: svc_enable
value: "true"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: lb_enable
value: "true"
- name: lb_port
value: "6443"
image: {{ .kubernetes.control_plane_endpoint.kube_vip.image }}
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostNetwork: true
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
volumes:
- hostPath:
path: /etc/kubernetes/admin.conf
type: FileOrCreate
name: kubeconfig

View File

@ -0,0 +1,113 @@
cni:
type: |
{{ .kubernetes.kube_network_plugin | default "calico" }}
# ip cidr config.
# In Kubernetes, the Pod CIDR supports both IPv4 and IPv6 configurations. It can be specified as follows:
# "Single-stack IPv4": the pod_cidr value format "ipv4"
# "Single-stack IPv6": the pod_cidr value format "ipv6"
# "Dual-stack (IPv4 and IPv6)": the pod_cidr value format "ipv4,ipv6"
ipv4_support: |
{{- eq (.cluster_network.pods.cidrBlocks | first | ipFamily) "IPv4" }}
ipv4_pods_cidr: |
{{- if eq (.cluster_network.pods.cidrBlocks | first | ipFamily) "IPv4" }}
{{- .cluster_network.pods.cidrBlocks | first }}
{{- end }}
ipv4_block_size: 24
ipv6_support: |
{{- eq (.cluster_network.pods.cidrBlocks | last | ipFamily) "IPv6" }}
ipv6_pods_cidr: |
{{- if eq (.cluster_network.pods.cidrBlocks | last | ipFamily) "IPv6" }}
{{- .cluster_network.pods.cidrBlocks | last }}
{{- end }}
ipv6_block_size: 120
kube_svc_cidr: |
{{ .cluster_network.service.cidrBlocks | join "," }}
calico:
values: |
# calico helm values
installation:
registry: {{ .dockerio_registry }}
calicoNetwork:
bgp: Enabled
cilium:
values: |
# cilium helm values
image:
repository: {{ .quayio_registry }}/cilium/cilium-cli
certgen:
image:
repository: {{ .quayio_registry }}/cilium/certgen
hubble:
relay:
image:
repository: {{ .quayio_registry }}/cilium/hubble-relay-ci
ui:
backend:
image:
repository: {{ .quayio_registry }}/cilium/hubble-ui-backend
frontend:
image:
repository: {{ .quayio_registry }}/cilium/hubble-ui
envoy:
image:
repository: {{ .quayio_registry }}/cilium/cilium-envoy
operator:
replicas: 2
image:
repository: {{ .quayio_registry }}/cilium/operator
nodeinit:
image:
repository: {{ .quayio_registry }}/cilium/startup-script
preflight:
image:
repository: {{ .quayio_registry }}/cilium/cilium-ci
clustermesh:
apiserver:
image:
repository: {{ .quayio_registry }}/cilium/clustermesh-apiserver-ci
authentication:
mutual:
spire:
install:
initImage:
repository: {{ .dockerio_registry }}/library/busybox
agent:
image:
repository: {{ .ghcrio_registry }}/spiffe/spire-agent
server:
image:
repository: {{ .ghcrio_registry }}/spiffe/spire-server
ipv4:
enabled: {{ .cni.ipv4_support }}
ipv6:
enabled: {{ .cni.ipv6_support }}
ipam:
operator:
{{- if .cni.ipv4_support }}
clusterPoolIPv4PodCIDRList:
- {{ .cni.ipv4_pods_cidr }}
clusterPoolIPv4MaskSize: {{ .cni.ipv4_block_size }}
{{- end }}
{{- if .cni.ipv6_support }}
clusterPoolIPv6PodCIDRList:
- {{ .cni.ipv6_pods_cidr }}
clusterPoolIPv6MaskSize: {{ .cni.ipv6_block_size }}
{{- end }}
{{- if not (.kubernetes.kube_proxy.enabled | default true) }}
kubeProxyReplacement: "true"
k8sServiceHost: {{ .kubernetes.control_plane_endpoint.host }}
k8sServicePort: {{ .kubernetes.control_plane_endpoint.port }}
{{- end }}
flannel:
  # https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md
  values: |
    # flannel helm values
    # NOTE: the defined keys above are ipv4_pods_cidr / ipv6_pods_cidr
    podCidr: {{ .cni.ipv4_pods_cidr }}
    podCidrv6: {{ .cni.ipv6_pods_cidr }}
    flannel:
      image:
        repository: {{ .dockerio_registry }}/flannel/flannel
      image_cni:
        repository: {{ .dockerio_registry }}/flannel/flannel-cni-plugin
      # support "vxlan" and "host-gw"
      backend: vxlan

View File

@ -0,0 +1,32 @@
---
- name: Check if calicoctl is installed
ignore_errors: true
command: calicoctl version
register: calicoctl_install_version
- name: Install calicoctl
when: .calicoctl_install_version.stderr | ne ""
block:
- name: Sync calicoctl to remote
copy:
src: |
{{ .binary_dir }}/cni/calico/{{ .calico_version }}/{{ .binary_type.stdout }}/calicoctl
dest: /usr/local/bin/calicoctl
mode: 0755
- name: Sync calico package to remote
copy:
src: |
{{ .binary_dir }}/cni/calico/tigera-operator-{{ .calico_version }}.tgz
dest: |
/etc/kubernetes/cni/tigera-operator-{{ .calico_version }}.tgz
- name: Generate calico custom value file
copy:
content: |
{{ .cni.calico.values }}
dest: |
/etc/kubernetes/cni/calico-values.yaml
- name: Apply calico
command: |
helm install --create-namespace --namespace tigera-operator calico /etc/kubernetes/cni/tigera-operator-{{ .calico_version }}.tgz -f /etc/kubernetes/cni/calico-values.yaml

View File

@ -0,0 +1,27 @@
---
# Copy the cilium CLI binary to the remote host.
# NOTE(review): the src path references "{{ .item }}" but this task defines no
# `loop`, so .item is undefined here — confirm the intended file name
# (presumably the extracted `cilium` binary or its release tarball) and either
# add a loop or replace "{{ .item }}" with it.
- name: Sync cilium cli package
  when: and .ciliumcli_version (ne .ciliumcli_version "")
  copy:
    src: |
      {{ .binary_dir }}/cni/cilium/ciliumcli-{{ .ciliumcli_version }}/{{ .item }}
    dest: |
      /usr/local/bin/cilium
- name: Sync cilium helm chart package
copy:
src: |
{{ .binary_dir }}/cni/cilium/cilium-{{ .cilium_version }}.tgz
dest: |
/etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz
- name: Sync cilium helm chart custom value file
copy:
content: |
{{ .cni.cilium.values }}
dest: |
/etc/kubernetes/cni/cilium-values.yaml
# https://docs.cilium.io/en/stable/installation/k8s-install-helm/
- name: Install cilium
command: |
helm install --namespace kube-system cilium /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz -f /etc/kubernetes/cni/cilium-values.yaml

View File

@ -0,0 +1,19 @@
---
# https://github.com/flannel-io/flannel/blob/master/Documentation/kubernetes.md
- name: Sync flannel package to remote
copy:
src: |
{{ .binary_dir }}/cni/flannel/flannel.tgz
dest: |
/etc/kubernetes/cni/flannel.tgz
- name: Generate flannel custom value file
copy:
content: |
{{ .cni.flannel.values }}
dest: |
/etc/kubernetes/cni/flannel-values.yaml
- name: Apply flannel
command: |
helm install --create-namespace --namespace kube-flannel flannel /etc/kubernetes/cni/flannel.tgz -f /etc/kubernetes/cni/flannel-values.yaml

View File

@ -0,0 +1,19 @@
---
- name: check cni by helm
command: helm list -a -A -q -o json 2>/dev/null
register: installed_helm_packages
- include_tasks: calico.yaml
when:
- .cni.type | eq "calico"
- .installed_helm_packages.stdout | has "calico" | not
- include_tasks: flannel.yaml
when:
- .cni.type | eq "flannel"
- .installed_helm_packages.stdout | has "flannel" | not
- include_tasks: cilium.yaml
when:
- .cni.type | eq "cilium"
- .installed_helm_packages.stdout | has "cilium" | not

View File

@ -0,0 +1,19 @@
cri:
# support: systemd, cgroupfs
cgroup_driver: systemd
sandbox_image: |
{{ .k8s_registry }}/pause:3.5
# support: containerd,docker,crio
# the endpoint of containerd
cri_socket: |
{{- if .cri.container_manager | eq "containerd" }}
unix:///var/run/containerd.sock
{{- end }}
containerd:
data_root: /var/lib/containerd
docker:
data_root: /var/lib/docker
registry:
mirrors: ["https://registry-1.docker.io"]
insecure_registries: []
auths: []

View File

@ -0,0 +1,40 @@
---
- name: Check if runc is installed
ignore_errors: true
command: runc --version
register: runc_install_version
- name: Sync runc binary to remote
when: or (.runc_install_version.stderr | ne "") (.runc_install_version.stdout | contains (printf "runc version %s\n" (.runc_version | default "" | trimPrefix "v" )) | not)
copy:
src: |
{{ .binary_dir }}/runc/{{ .runc_version }}/{{ .binary_type.stdout }}/runc.{{ .binary_type.stdout }}
dest: /usr/local/bin/runc
mode: 0755
- name: Check if containerd is installed
ignore_errors: true
command: containerd --version
register: containerd_install_version
- name: Install containerd
when: or (.containerd_install_version.stderr | ne "") (.containerd_install_version.stdout | contains (printf " %s " .containerd_version) | not)
block:
- name: Sync containerd binary to remote
copy:
src: |
{{ .binary_dir }}/containerd/{{ .containerd_version }}/{{ .binary_type.stdout }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type.stdout }}.tar.gz
dest: |
{{ .tmp_dir }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type.stdout }}.tar.gz
- name: Unpackage containerd binary
command: |
tar -xvf {{ .tmp_dir }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin/
- name: Generate containerd config file
template:
src: containerd.config
dest: /etc/containerd/config.toml
- name: Generate containerd Service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
- name: Start containerd
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service

View File

@ -0,0 +1,22 @@
---
- name: Check if crictl is installed
ignore_errors: true
command: crictl --version
register: crictl_install_version
- name: Install crictl
when: or (.crictl_install_version.stderr | ne "") (.crictl_install_version.stdout | ne (printf "crictl version %s" .crictl_version))
block:
- name: Sync crictl binary to remote
copy:
src: |
{{ .binary_dir }}/crictl/{{ .crictl_version }}/{{ .binary_type.stdout }}/crictl-{{ .crictl_version }}-linux-{{ .binary_type.stdout }}.tar.gz
dest: |
{{ .tmp_dir }}/crictl-{{ .crictl_version }}-linux-{{ .binary_type.stdout }}.tar.gz
- name: Unpackage crictl binary
command: |
tar -xvf {{ .tmp_dir }}/crictl-{{ .crictl_version }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin/
- name: Generate crictl config file
template:
src: crictl.config
dest: /etc/crictl.yaml

View File

@ -0,0 +1,29 @@
---
- name: Check if cri-dockerd is installed
ignore_errors: true
command: cri-dockerd --version
register: cridockerd_install_version
- name: Install cri-dockerd
when: or (.cridockerd_install_version.stderr | ne "") (.cridockerd_install_version.stdout | hasPrefix (printf "cri-dockerd %s " .cridockerd_version) | not)
block:
- name: Sync cri-dockerd Binary to remote
copy:
src: |
{{ .binary_dir }}/cri-dockerd/{{ .cridockerd_version }}/{{ .binary_type.stdout }}/cri-dockerd-{{ .cridockerd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
dest: |
{{ .tmp_dir }}/cri-dockerd-{{ .cridockerd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
- name: Generate cri-dockerd config file
template:
src: cri-dockerd.config
dest: /etc/cri-dockerd.yaml
- name: Unpackage cri-dockerd binary
command: |
tar -xvf {{ .tmp_dir }}/cri-dockerd-{{ .cridockerd_version }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin/
- name: Generate cri-dockerd Service file
template:
src: cri-dockerd.service
dest: /etc/systemd/system/cri-dockerd.service
- name: Start cri-dockerd service
command: |
systemctl daemon-reload && systemctl start cri-dockerd.service && systemctl enable cri-dockerd.service

View File

@ -0,0 +1,34 @@
---
- name: Check if docker is installed
ignore_errors: true
command: docker --version
register: docker_install_version
- name: Install docker
when: or (.docker_install_version.stderr | ne "") (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
block:
- name: Sync docker binary to remote
copy:
src: |
{{ .binary_dir }}/docker/{{ .docker_version }}/{{ .binary_type.stdout }}/docker-{{ .docker_version }}.tgz
dest: |
{{ .tmp_dir }}/docker-{{ .docker_version }}.tgz
- name: Unpackage docker binary
command: |
tar -C /usr/local/bin/ --strip-components=1 -xvf {{ .tmp_dir }}/docker-{{ .docker_version }}.tgz --wildcards docker/*
- name: Generate docker config file
template:
src: docker.config
dest: /etc/docker/daemon.json
- name: Generate docker service file
copy:
src: docker.service
dest: /etc/systemd/system/docker.service
- name: Generate containerd service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
- name: Start docker service
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service

View File

@ -0,0 +1,17 @@
---
# install crictl
- include_tasks: install_crictl.yaml
# install docker
- include_tasks: install_docker.yaml
when: .cri.container_manager | eq "docker"
# install containerd
- include_tasks: install_containerd.yaml
when: .cri.container_manager | eq "containerd"
# install cridockerd
- include_tasks: install_cridockerd.yaml
when:
- .cri.container_manager | eq "docker"
- .kube_version | semverCompare ">=v1.24.0"

View File

@ -0,0 +1,77 @@
version = 2
root = {{ .cri.containerd.data_root | default "/var/lib/containerd" }}
state = "/run/containerd"
[grpc]
address = "/run/containerd/containerd.sock"
uid = 0
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
[ttrpc]
address = ""
uid = 0
gid = 0
[debug]
address = ""
uid = 0
gid = 0
level = ""
[metrics]
address = ""
grpc_histogram = false
[cgroup]
path = ""
[timeouts]
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "{{ .cri.sandbox_image }}"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  runtime_type = "io.containerd.runc.v2"
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    # must match the kubelet's cgroup driver; the original template had a
    # stray ')' inside the action, which is a parse error
    SystemdCgroup = {{ if .cri.cgroup_driver | eq "systemd" }}true{{ else }}false{{ end }}
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
max_conf_num = 1
conf_template = ""
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
{{- if .cri.registry.mirrors | len | lt 0 }}
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = {{ .cri.registry.mirrors | toJson }}
{{- end }}
{{- range .cri.registry.insecure_registries }}
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ . }}"]
endpoint = ["http://{{ . }}"]
{{- end }}
{{- if .cri.registry.auths | len | lt 0 }}
[plugins."io.containerd.grpc.v1.cri".registry.configs]
{{- range .cri.registry.auths }}
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .repo }}".auth]
  username = "{{ .username }}"
  password = "{{ .password }}"
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .repo }}".tls]
  {{- if .ca_file }}
  # TOML string values must be quoted; bare paths are invalid TOML
  ca_file = "{{ .ca_file }}"
  {{- end }}
  {{- if .crt_file }}
  cert_file = "{{ .crt_file }}"
  {{- end }}
  {{- if .key_file }}
  key_file = "{{ .key_file }}"
  {{- end }}
  insecure_skip_verify = {{ .skip_ssl | default true }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,79 @@
---
- name: Check if helm is installed
ignore_errors: true
command: helm version
register: helm_install_version
- name: Install helm
when: or (.helm_install_version.stderr | ne "") (.helm_install_version.stdout | contains (printf "Version:\"%s\"" .helm_version) | not)
block:
- name: Sync helm to remote
copy:
src: |
{{ .binary_dir }}/helm/{{ .helm_version }}/{{ .binary_type.stdout }}/helm-{{ .helm_version }}-linux-{{ .binary_type.stdout }}.tar.gz
dest: |
{{ .tmp_dir }}/helm-{{ .helm_version }}-linux-{{ .binary_type.stdout }}.tar.gz
- name: Install helm
command: |
tar --strip-components=1 -zxvf {{ .tmp_dir }}/helm-{{ .helm_version }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin linux-{{ .binary_type.stdout }}/helm
- name: Check if kubeadm is installed
ignore_errors: true
command: kubeadm version -o short
register: kubeadm_install_version
- name: Install kubeadm
when: or (.kubeadm_install_version.stderr | ne "") (.kubeadm_install_version.stdout | ne .kube_version)
copy:
src: |
{{ .binary_dir }}/kube/{{ .kube_version }}/{{ .binary_type.stdout }}/kubeadm
dest: /usr/local/bin/kubeadm
mode: 0755
- name: Check if kubectl is installed
  ignore_errors: true
  command: kubectl version --short
  register: kubectl_install_version
# Re-sync kubectl when it is missing or reports a different version.
# The previous condition applied `get` (a dict accessor) to the plain stdout
# string, which always yields "" and therefore never matched .kube_version;
# a substring check against the raw version output is used instead.
- name: Sync kubectl to remote
  when: |
    or (.kubectl_install_version.stderr | ne "") (.kubectl_install_version.stdout | contains .kube_version | not)
  copy:
    src: |
      {{ .binary_dir }}/kube/{{ .kube_version }}/{{ .binary_type.stdout }}/kubectl
    dest: /usr/local/bin/kubectl
    mode: 0755
- name: Check if kubelet is installed
ignore_errors: true
command: kubelet --version
register: kubelet_install_version
- name: Install kubelet
when: or (.kubelet_install_version.stderr | ne "") (.kubelet_install_version.stdout | ne (printf "Kubernetes %s" .kube_version))
block:
- name: Sync kubelet to remote
copy:
src: |
{{ .binary_dir }}/kube/{{ .kube_version }}/{{ .binary_type.stdout }}/kubelet
dest: /usr/local/bin/kubelet
mode: 0755
- name: Sync kubelet env to remote
template:
src: kubeadm/kubelet.env
dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
- name: Sync kubelet service to remote
copy:
src: kubelet.service
dest: /etc/systemd/system/kubelet.service
- name: Register kubelet service
command: systemctl daemon-reload && systemctl enable kubelet.service
- name: Install cni plugins
when: and .cni_plugins_version (ne .cni_plugins_version "")
block:
- name: Sync cni-plugin to remote
copy:
src: |
{{ .binary_dir }}/cni/plugins/{{ .cni_plugins_version }}/{{ .binary_type.stdout }}/cni-plugins-linux-{{ .binary_type.stdout }}-{{ .cni_plugins_version }}.tgz
dest: |
{{ .tmp_dir }}/cni-plugins-linux-{{ .binary_type.stdout }}-{{ .cni_plugins_version }}.tgz
- name: Install cni-plugin
command: |
tar -zxvf {{ .tmp_dir }}/cni-plugins-linux-{{ .binary_type.stdout }}-{{ .cni_plugins_version }}.tgz -C /opt/cni/bin/

View File

@ -0,0 +1,18 @@
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generate at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
{{- $internalIPv4 := .internal_ipv4 | default "" }}
{{- $internalIPv6 := .internal_ipv6 | default "" }}
{{- if ne $internalIPv4 "" }}
Environment="KUBELET_EXTRA_ARGS=--node-ip={{ $internalIPv4 }} --hostname-override={{ .hostname }} {{ range $k,$v := .kubernetes.kubelet.extra_args }}--{{ $k }} {{ $v }} {{ end }}"
{{- else if ne $internalIPv6 "" }}
Environment="KUBELET_EXTRA_ARGS=--node-ip={{ $internalIPv6 }} --hostname-override={{ .hostname }} {{ range $k,$v := .kubernetes.kubelet.extra_args }}--{{ $k }} {{ $v }} {{ end }}"
{{- end }}
ExecStart=
ExecStart=/usr/local/bin/kubelet --provider-id=kk://{{ .kubernetes.cluster_name }}/{{ .node_name }} $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS

View File

@ -0,0 +1,18 @@
---
- name: Stop if container manager is not docker or containerd
assert:
that: .cluster_require.require_container_manager | has .cri.container_manager
fail_msg: |
the container manager:{{ .cri.container_manager }}, must be {{ .cluster_require.require_container_manager | toJson }}
run_once: true
when: and .cri.container_manager (ne .cri.container_manager "")
- name: Ensure minimum containerd version
assert:
that: .containerd_version | semverCompare (printf ">=%s" .cluster_require.containerd_min_version_required)
fail_msg: |
containerd_version is too low. Minimum version {{ .cluster_require.containerd_min_version_required }}
run_once: true
when:
- and .containerd_version (ne .containerd_version "")
- .cri.container_manager | eq "containerd"

View File

@ -0,0 +1,54 @@
# Kubernetes prechecks: host addressing, kube-vip endpoint validity,
# minimum supported Kubernetes version, and detection of an existing
# kubelet installation on cluster nodes.

# Every host must declare at least one internal address (IPv4 or IPv6).
- name: Should defined internal_ipv4 or internal_ipv6
  assert:
    that: or (and .internal_ipv4 (ne .internal_ipv4 "")) (and .internal_ipv6 (ne .internal_ipv6 ""))
    fail_msg: |
      "internal_ipv4" and "internal_ipv6" cannot both be empty

# When the control-plane endpoint type is kube_vip, the VIP must be set, must
# be a syntactically valid IPv4/IPv6 address, and must not collide with any
# host's internal_ipv4.
# NOTE(review): only internal_ipv4 collisions are checked; an IPv6 VIP that
# equals a host's internal_ipv6 would pass — confirm whether that is intended.
- name: Check kubevip if valid
  run_once: true
  assert:
    that:
      - and .kubernetes.control_plane_endpoint.kube_vip.address (ne .kubernetes.control_plane_endpoint.kube_vip.address "")
      - .kubernetes.control_plane_endpoint.kube_vip.address | regexMatch "^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])|(([0-9a-fA-F]{1,4}:){7}([0-9a-fA-F]{1,4}|:)|(([0-9a-fA-F]{1,4}:){1,6}|:):([0-9a-fA-F]{1,4}|:){1,6}([0-9a-fA-F]{1,4}|:)))$"
      - |
        {{- $existIP := false }}
        {{- range .groups.all | default list }}
        {{- if eq $.kubernetes.control_plane_endpoint.kube_vip.address (index $.inventory_hosts . "internal_ipv4") }}
        {{- $existIP = true }}
        {{- end }}
        {{- end }}
        {{ not $existIP }}
    fail_msg: |
      "kubernetes.control_plane_endpoint.kube_vip.address" should be an unused ip address.
  when: .kubernetes.control_plane_endpoint.type | eq "kube_vip"

# Reject Kubernetes versions older than the minimum this release supports.
- name: Stop if unsupported version of Kubernetes
  run_once: true
  assert:
    that: .kube_version | semverCompare (printf ">=%s" .cluster_require.kube_version_min_required)
    fail_msg: |
      the current release of KubeKey only supports newer versions of Kubernetes than {{ .cluster_require.kube_version_min_required }} - You are trying to apply {{ .kube_version }}
  when: and .kube_version (ne .kube_version "")

# Detect an existing kubelet on k8s_cluster nodes; when one is active, its
# version must match the requested kube_version, and the fact
# kubernetes_installed is recorded for later plays.
- name: Check if kubernetes installed
  when: .groups.k8s_cluster | default list | has .inventory_name
  block:
    # ignore_errors: the service may simply not exist on a fresh node.
    - name: Get kubernetes service
      ignore_errors: true
      command: systemctl is-active kubelet.service
      register: kubernetes_install_service
    - name: Get kubernetes version
      ignore_errors: true
      command: kubelet --version
      register: kubernetes_install_version
    # Only an *active* kubelet counts as an existing installation.
    - name: Check kubernetes service and version
      when: .kubernetes_install_service.stdout | eq "active"
      block:
        # `kubelet --version` prints "Kubernetes vX.Y.Z"; strip the prefix
        # before comparing against kube_version.
        - name: Check kubernetes version
          assert:
            that: .kubernetes_install_version.stdout | default "" | trimPrefix "Kubernetes " | eq .kube_version
            fail_msg: |
              kubernetes is already installed with version: {{ .kubernetes_install_version.stdout | default "" | trimPrefix "Kubernetes " }}, which does not match kube_version: {{ .kube_version }}
        - name: Set_Fact kubernetes_installed
          set_fact:
            kubernetes_installed: true

View File

@ -0,0 +1,12 @@
---
# Entry point for the precheck role: runs each themed precheck task file.
# kubernetes.yaml is untagged and therefore always runs; the others can be
# selected individually via tags (os / network / cri).
- include_tasks: kubernetes.yaml
- include_tasks: os.yaml
  tags: ["os"]
- include_tasks: network.yaml
  tags: ["network"]
- include_tasks: cri.yaml
  tags: ["cri"]

View File

@ -0,0 +1,79 @@
---
# Network prechecks: validate pod/service CIDR syntax, dual-stack ordering
# requirements, and the selected network plugin.
# - name: Should found network interface
#   command: |
#     {{- if and .internal_ipv4 (.internal_ipv4 | ne "") }}
#     if [ ! ip -o addr show | grep -q {{ .internal_ipv4 }} ]; then
#       echo "No ipv4 network interface found"
#       exit 1
#     fi
#     {{- end }}
#     {{- if and .internal_ipv6 (.internal_ipv6 | ne "") }}
#     if [ ! ip -o addr show | grep -q {{ .internal_ipv6 }} ]; then
#       echo "No ipv6 network interface found"
#       exit 1
#     fi
#     {{- end }}
# https://kubernetes.io/docs/concepts/services-networking/dual-stack/
- name: Stop if cidr is not valid
  block:
    # A CIDR list may hold at most two entries (single-stack or dual-stack).
    # Pipe semantics: `len | ge 2` evaluates as `ge 2 <len>`, i.e. len <= 2.
    - name: Stop if pod cidr is not valid
      when: and .kubernetes.networking.pod_cidr (.kubernetes.networking.pod_cidr | ne "")
      assert:
        that: .kubernetes.networking.pod_cidr | splitList "," | len | ge 2
        fail_msg: |
          "kubernetes.networking.pod_cidr" should be ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr
    - name: Stop if service cidr is not valid
      when: and .kubernetes.networking.service_cidr (.kubernetes.networking.service_cidr | ne "")
      assert:
        that: .kubernetes.networking.service_cidr | splitList "," | len | ge 2
        fail_msg: |
          "kubernetes.networking.service_cidr" should be ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr
    # Dual-stack (two CIDRs) requires k8s >= v1.20.0 and IPv4-first ordering.
    - name: Stop if pod networking is not support dual-stack
      when:
        - and .kubernetes.networking.pod_cidr (.kubernetes.networking.pod_cidr | ne "")
        - .kubernetes.networking.pod_cidr | splitList "," | len | eq 2
      assert:
        that:
          - .kube_version | semverCompare ">=v1.20.0"
          - .kubernetes.networking.pod_cidr | splitList "," | first | ipFamily | eq "IPv4"
          - .kubernetes.networking.pod_cidr | splitList "," | last | ipFamily | eq "IPv6"
        fail_msg: |
          Kubernetes introduced support for pod dual-stack networking starting from version v1.20.0.
    - name: Stop if service networking is not support dual-stack
      when:
        - and .kubernetes.networking.service_cidr (.kubernetes.networking.service_cidr | ne "")
        - .kubernetes.networking.service_cidr | splitList "," | len | eq 2
      assert:
        that:
          - .kube_version | semverCompare ">=v1.20.0"
          - .kubernetes.networking.service_cidr | splitList "," | first | ipFamily | eq "IPv4"
          - .kubernetes.networking.service_cidr | splitList "," | last | ipFamily | eq "IPv6"
        fail_msg: |
          Kubernetes introduced support for service dual-stack networking starting from version v1.20.0.
# The configured plugin must be one of cluster_require.require_network_plugin.
- name: Stop if unknown network plugin
  assert:
    that: .cluster_require.require_network_plugin | has .kubernetes.kube_network_plugin
    fail_msg: |
      kube_network_plugin:"{{ .kubernetes.kube_network_plugin }}" is not supported
  when: and .kubernetes.kube_network_plugin (ne .kubernetes.kube_network_plugin "")
# # This assertion will fail on the safe side: One can indeed schedule more pods
# # on a node than the CIDR-range has space for when additional pods use the host
# # network namespace. It is impossible to ascertain the number of such pods at
# # provisioning time, so to establish a guarantee, we factor these out.
# # NOTICE: the check blatantly ignores the inet6-case
# - name: Guarantee that enough network address space is available for all pods
#   when: .groups.k8s_cluster | default list | has .inventory_name
#   block:
#     - name: Guarantee that enough ipv4 network address space is available for all pods
#       when: .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | first | ipFamily | eq "IPv4"
#       assert:
#         that: le .kubernetes.kubelet.max_pods (sub (pow 2 (sub 32 .kubernetes.networking.ipv4_mask_size | default 24 | float64)) 2)
#         fail_msg: do not schedule more pods on a node than ipv4 inet addresses are available.
#     - name: Guarantee that enough ipv6 network address space is available for all pods
#       when: .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | last | ipFamily | eq "IPv6"
#       assert:
#         that: le .kubernetes.kubelet.max_pods (sub (pow 2 (sub 128 .kubernetes.networking.ipv6_mask_size | default 64 | float64)) 2)
#         fail_msg: do not schedule more pods on a node than ipv6 inet addresses are available.

View File

@ -0,0 +1,8 @@
---
# Remove every user declared in cloud_config.users, including the user's
# home directory and mail spool (userdel -r).
# NOTE(review): userdel fails if the user does not exist or has running
# processes — confirm whether that should abort the play or be ignored.
- name: deal cloud-config users
  loop: "{{ .cloud_config.users | toJson }}"
  command: |
    #!/bin/bash
    # Delete user '{{ .item.name }}'
    userdel -r "{{ .item.name }}"

View File

@ -0,0 +1,5 @@
# Default data directories for the supported container runtimes.
# These are the upstream defaults; override them in the inventory/config
# to relocate runtime state (images, containers, snapshots).
cri:
  containerd:
    data_root: /var/lib/containerd
  docker:
    data_root: /var/lib/docker

View File

@ -0,0 +1,19 @@
---
# Uninstall the container runtime matching cri.container_manager,
# then remove leftover CLI binaries.
- name: Stop containerd
  include_tasks: uninstall_containerd.yaml
  when: .cri.container_manager | eq "containerd"
- name: Stop docker
  include_tasks: uninstall_docker.yaml
  when: .cri.container_manager | eq "docker"
# uninstall cri-dockerd — only present for docker with Kubernetes >= v1.24
# (where dockershim was removed) and when a cridockerd_version was set.
- include_tasks: uninstall_cridockerd.yaml
  when:
    - .cri.container_manager | eq "docker"
    - .kube_version | semverCompare ">=v1.24.0"
    - and .cridockerd_version (.cridockerd_version | ne "")
# Remove the crictl binary installed alongside the runtime.
- name: Delete residue files
  command: |
    rm -f /usr/local/bin/crictl

Some files were not shown because too many files have changed in this diff Show More