diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 95a0d6b0..9a816e68 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,10 +19,10 @@ jobs: GO111MODULE: on steps: - - name: Set up Go 1.18 + - name: Set up Go 1.19 uses: actions/setup-go@v3 with: - go-version: 1.18 + go-version: 1.19 id: go - name: Check out code into the Go module directory diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index e6f1e887..2fc78ea2 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: 1.18 + go-version: 1.19 - name: golangci-lint uses: golangci/golangci-lint-action@v3.2.0 with: diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 3b330195..e2afc14a 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -19,7 +19,7 @@ jobs: - name: Install go uses: actions/setup-go@v3 with: - go-version: '^1.18' + go-version: '^1.19' - name: generate release artifacts run: | make release diff --git a/.golangci.yml b/.golangci.yml index 941efda2..4680dc30 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -124,14 +124,16 @@ linters-settings: # CAPKK - pkg: github.com/kubesphere/kubekey/api/v1beta1 alias: infrav1 + - pkg: github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1 + alias: infrabootstrapv1 nolintlint: allow-unused: false allow-leading-space: false require-specific: true staticcheck: - go: "1.18" + go: "1.19" stylecheck: - go: "1.18" + go: "1.19" gosec: excludes: - G307 # Deferring unsafe method "Close" on type "\*os.File" @@ -156,8 +158,6 @@ linters-settings: - commentFormatting - filepathJoin - commentedOutCode - unused: - go: "1.18" issues: max-same-issues: 0 max-issues-per-linter: 0 @@ -166,83 +166,75 @@ issues: exclude-use-default: false exclude-rules: - linters: - - revive + - revive text: "exported: exported 
method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported" - linters: - - errcheck + - errcheck text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked - # Exclude some packages or code to require comments, for example test code, or fake clients. + # Exclude revive's exported for certain packages and code, e.g. tests and fake. - linters: - - revive + - revive text: exported (method|function|type|const) (.+) should have comment or be unexported source: (func|type).*Fake.* - linters: - - revive + - revive text: exported (method|function|type|const) (.+) should have comment or be unexported path: fake_\.go - linters: - - revive + - revive text: exported (method|function|type|const) (.+) should have comment or be unexported - path: cmd/clusterctl/internal/test/providers.*.go + path: .*test/(providers|framework|e2e).*.go - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - path: "(framework|e2e)/.*.go" - # Disable unparam "always receives" which might not be really - # useful when building libraries. + - errcheck + text: Error return value is not checked + path: _test\.go - linters: - - unparam + - errcheck + text: Error return value of (.+) is not checked + path: _test\.go + - linters: + - gosec + text: "G108: Profiling endpoint is automatically exposed on /debug/pprof" + - linters: + - godot + text: "Comment should end in a period" + path: "(.*)/(v1beta1|v1beta2)/(.*)types.go" + - linters: + - errcheck + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + # With Go 1.16, the new embed directive can be used with an un-named import, + # revive (previously, golint) only allows these to be imported in a main.go, which wouldn't work for us. 
+ # This directive allows the embed package to be imported with an underscore everywhere. + - linters: + - revive + source: _ "embed" + # This directive allows the variable in defaults.go files to have underscore + - linters: + - revive + text: "var-naming: don't use underscores in Go names; func (.+) should be (.+)" + path: .*/defaults.go + # Disable unparam "always receives" which might not be really + # useful when building libraries. + - linters: + - unparam text: always receives - # Dot imports for gomega or ginkgo are allowed - # within test files. + # Dot imports for gomega or ginkgo are allowed + # within test files. - path: _test\.go text: should not use dot imports - path: (framework|e2e)/.*.go text: should not use dot imports - path: _test\.go text: cyclomatic complexity - # Append should be able to assign to a different var/slice. - linters: - - gocritic + - unparam + text: (.+) - (`t`|`g`) is unused + - path: _test\.go + text: cyclomatic complexity + # Append should be able to assign to a different var/slice. + - linters: + - gocritic text: "appendAssign: append result not assigned to the same slice" - # ifshort flags variables that are only used in the if-statement even though there is - # already a SimpleStmt being used in the if-statement in question. 
- - linters: - - ifshort - text: "variable .* is only used in the if-statement" - path: controllers/mdutil/util.go - # Disable linters for conversion - - linters: - - staticcheck - text: "SA1019: in.(.+) is deprecated" - path: .*(api|types)\/.*\/conversion.*\.go$ - - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - path: .*(api|types|test)\/.*\/conversion.*\.go$ - - linters: - - revive - text: "var-naming: don't use underscores in Go names;" - path: .*(api|types|test)\/.*\/conversion.*\.go$ - - linters: - - revive - text: "receiver-naming: receiver name" - path: .*(api|types)\/.*\/conversion.*\.go$ - - linters: - - stylecheck - text: "ST1003: should not use underscores in Go names;" - path: .*(api|types|test)\/.*\/conversion.*\.go$ - - linters: - - stylecheck - text: "ST1016: methods on the same type should have the same receiver name" - path: .*(api|types)\/.*\/conversion.*\.go$ - # hack/tools - - linters: - - typecheck - text: import (".+") is a program, not an importable package - path: ^tools\.go$ - - # We don't care about defer in for loops in test files. - linters: - gocritic @@ -262,4 +254,3 @@ run: - "vendored_openapi\\.go$" - "cmd" allow-parallel-runners: true - go: '1.18' diff --git a/Dockerfile b/Dockerfile index 6627cd19..3c2aeadb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,7 +10,7 @@ WORKDIR /tmp RUN apk add --no-cache ca-certificates # Build the manager binary -FROM golang:1.18 as builder +FROM golang:1.19 as builder # Run this with docker build --build_arg $(go env GOPROXY) to override the goproxy ARG goproxy=https://goproxy.cn,direct diff --git a/Makefile b/Makefile index 192314fa..09e2430d 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ SHELL:=/usr/bin/env bash # # Go. 
# -GO_VERSION ?= 1.18.3 +GO_VERSION ?= 1.19.2 GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION) # Use GOPROXY environment variable if set @@ -72,9 +72,17 @@ REGISTRY ?= docker.io/kubespheredev PROD_REGISTRY ?= docker.io/kubesphere # capkk -CAPKK_IMAGE_NAME ?= capkk-manager +CAPKK_IMAGE_NAME ?= capkk-controller CAPKK_CONTROLLER_IMG ?= $(REGISTRY)/$(CAPKK_IMAGE_NAME) +# bootstrap +K3S_BOOTSTRAP_IMAGE_NAME ?= k3s-bootstrap-controller +K3S_BOOTSTRAP_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_BOOTSTRAP_IMAGE_NAME) + +# control plane +K3S_CONTROL_PLANE_IMAGE_NAME ?= k3s-control-plane-controller +K3S_CONTROL_PLANE_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME) + # It is set by Prow GIT_TAG, a git-based tag of the form vYYYYMMDD-hash, e.g., v20210120-v0.3.10-308-gc61521971 TAG ?= dev @@ -110,7 +118,7 @@ help: ## Display this help. ##@ generate: -ALL_GENERATE_MODULES = capkk +ALL_GENERATE_MODULES = capkk k3s-bootstrap k3s-control-plane .PHONY: generate generate: ## Run all generate-manifests-*, generate-go-deepcopy-* targets @@ -131,17 +139,55 @@ generate-manifests-capkk: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e output:webhook:dir=./config/webhook \ webhook +.PHONY: generate-manifests-k3s-bootstrap +generate-manifests-k3s-bootstrap: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for core + $(MAKE) clean-generated-yaml SRC_DIRS="./bootstrap/k3s/config/crd/bases" + $(CONTROLLER_GEN) \ + paths=./bootstrap/k3s/api/... \ + crd:crdVersions=v1 \ + rbac:roleName=manager-role \ + output:crd:dir=./bootstrap/k3s/config/crd/bases \ + output:rbac:dir=./bootstrap/k3s/config/rbac \ + output:webhook:dir=./bootstrap/k3s/config/webhook \ + webhook + +.PHONY: generate-manifests-k3s-control-plane +generate-manifests-k3s-control-plane: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. 
for core + $(MAKE) clean-generated-yaml SRC_DIRS="./controlplane/k3s/config/crd/bases" + $(CONTROLLER_GEN) \ + paths=./controlplane/k3s/api/... \ + crd:crdVersions=v1 \ + rbac:roleName=manager-role \ + output:crd:dir=./controlplane/k3s/config/crd/bases \ + output:rbac:dir=./controlplane/k3s/config/rbac \ + output:webhook:dir=./controlplane/k3s/config/webhook \ + webhook + .PHONY: generate-go-deepcopy generate-go-deepcopy: ## Run all generate-go-deepcopy-* targets $(MAKE) $(addprefix generate-go-deepcopy-,$(ALL_GENERATE_MODULES)) .PHONY: generate-go-deepcopy-capkk -generate-go-deepcopy-capkk: $(CONTROLLER_GEN) ## Generate deepcopy go code for core +generate-go-deepcopy-capkk: $(CONTROLLER_GEN) ## Generate deepcopy go code for capkk $(MAKE) clean-generated-deepcopy SRC_DIRS="./api" $(CONTROLLER_GEN) \ object:headerFile=./hack/boilerplate.go.txt \ paths=./api/... \ +.PHONY: generate-go-deepcopy-k3s-bootstrap +generate-go-deepcopy-k3s-bootstrap: $(CONTROLLER_GEN) ## Generate deepcopy go code for k3s-bootstrap + $(MAKE) clean-generated-deepcopy SRC_DIRS="./bootstrap/k3s/api" + $(CONTROLLER_GEN) \ + object:headerFile=./hack/boilerplate.go.txt \ + paths=./bootstrap/k3s/api/... \ + +.PHONY: generate-go-deepcopy-k3s-control-plane +generate-go-deepcopy-k3s-control-plane: $(CONTROLLER_GEN) ## Generate deepcopy go code for k3s-control-plane + $(MAKE) clean-generated-deepcopy SRC_DIRS="./controlplane/k3s/api" + $(CONTROLLER_GEN) \ + object:headerFile=./hack/boilerplate.go.txt \ + paths=./controlplane/k3s/api/... 
\ + .PHONY: generate-modules generate-modules: ## Run go mod tidy to ensure modules are up to date go mod tidy @@ -194,7 +240,7 @@ verify-gen: generate ## Verify go generated files are up to date kk: CGO_ENABLED=0 go build -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/kk github.com/kubesphere/kubekey/cmd/kk; -ALL_MANAGERS = capkk +ALL_MANAGERS = capkk k3s-bootstrap k3s-control-plane .PHONY: managers managers: $(addprefix manager-,$(ALL_MANAGERS)) ## Run all manager-* targets @@ -203,6 +249,14 @@ managers: $(addprefix manager-,$(ALL_MANAGERS)) ## Run all manager-* targets manager-capkk: ## Build the capkk manager binary into the ./bin folder go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/manager github.com/kubesphere/kubekey +.PHONY: manager-k3s-bootstrap +manager-k3s-bootstrap: ## Build the k3s bootstrap manager binary into the ./bin folder + go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-bootstrap-manager github.com/kubesphere/kubekey/bootstrap/k3s + +.PHONY: manager-k3s-control-plane +manager-k3s-control-plane: ## Build the k3s control plane manager binary into the ./bin folder + go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-control-plane-manager github.com/kubesphere/kubekey/controlplane/k3s + .PHONY: docker-pull-prerequisites docker-pull-prerequisites: docker pull docker.io/docker/dockerfile:1.4 @@ -214,19 +268,33 @@ docker-build-all: $(addprefix docker-build-,$(ALL_ARCH)) ## Build docker images docker-build-%: $(MAKE) ARCH=$* docker-build -ALL_DOCKER_BUILD = capkk +ALL_DOCKER_BUILD = capkk k3s-bootstrap k3s-control-plane + +.PHONY: docker-build +docker-build: docker-pull-prerequisites ## Run docker-build-* targets for all providers + $(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(ALL_DOCKER_BUILD)) .PHONY: docker-build-capkk docker-build-capkk: ## Build the docker image for capkk DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg 
ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG) - $(MAKE) set-manifest-image MANIFEST_IMG=$(CAPKK_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) + $(MAKE) set-manifest-image MANIFEST_IMG=$(CAPKK_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./config/default/manager_image_patch.yaml" $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./config/default/manager_pull_policy.yaml" +.PHONY: docker-build-k3s-bootstrap +docker-build-k3s-bootstrap: ## Build the docker image for k3s bootstrap controller manager + DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./bootstrap/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG) + $(MAKE) set-manifest-image MANIFEST_IMG=$(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_image_patch.yaml" + $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_pull_policy.yaml" + +.PHONY: docker-build-k3s-control-plane +docker-build-k3s-control-plane: ## Build the docker image for k3s control plane controller manager + DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./controlplane/k3s --build-arg ldflags="$(LDFLAGS)" . 
-t $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG) + $(MAKE) set-manifest-image MANIFEST_IMG=$(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./controlplane/k3s/config/default/manager_image_patch.yaml" + $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./controlplane/k3s/config/default/manager_pull_policy.yaml" + .PHONY: docker-build-e2e docker-build-e2e: ## Build the docker image for capkk - DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t "$(CAPKK_CONTROLLER_IMG):e2e" - $(MAKE) set-manifest-image MANIFEST_IMG=$(CAPKK_CONTROLLER_IMG) MANIFEST_TAG="e2e" TARGET_RESOURCE="./config/default/manager_image_patch.yaml" - $(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./config/default/manager_pull_policy.yaml" + $(MAKE) docker-build REGISTRY=docker.io/kubespheredev PULL_POLICY=IfNotPresent TAG=e2e ## -------------------------------------- ## Deployment @@ -293,6 +361,10 @@ test-cover: ## Run unit and integration tests and generate a coverage report test-e2e: ## Run e2e tests $(MAKE) -C $(TEST_DIR)/e2e run +.PHONY: test-e2e-k3s +test-e2e-k3s: ## Run e2e tests + $(MAKE) -C $(TEST_DIR)/e2e run-k3s + ## -------------------------------------- ## Release ## -------------------------------------- @@ -380,6 +452,7 @@ release-templates: $(RELEASE_DIR) ## Generate release templates .PHONY: docker-push docker-push: ## Push the docker images docker push $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG) + docker push $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG) .PHONY: set-manifest-pull-policy set-manifest-pull-policy: diff --git a/api/v1beta1/kkcluster_types.go b/api/v1beta1/kkcluster_types.go index 3f2375fe..0fd125cd 100644 --- a/api/v1beta1/kkcluster_types.go +++ b/api/v1beta1/kkcluster_types.go @@ -26,10 +26,17 @@ const ( // ClusterFinalizer allows ReconcileKKCluster to clean up KK resources associated 
with KKCluster before // removing it from the apiserver. ClusterFinalizer = "kkcluster.infrastructure.cluster.x-k8s.io" + + // KUBERNETES is the Kubernetes distribution. + KUBERNETES = "kubernetes" + // K3S is the K3s distribution. + K3S = "k3s" ) // KKClusterSpec defines the desired state of KKCluster type KKClusterSpec struct { + // Distribution represents the Kubernetes distribution type of the cluster. + Distribution string `json:"distribution,omitempty"` // Nodes represents the information about the nodes available to the cluster Nodes Nodes `json:"nodes"` diff --git a/api/v1beta1/kkcluster_webhook.go b/api/v1beta1/kkcluster_webhook.go index fb15025d..b9af8f5d 100644 --- a/api/v1beta1/kkcluster_webhook.go +++ b/api/v1beta1/kkcluster_webhook.go @@ -56,10 +56,20 @@ var _ webhook.Defaulter = &KKCluster{} func (k *KKCluster) Default() { kkclusterlog.Info("default", "name", k.Name) + defaultDistribution(&k.Spec) defaultAuth(&k.Spec.Nodes.Auth) defaultInstance(&k.Spec) } +func defaultDistribution(spec *KKClusterSpec) { + if spec.Distribution == "" { + spec.Distribution = "kubernetes" + } + if spec.Distribution == "k8s" { + spec.Distribution = "kubernetes" + } +} + func defaultAuth(auth *Auth) { if auth.User == "" { auth.User = defaultSSHUser @@ -101,6 +111,7 @@ func (k *KKCluster) ValidateCreate() error { kkclusterlog.Info("validate create", "name", k.Name) var allErrs field.ErrorList + allErrs = append(allErrs, validateDistribution(k.Spec)...) allErrs = append(allErrs, validateClusterNodes(k.Spec.Nodes)...) allErrs = append(allErrs, validateLoadBalancer(k.Spec.ControlPlaneLoadBalancer)...) 
@@ -143,6 +154,20 @@ func (k *KKCluster) ValidateDelete() error { return nil } +func validateDistribution(spec KKClusterSpec) []*field.Error { + var errs field.ErrorList + path := field.NewPath("spec", "distribution") + switch spec.Distribution { + case K3S: + return errs + case KUBERNETES: + return errs + default: + errs = append(errs, field.NotSupported(path, spec.Distribution, []string{K3S, KUBERNETES})) + } + return errs +} + func validateLoadBalancer(loadBalancer *KKLoadBalancerSpec) []*field.Error { var errs field.ErrorList path := field.NewPath("spec", "controlPlaneLoadBalancer") diff --git a/api/v1beta1/kkclustertemplate_webhook.go b/api/v1beta1/kkclustertemplate_webhook.go index 97eb39f0..ce365602 100644 --- a/api/v1beta1/kkclustertemplate_webhook.go +++ b/api/v1beta1/kkclustertemplate_webhook.go @@ -44,6 +44,7 @@ var _ webhook.Defaulter = &KKClusterTemplate{} func (r *KKClusterTemplate) Default() { kkclustertemplatelog.Info("default", "name", r.Name) + defaultDistribution(&r.Spec.Template.Spec) defaultAuth(&r.Spec.Template.Spec.Nodes.Auth) defaultInstance(&r.Spec.Template.Spec) } @@ -57,6 +58,7 @@ func (r *KKClusterTemplate) ValidateCreate() error { kkclustertemplatelog.Info("validate create", "name", r.Name) var allErrs field.ErrorList + allErrs = append(allErrs, validateDistribution(r.Spec.Template.Spec)...) allErrs = append(allErrs, validateClusterNodes(r.Spec.Template.Spec.Nodes)...) allErrs = append(allErrs, validateLoadBalancer(r.Spec.Template.Spec.ControlPlaneLoadBalancer)...) 
diff --git a/bootstrap/k3s/PROJECT b/bootstrap/k3s/PROJECT new file mode 100644 index 00000000..6a3da5c4 --- /dev/null +++ b/bootstrap/k3s/PROJECT @@ -0,0 +1,35 @@ +domain: cluster.x-k8s.io +layout: +- go.kubebuilder.io/v3 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} +projectName: k3s +repo: github.com/kubesphere/kubekey/bootstrap/k3s +resources: +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cluster.x-k8s.io + group: bootstrap + kind: K3sConfig + path: github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1 + version: v1beta1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + domain: cluster.x-k8s.io + group: bootstrap + kind: K3sConfigTemplate + path: github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1 + version: v1beta1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1 +version: "3" diff --git a/bootstrap/k3s/api/v1beta1/groupversion_info.go b/bootstrap/k3s/api/v1beta1/groupversion_info.go new file mode 100644 index 00000000..3c387174 --- /dev/null +++ b/bootstrap/k3s/api/v1beta1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1beta1 contains API Schema definitions for the bootstrap v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=bootstrap.cluster.x-k8s.io +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "bootstrap.cluster.x-k8s.io", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/bootstrap/k3s/api/v1beta1/k3sconfig.go b/bootstrap/k3s/api/v1beta1/k3sconfig.go new file mode 100644 index 00000000..4f0fdc99 --- /dev/null +++ b/bootstrap/k3s/api/v1beta1/k3sconfig.go @@ -0,0 +1,161 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package v1beta1 + +// ServerConfiguration defines the desired state of k3s server configuration. +type ServerConfiguration struct { + // Database is the database configuration. + Database Database `json:"database,omitempty"` + + // Listener is the listener configuration. + Listener Listener `json:"listener,omitempty"` + + // Networking is the networking configuration. + Networking Networking `json:"networking,omitempty"` + + // Agent is the agent configuration. 
+ Agent AgentConfiguration `json:"agent,omitempty"` +} + +// AgentConfiguration defines the desired state of k3s agent configuration. +type AgentConfiguration struct { + // Node defines the k3s agent node configuration. + Node AgentNode `json:"node,omitempty"` + + // Runtime defines the k3s agent runtime configuration. + Runtime AgentRuntime `json:"runtime,omitempty"` + + // Networking defines the k3s agent networking configuration. + Networking AgentNetworking `json:"networking,omitempty"` +} + +// Database defines the desired state of k3s database configuration. +type Database struct { + // DataStoreEndPoint specify etcd, Mysql, Postgres, or Sqlite (default) data source name. + DataStoreEndPoint string `json:"dataStoreEndPoint,omitempty"` + + // DataStoreCAFile TLS Certificate Authority file used to secure datastore backend communication. + DataStoreCAFile string `json:"dataStoreCAFile,omitempty"` + + // DataStoreCertFile TLS certification file used to secure datastore backend communication. + DataStoreCertFile string `json:"dataStoreCertFile,omitempty"` + + // DataStoreKeyFile TLS key file used to secure datastore backend communication. + DataStoreKeyFile string `json:"dataStoreKeyFile,omitempty"` + + // ClusterInit initialize a new cluster using embedded Etcd. + ClusterInit bool `json:"clusterInit,omitempty"` +} + +// Cluster is the desired state of k3s cluster configuration. +type Cluster struct { + // Token shared secret used to join a server or agent to a cluster. + Token string `json:"token,omitempty"` + + // TokenFile file containing the cluster-secret/token. + TokenFile string `json:"tokenFile,omitempty"` + + // Server which server to connect to, used to join a cluster. + Server string `json:"server,omitempty"` +} + +// Listener defines the desired state of k3s listener configuration. +type Listener struct { + // BindAddress k3s bind address. + BindAddress string `json:"bindAddress,omitempty"` + + // HTTPSListenPort HTTPS listen port. 
+ HTTPSListenPort int `json:"httpsListenPort,omitempty"` + + // AdvertiseAddress IP address that apiserver uses to advertise to members of the cluster. + AdvertiseAddress string `json:"advertiseAddress,omitempty"` + + // AdvertisePort Port that apiserver uses to advertise to members of the cluster (default: listen-port). + AdvertisePort int `json:"advertisePort,omitempty"` + + // TLSSan Add additional hostname or IP as a Subject Alternative Name in the TLS cert. + TLSSan string `json:"tlsSan,omitempty"` +} + +// Networking defines the desired state of k3s networking configuration. +type Networking struct { + // ClusterCIDR Network CIDR to use for pod IPs. + ClusterCIDR string `json:"clusterCIDR,omitempty"` + + // ServiceCIDR Network CIDR to use for services IPs. + ServiceCIDR string `json:"serviceCIDR,omitempty"` + + // ServiceNodePortRange Port range to reserve for services with NodePort visibility. + ServiceNodePortRange string `json:"serviceNodePortRange,omitempty"` + + // ClusterDNS cluster IP for coredns service. Should be in your service-cidr range. + ClusterDNS string `json:"clusterDNS,omitempty"` + + // ClusterDomain cluster Domain. + ClusterDomain string `json:"clusterDomain,omitempty"` + + // FlannelBackend One of ‘none’, ‘vxlan’, ‘ipsec’, ‘host-gw’, or ‘wireguard’. (default: vxlan) + FlannelBackend string `json:"flannelBackend,omitempty"` +} + +// AgentNode defines the desired state of k3s agent node configuration. +type AgentNode struct { + // NodeName k3s node name. + NodeName string `json:"nodeName,omitempty"` + + // NodeLabels registering and starting kubelet with set of labels. + NodeLabels []string `json:"nodeLabels,omitempty"` + + // NodeTaints registering and starting kubelet with set of taints. + NodeTaints []string `json:"nodeTaints,omitempty"` + + // SeLinux Enable SELinux in containerd + SeLinux bool `json:"seLinux,omitempty"` + + // LBServerPort + // Local port for supervisor client load-balancer. 
+ // If the supervisor and apiserver are not colocated an additional port 1 less than this port + // will also be used for the apiserver client load-balancer. (default: 6444) + LBServerPort int `json:"lbServerPort,omitempty"` + + // DataDir Folder to hold state. + DataDir string `json:"dataDir,omitempty"` +} + +// AgentRuntime defines the desired state of k3s agent runtime configuration. +type AgentRuntime struct { + // ContainerRuntimeEndpoint Disable embedded containerd and use alternative CRI implementation. + ContainerRuntimeEndpoint string `json:"containerRuntimeEndpoint,omitempty"` + + // PauseImage Customized pause image for containerd or Docker sandbox. + PauseImage string `json:"pauseImage,omitempty"` + + // PrivateRegistry Path to a private registry configuration file. + PrivateRegistry string `json:"privateRegistry,omitempty"` +} + +// AgentNetworking defines the desired state of k3s agent networking configuration. +type AgentNetworking struct { + // NodeIP IP address to advertise for node. + NodeIP string `json:"nodeIP,omitempty"` + + // NodeExternalIP External IP address to advertise for node. + NodeExternalIP string `json:"nodeExternalIP,omitempty"` + + // ResolvConf Path to Kubelet resolv.conf file. + ResolvConf string `json:"resolvConf,omitempty"` +} diff --git a/bootstrap/k3s/api/v1beta1/k3sconfig_types.go b/bootstrap/k3s/api/v1beta1/k3sconfig_types.go new file mode 100644 index 00000000..572accb8 --- /dev/null +++ b/bootstrap/k3s/api/v1beta1/k3sconfig_types.go @@ -0,0 +1,120 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" +) + +// K3sConfigSpec defines the desired state of K3sConfig +type K3sConfigSpec struct { + // Files specifies extra files to be passed to user_data upon creation. + // +optional + Files []bootstrapv1.File `json:"files,omitempty"` + + // Cluster defines the k3s cluster Options. + Cluster *Cluster `json:"cluster,omitempty"` + + // ServerConfiguration defines the k3s server configuration. + // +optional + ServerConfiguration *ServerConfiguration `json:"serverConfiguration,omitempty"` + + // AgentConfiguration defines the k3s agent configuration. + // +optional + AgentConfiguration *AgentConfiguration `json:"agentConfiguration,omitempty"` + + // PreK3sCommands specifies extra commands to run before k3s setup runs + // +optional + PreK3sCommands []string `json:"preK3sCommands,omitempty"` + + // PostK3sCommands specifies extra commands to run after k3s setup runs + // +optional + PostK3sCommands []string `json:"postK3sCommands,omitempty"` + + // Version specifies the k3s version + // +optional + Version string `json:"version,omitempty"` +} + +// K3sConfigStatus defines the observed state of K3sConfig +type K3sConfigStatus struct { + // Ready indicates the BootstrapData field is ready to be consumed + Ready bool `json:"ready,omitempty"` + + BootstrapData []byte `json:"bootstrapData,omitempty"` + + // DataSecretName is the name of the secret that stores the bootstrap data script. 
+ // +optional + DataSecretName *string `json:"dataSecretName,omitempty"` + + // FailureReason will be set on non-retryable errors + // +optional + FailureReason string `json:"failureReason,omitempty"` + + // FailureMessage will be set on non-retryable errors + // +optional + FailureMessage string `json:"failureMessage,omitempty"` + + // ObservedGeneration is the latest generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions defines current service state of the K3sConfig. + // +optional + Conditions clusterv1.Conditions `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=k3sconfigs,scope=Namespaced,categories=cluster-api +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels['cluster\\.x-k8s\\.io/cluster-name']",description="Cluster" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of K3sConfig" + +// K3sConfig is the Schema for the k3sConfigs API +type K3sConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec K3sConfigSpec `json:"spec,omitempty"` + Status K3sConfigStatus `json:"status,omitempty"` +} + +// GetConditions returns the set of conditions for this object. +func (c *K3sConfig) GetConditions() clusterv1.Conditions { + return c.Status.Conditions +} + +// SetConditions sets the conditions on this object. 
+func (c *K3sConfig) SetConditions(conditions clusterv1.Conditions) { + c.Status.Conditions = conditions +} + +//+kubebuilder:object:root=true + +// K3sConfigList contains a list of K3sConfig +type K3sConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []K3sConfig `json:"items"` +} + +func init() { + SchemeBuilder.Register(&K3sConfig{}, &K3sConfigList{}) +} diff --git a/bootstrap/k3s/api/v1beta1/k3sconfig_webhook.go b/bootstrap/k3s/api/v1beta1/k3sconfig_webhook.go new file mode 100644 index 00000000..5d4b0948 --- /dev/null +++ b/bootstrap/k3s/api/v1beta1/k3sconfig_webhook.go @@ -0,0 +1,146 @@ +/* +Copyright 2022 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +var ( + conflictingFileSourceMsg = "only one of content or contentFrom may be specified for a single file" + missingSecretNameMsg = "secret file source must specify non-empty secret name" + missingSecretKeyMsg = "secret file source must specify non-empty secret key" + pathConflictMsg = "path property must be unique among all files" +) + +func (c *K3sConfig) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(c). 
+ Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/mutate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfig,mutating=true,failurePolicy=fail,groups=bootstrap.cluster.x-k8s.io,resources=k3sconfigs,versions=v1beta1,name=default.k3sconfig.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var _ webhook.Defaulter = &K3sConfig{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (c *K3sConfig) Default() { + DefaultK3sConfigSpec(&c.Spec) +} + +// DefaultK3sConfigSpec defaults a K3sConfigSpec. +func DefaultK3sConfigSpec(c *K3sConfigSpec) { +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfig,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=k3sconfigs,versions=v1beta1,name=validation.k3sconfig.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var _ webhook.Validator = &K3sConfig{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (c *K3sConfig) ValidateCreate() error { + return c.Spec.validate(c.Name) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (c *K3sConfig) ValidateUpdate(old runtime.Object) error { + return c.Spec.validate(c.Name) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (c *K3sConfig) ValidateDelete() error { + return nil +} + +func (c *K3sConfigSpec) validate(name string) error { + allErrs := c.Validate(field.NewPath("spec")) + + if len(allErrs) == 0 { + return nil + } + + return apierrors.NewInvalid(GroupVersion.WithKind("K3sConfig").GroupKind(), name, allErrs) +} + +// Validate ensures the K3sConfigSpec is valid. 
+func (c *K3sConfigSpec) Validate(pathPrefix *field.Path) field.ErrorList { + var allErrs field.ErrorList + + allErrs = append(allErrs, c.validateFiles(pathPrefix)...) + + return allErrs +} + +func (c *K3sConfigSpec) validateFiles(pathPrefix *field.Path) field.ErrorList { + var allErrs field.ErrorList + + knownPaths := map[string]struct{}{} + + for i := range c.Files { + file := c.Files[i] + if file.Content != "" && file.ContentFrom != nil { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("files").Index(i), + file, + conflictingFileSourceMsg, + ), + ) + } + // n.b.: if we ever add types besides Secret as a ContentFrom + // Source, we must add webhook validation here for one of the + // sources being non-nil. + if file.ContentFrom != nil { + if file.ContentFrom.Secret.Name == "" { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("files").Index(i).Child("contentFrom", "secret", "name"), + missingSecretNameMsg, + ), + ) + } + if file.ContentFrom.Secret.Key == "" { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("files").Index(i).Child("contentFrom", "secret", "key"), + missingSecretKeyMsg, + ), + ) + } + } + _, conflict := knownPaths[file.Path] + if conflict { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("files").Index(i).Child("path"), + file, + pathConflictMsg, + ), + ) + } + knownPaths[file.Path] = struct{}{} + } + + return allErrs +} diff --git a/bootstrap/k3s/api/v1beta1/k3sconfigtemplate_types.go b/bootstrap/k3s/api/v1beta1/k3sconfigtemplate_types.go new file mode 100644 index 00000000..dce24f20 --- /dev/null +++ b/bootstrap/k3s/api/v1beta1/k3sconfigtemplate_types.go @@ -0,0 +1,57 @@ +/* +Copyright 2022 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// K3sConfigTemplateSpec defines the desired state of K3sConfigTemplate +type K3sConfigTemplateSpec struct { + Template K3sConfigTemplateResource `json:"template"` +} + +// K3sConfigTemplateResource defines the Template structure +type K3sConfigTemplateResource struct { + Spec K3sConfigSpec `json:"spec,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=k3sconfigtemplates,scope=Namespaced,categories=cluster-api +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of K3sConfigTemplate" + +// K3sConfigTemplate is the Schema for the k3sconfigtemplates API +type K3sConfigTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec K3sConfigTemplateSpec `json:"spec,omitempty"` +} + +//+kubebuilder:object:root=true + +// K3sConfigTemplateList contains a list of K3sConfigTemplate +type K3sConfigTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []K3sConfigTemplate `json:"items"` +} + +func init() { + SchemeBuilder.Register(&K3sConfigTemplate{}, &K3sConfigTemplateList{}) +} diff --git a/bootstrap/k3s/api/v1beta1/k3sconfigtemplate_webhook.go b/bootstrap/k3s/api/v1beta1/k3sconfigtemplate_webhook.go new file mode 100644 index 00000000..626334b4 --- /dev/null +++ b/bootstrap/k3s/api/v1beta1/k3sconfigtemplate_webhook.go @@ -0,0 +1,71 @@ +/* +Copyright 
2022 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +func (r *K3sConfigTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/mutate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfigtemplate,mutating=true,failurePolicy=fail,groups=bootstrap.cluster.x-k8s.io,resources=k3sconfigtemplates,versions=v1beta1,name=default.k3sconfigtemplate.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var _ webhook.Defaulter = &K3sConfigTemplate{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *K3sConfigTemplate) Default() { + DefaultK3sConfigSpec(&r.Spec.Template.Spec) +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfigtemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=k3sconfigtemplates,versions=v1beta1,name=validation.k3sconfigtemplate.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var _ webhook.Validator = &K3sConfigTemplate{} + +// ValidateCreate implements webhook.Validator 
so a webhook will be registered for the type +func (r *K3sConfigTemplate) ValidateCreate() error { + return r.Spec.validate(r.Name) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *K3sConfigTemplate) ValidateUpdate(old runtime.Object) error { + return r.Spec.validate(r.Name) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *K3sConfigTemplate) ValidateDelete() error { + return nil +} + +func (r *K3sConfigTemplateSpec) validate(name string) error { + var allErrs field.ErrorList + + allErrs = append(allErrs, r.Template.Spec.Validate(field.NewPath("spec", "template", "spec"))...) + + if len(allErrs) == 0 { + return nil + } + + return apierrors.NewInvalid(GroupVersion.WithKind("K3sConfigTemplate").GroupKind(), name, allErrs) +} diff --git a/bootstrap/k3s/api/v1beta1/zz_generated.deepcopy.go b/bootstrap/k3s/api/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..19ff19ba --- /dev/null +++ b/bootstrap/k3s/api/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,408 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2022 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + cluster_apiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + apiv1beta1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentConfiguration) DeepCopyInto(out *AgentConfiguration) { + *out = *in + in.Node.DeepCopyInto(&out.Node) + out.Runtime = in.Runtime + out.Networking = in.Networking +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentConfiguration. +func (in *AgentConfiguration) DeepCopy() *AgentConfiguration { + if in == nil { + return nil + } + out := new(AgentConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentNetworking) DeepCopyInto(out *AgentNetworking) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentNetworking. +func (in *AgentNetworking) DeepCopy() *AgentNetworking { + if in == nil { + return nil + } + out := new(AgentNetworking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentNode) DeepCopyInto(out *AgentNode) { + *out = *in + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NodeTaints != nil { + in, out := &in.NodeTaints, &out.NodeTaints + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentNode. 
+func (in *AgentNode) DeepCopy() *AgentNode { + if in == nil { + return nil + } + out := new(AgentNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentRuntime) DeepCopyInto(out *AgentRuntime) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentRuntime. +func (in *AgentRuntime) DeepCopy() *AgentRuntime { + if in == nil { + return nil + } + out := new(AgentRuntime) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Database) DeepCopyInto(out *Database) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database. +func (in *Database) DeepCopy() *Database { + if in == nil { + return nil + } + out := new(Database) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sConfig) DeepCopyInto(out *K3sConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfig. 
+func (in *K3sConfig) DeepCopy() *K3sConfig { + if in == nil { + return nil + } + out := new(K3sConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *K3sConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sConfigList) DeepCopyInto(out *K3sConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]K3sConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigList. +func (in *K3sConfigList) DeepCopy() *K3sConfigList { + if in == nil { + return nil + } + out := new(K3sConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *K3sConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *K3sConfigSpec) DeepCopyInto(out *K3sConfigSpec) { + *out = *in + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]apiv1beta1.File, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(Cluster) + **out = **in + } + if in.ServerConfiguration != nil { + in, out := &in.ServerConfiguration, &out.ServerConfiguration + *out = new(ServerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.AgentConfiguration != nil { + in, out := &in.AgentConfiguration, &out.AgentConfiguration + *out = new(AgentConfiguration) + (*in).DeepCopyInto(*out) + } + if in.PreK3sCommands != nil { + in, out := &in.PreK3sCommands, &out.PreK3sCommands + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PostK3sCommands != nil { + in, out := &in.PostK3sCommands, &out.PostK3sCommands + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigSpec. +func (in *K3sConfigSpec) DeepCopy() *K3sConfigSpec { + if in == nil { + return nil + } + out := new(K3sConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sConfigStatus) DeepCopyInto(out *K3sConfigStatus) { + *out = *in + if in.BootstrapData != nil { + in, out := &in.BootstrapData, &out.BootstrapData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.DataSecretName != nil { + in, out := &in.DataSecretName, &out.DataSecretName + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(cluster_apiapiv1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigStatus. 
+func (in *K3sConfigStatus) DeepCopy() *K3sConfigStatus { + if in == nil { + return nil + } + out := new(K3sConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sConfigTemplate) DeepCopyInto(out *K3sConfigTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigTemplate. +func (in *K3sConfigTemplate) DeepCopy() *K3sConfigTemplate { + if in == nil { + return nil + } + out := new(K3sConfigTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *K3sConfigTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sConfigTemplateList) DeepCopyInto(out *K3sConfigTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]K3sConfigTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigTemplateList. +func (in *K3sConfigTemplateList) DeepCopy() *K3sConfigTemplateList { + if in == nil { + return nil + } + out := new(K3sConfigTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *K3sConfigTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sConfigTemplateResource) DeepCopyInto(out *K3sConfigTemplateResource) { + *out = *in + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigTemplateResource. +func (in *K3sConfigTemplateResource) DeepCopy() *K3sConfigTemplateResource { + if in == nil { + return nil + } + out := new(K3sConfigTemplateResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sConfigTemplateSpec) DeepCopyInto(out *K3sConfigTemplateSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigTemplateSpec. +func (in *K3sConfigTemplateSpec) DeepCopy() *K3sConfigTemplateSpec { + if in == nil { + return nil + } + out := new(K3sConfigTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Listener) DeepCopyInto(out *Listener) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Listener. +func (in *Listener) DeepCopy() *Listener { + if in == nil { + return nil + } + out := new(Listener) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. 
+func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerConfiguration) DeepCopyInto(out *ServerConfiguration) { + *out = *in + out.Database = in.Database + out.Listener = in.Listener + out.Networking = in.Networking + in.Agent.DeepCopyInto(&out.Agent) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerConfiguration. +func (in *ServerConfiguration) DeepCopy() *ServerConfiguration { + if in == nil { + return nil + } + out := new(ServerConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/bootstrap/k3s/config/certmanager/certificate.yaml b/bootstrap/k3s/config/certmanager/certificate.yaml new file mode 100644 index 00000000..0f645290 --- /dev/null +++ b/bootstrap/k3s/config/certmanager/certificate.yaml @@ -0,0 +1,25 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. 
+# More document can be found at https://docs.cert-manager.io +# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for breaking changes +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize \ No newline at end of file diff --git a/bootstrap/k3s/config/certmanager/kustomization.yaml b/bootstrap/k3s/config/certmanager/kustomization.yaml new file mode 100644 index 00000000..bebea5a5 --- /dev/null +++ b/bootstrap/k3s/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/bootstrap/k3s/config/certmanager/kustomizeconfig.yaml b/bootstrap/k3s/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 00000000..28a895a4 --- /dev/null +++ b/bootstrap/k3s/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: +- kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: +- kind: Certificate + group: cert-manager.io + path: spec/commonName +- kind: Certificate + group: cert-manager.io + path: spec/dnsNames +- kind: Certificate + group: cert-manager.io + path: spec/secretName diff --git 
a/bootstrap/k3s/config/crd/bases/bootstrap.cluster.x-k8s.io_k3sconfigs.yaml b/bootstrap/k3s/config/crd/bases/bootstrap.cluster.x-k8s.io_k3sconfigs.yaml new file mode 100644 index 00000000..ed9979c6 --- /dev/null +++ b/bootstrap/k3s/config/crd/bases/bootstrap.cluster.x-k8s.io_k3sconfigs.yaml @@ -0,0 +1,422 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.1 + creationTimestamp: null + name: k3sconfigs.bootstrap.cluster.x-k8s.io +spec: + group: bootstrap.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: K3sConfig + listKind: K3sConfigList + plural: k3sconfigs + singular: k3sconfig + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Cluster + jsonPath: .metadata.labels['cluster\.x-k8s\.io/cluster-name'] + name: Cluster + type: string + - description: Time duration since creation of K3sConfig + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: K3sConfig is the Schema for the k3sConfigs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: K3sConfigSpec defines the desired state of K3sConfig + properties: + agentConfiguration: + description: AgentConfiguration defines the k3s agent configuration. + properties: + networking: + description: Networking defines the k3s agent networking configuration. + properties: + nodeExternalIP: + description: NodeExternalIP External IP address to advertise + for node. + type: string + nodeIP: + description: NodeIP IP address to advertise for node. + type: string + resolvConf: + description: ResolvConf Path to Kubelet resolv.conf file. + type: string + type: object + node: + description: Node defines the k3s agent node configuration. + properties: + dataDir: + description: DataDir Folder to hold state. + type: string + lbServerPort: + description: 'LBServerPort Local port for supervisor client + load-balancer. If the supervisor and apiserver are not colocated + an additional port 1 less than this port will also be used + for the apiserver client load-balancer. (default: 6444)' + type: integer + nodeLabels: + description: NodeLabels registering and starting kubelet with + set of labels. + items: + type: string + type: array + nodeName: + description: NodeName k3s node name. + type: string + nodeTaints: + description: NodeTaints registering and starting kubelet with + set of taints. + items: + type: string + type: array + seLinux: + description: SeLinux Enable SELinux in containerd + type: boolean + type: object + runtime: + description: Runtime defines the k3s agent runtime configuration. + properties: + containerRuntimeEndpoint: + description: ContainerRuntimeEndpoint Disable embedded containerd + and use alternative CRI implementation. + type: string + pauseImage: + description: PauseImage Customized pause image for containerd + or Docker sandbox. 
+ type: string + privateRegistry: + description: PrivateRegistry Path to a private registry configuration + file. + type: string + type: object + type: object + cluster: + description: Cluster defines the k3s cluster Options. + properties: + server: + description: Server which server to connect to, used to join a + cluster. + type: string + token: + description: Token shared secret used to join a server or agent + to a cluster. + type: string + tokenFile: + description: TokenFile file containing the cluster-secret/token. + type: string + type: object + files: + description: Files specifies extra files to be passed to user_data + upon creation. + items: + description: File defines the input for generating write_files in + cloud-init. + properties: + append: + description: Append specifies whether to append Content to existing + file if Path exists. + type: boolean + content: + description: Content is the actual content of the file. + type: string + contentFrom: + description: ContentFrom is a referenced source of content to + populate the file. + properties: + secret: + description: Secret represents a secret that should populate + this file. + properties: + key: + description: Key is the key in the secret's data map + for this value. + type: string + name: + description: Name of the secret in the KubeadmBootstrapConfig's + namespace to use. + type: string + required: + - key + - name + type: object + required: + - secret + type: object + encoding: + description: Encoding specifies the encoding of the file contents. + enum: + - base64 + - gzip + - gzip+base64 + type: string + owner: + description: Owner specifies the ownership of the file, e.g. + "root:root". + type: string + path: + description: Path specifies the full path on disk where to store + the file. + type: string + permissions: + description: Permissions specifies the permissions to assign + to the file, e.g. "0640". 
+ type: string + required: + - path + type: object + type: array + postK3sCommands: + description: PostK3sCommands specifies extra commands to run after + k3s setup runs + items: + type: string + type: array + preK3sCommands: + description: PreK3sCommands specifies extra commands to run before + k3s setup runs + items: + type: string + type: array + serverConfiguration: + description: ServerConfiguration defines the k3s server configuration. + properties: + agent: + description: Agent is the agent configuration. + properties: + networking: + description: Networking defines the k3s agent networking configuration. + properties: + nodeExternalIP: + description: NodeExternalIP External IP address to advertise + for node. + type: string + nodeIP: + description: NodeIP IP address to advertise for node. + type: string + resolvConf: + description: ResolvConf Path to Kubelet resolv.conf file. + type: string + type: object + node: + description: Node defines the k3s agent node configuration. + properties: + dataDir: + description: DataDir Folder to hold state. + type: string + lbServerPort: + description: 'LBServerPort Local port for supervisor client + load-balancer. If the supervisor and apiserver are not + colocated an additional port 1 less than this port will + also be used for the apiserver client load-balancer. + (default: 6444)' + type: integer + nodeLabels: + description: NodeLabels registering and starting kubelet + with set of labels. + items: + type: string + type: array + nodeName: + description: NodeName k3s node name. + type: string + nodeTaints: + description: NodeTaints registering and starting kubelet + with set of taints. + items: + type: string + type: array + seLinux: + description: SeLinux Enable SELinux in containerd + type: boolean + type: object + runtime: + description: Runtime defines the k3s agent runtime configuration. 
+ properties: + containerRuntimeEndpoint: + description: ContainerRuntimeEndpoint Disable embedded + containerd and use alternative CRI implementation. + type: string + pauseImage: + description: PauseImage Customized pause image for containerd + or Docker sandbox. + type: string + privateRegistry: + description: PrivateRegistry Path to a private registry + configuration file. + type: string + type: object + type: object + database: + description: Database is the database configuration. + properties: + clusterInit: + description: ClusterInit initialize a new cluster using embedded + Etcd. + type: boolean + dataStoreCAFile: + description: DataStoreCAFile TLS Certificate Authority file + used to secure datastore backend communication. + type: string + dataStoreCertFile: + description: DataStoreCertFile TLS certification file used + to secure datastore backend communication. + type: string + dataStoreEndPoint: + description: DataStoreEndPoint specify etcd, Mysql, Postgres, + or Sqlite (default) data source name. + type: string + dataStoreKeyFile: + description: DataStoreKeyFile TLS key file used to secure + datastore backend communication. + type: string + type: object + listener: + description: Listener is the listener configuration. + properties: + advertiseAddress: + description: AdvertiseAddress IP address that apiserver uses + to advertise to members of the cluster. + type: string + advertisePort: + description: 'AdvertisePort Port that apiserver uses to advertise + to members of the cluster (default: listen-port).' + type: integer + bindAddress: + description: BindAddress k3s bind address. + type: string + httpsListenPort: + description: HTTPSListenPort HTTPS listen port. + type: integer + tlsSan: + description: TLSSan Add additional hostname or IP as a Subject + Alternative Name in the TLS cert. + type: string + type: object + networking: + description: Networking is the networking configuration. 
+ properties: + clusterCIDR: + description: ClusterCIDR Network CIDR to use for pod IPs. + type: string + clusterDNS: + description: ClusterDNS cluster IP for coredns service. Should + be in your service-cidr range. + type: string + clusterDomain: + description: ClusterDomain cluster Domain. + type: string + flannelBackend: + description: 'FlannelBackend One of ‘none’, ‘vxlan’, ‘ipsec’, + ‘host-gw’, or ‘wireguard’. (default: vxlan)' + type: string + serviceCIDR: + description: ServiceCIDR Network CIDR to use for services + IPs. + type: string + serviceNodePortRange: + description: ServiceNodePortRange Port range to reserve for + services with NodePort visibility. + type: string + type: object + type: object + version: + description: Version specifies the k3s version + type: string + type: object + status: + description: K3sConfigStatus defines the observed state of K3sConfig + properties: + bootstrapData: + format: byte + type: string + conditions: + description: Conditions defines current service state of the K3sConfig. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. This should be when the underlying condition changed. + If that is not known, then using the time when the API field + changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. The specific API may choose whether or not this + field is considered a guaranteed API. This field may not be + empty. + type: string + severity: + description: Severity provides an explicit classification of + Reason code, so the users or machines can immediately understand + the current situation and act accordingly. 
The Severity field + MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + dataSecretName: + description: DataSecretName is the name of the secret that stores + the bootstrap data script. + type: string + failureMessage: + description: FailureMessage will be set on non-retryable errors + type: string + failureReason: + description: FailureReason will be set on non-retryable errors + type: string + observedGeneration: + description: ObservedGeneration is the latest generation observed + by the controller. + format: int64 + type: integer + ready: + description: Ready indicates the BootstrapData field is ready to be + consumed + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/bootstrap/k3s/config/crd/bases/bootstrap.cluster.x-k8s.io_k3sconfigtemplates.yaml b/bootstrap/k3s/config/crd/bases/bootstrap.cluster.x-k8s.io_k3sconfigtemplates.yaml new file mode 100644 index 00000000..2e34dfeb --- /dev/null +++ b/bootstrap/k3s/config/crd/bases/bootstrap.cluster.x-k8s.io_k3sconfigtemplates.yaml @@ -0,0 +1,368 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.1 + creationTimestamp: null + name: k3sconfigtemplates.bootstrap.cluster.x-k8s.io +spec: + group: bootstrap.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: K3sConfigTemplate + listKind: K3sConfigTemplateList + plural: k3sconfigtemplates + singular: k3sconfigtemplate + 
scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Time duration since creation of K3sConfigTemplate + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: K3sConfigTemplate is the Schema for the k3sconfigtemplates API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: K3sConfigTemplateSpec defines the desired state of K3sConfigTemplate + properties: + template: + description: K3sConfigTemplateResource defines the Template structure + properties: + spec: + description: K3sConfigSpec defines the desired state of K3sConfig + properties: + agentConfiguration: + description: AgentConfiguration defines the k3s agent configuration. + properties: + networking: + description: Networking defines the k3s agent networking + configuration. + properties: + nodeExternalIP: + description: NodeExternalIP External IP address to + advertise for node. + type: string + nodeIP: + description: NodeIP IP address to advertise for node. + type: string + resolvConf: + description: ResolvConf Path to Kubelet resolv.conf + file. + type: string + type: object + node: + description: Node defines the k3s agent node configuration. + properties: + dataDir: + description: DataDir Folder to hold state. 
+ type: string + lbServerPort: + description: 'LBServerPort Local port for supervisor + client load-balancer. If the supervisor and apiserver + are not colocated an additional port 1 less than + this port will also be used for the apiserver client + load-balancer. (default: 6444)' + type: integer + nodeLabels: + description: NodeLabels registering and starting kubelet + with set of labels. + items: + type: string + type: array + nodeName: + description: NodeName k3s node name. + type: string + nodeTaints: + description: NodeTaints registering and starting kubelet + with set of taints. + items: + type: string + type: array + seLinux: + description: SeLinux Enable SELinux in containerd + type: boolean + type: object + runtime: + description: Runtime defines the k3s agent runtime configuration. + properties: + containerRuntimeEndpoint: + description: ContainerRuntimeEndpoint Disable embedded + containerd and use alternative CRI implementation. + type: string + pauseImage: + description: PauseImage Customized pause image for + containerd or Docker sandbox. + type: string + privateRegistry: + description: PrivateRegistry Path to a private registry + configuration file. + type: string + type: object + type: object + cluster: + description: Cluster defines the k3s cluster Options. + properties: + server: + description: Server which server to connect to, used to + join a cluster. + type: string + token: + description: Token shared secret used to join a server + or agent to a cluster. + type: string + tokenFile: + description: TokenFile file containing the cluster-secret/token. + type: string + type: object + files: + description: Files specifies extra files to be passed to user_data + upon creation. + items: + description: File defines the input for generating write_files + in cloud-init. + properties: + append: + description: Append specifies whether to append Content + to existing file if Path exists. 
+ type: boolean + content: + description: Content is the actual content of the file. + type: string + contentFrom: + description: ContentFrom is a referenced source of content + to populate the file. + properties: + secret: + description: Secret represents a secret that should + populate this file. + properties: + key: + description: Key is the key in the secret's + data map for this value. + type: string + name: + description: Name of the secret in the KubeadmBootstrapConfig's + namespace to use. + type: string + required: + - key + - name + type: object + required: + - secret + type: object + encoding: + description: Encoding specifies the encoding of the + file contents. + enum: + - base64 + - gzip + - gzip+base64 + type: string + owner: + description: Owner specifies the ownership of the file, + e.g. "root:root". + type: string + path: + description: Path specifies the full path on disk where + to store the file. + type: string + permissions: + description: Permissions specifies the permissions to + assign to the file, e.g. "0640". + type: string + required: + - path + type: object + type: array + postK3sCommands: + description: PostK3sCommands specifies extra commands to run + after k3s setup runs + items: + type: string + type: array + preK3sCommands: + description: PreK3sCommands specifies extra commands to run + before k3s setup runs + items: + type: string + type: array + serverConfiguration: + description: ServerConfiguration defines the k3s server configuration. + properties: + agent: + description: Agent is the agent configuration. + properties: + networking: + description: Networking defines the k3s agent networking + configuration. + properties: + nodeExternalIP: + description: NodeExternalIP External IP address + to advertise for node. + type: string + nodeIP: + description: NodeIP IP address to advertise for + node. + type: string + resolvConf: + description: ResolvConf Path to Kubelet resolv.conf + file. 
+ type: string + type: object + node: + description: Node defines the k3s agent node configuration. + properties: + dataDir: + description: DataDir Folder to hold state. + type: string + lbServerPort: + description: 'LBServerPort Local port for supervisor + client load-balancer. If the supervisor and + apiserver are not colocated an additional port + 1 less than this port will also be used for + the apiserver client load-balancer. (default: + 6444)' + type: integer + nodeLabels: + description: NodeLabels registering and starting + kubelet with set of labels. + items: + type: string + type: array + nodeName: + description: NodeName k3s node name. + type: string + nodeTaints: + description: NodeTaints registering and starting + kubelet with set of taints. + items: + type: string + type: array + seLinux: + description: SeLinux Enable SELinux in containerd + type: boolean + type: object + runtime: + description: Runtime defines the k3s agent runtime + configuration. + properties: + containerRuntimeEndpoint: + description: ContainerRuntimeEndpoint Disable + embedded containerd and use alternative CRI + implementation. + type: string + pauseImage: + description: PauseImage Customized pause image + for containerd or Docker sandbox. + type: string + privateRegistry: + description: PrivateRegistry Path to a private + registry configuration file. + type: string + type: object + type: object + database: + description: Database is the database configuration. + properties: + clusterInit: + description: ClusterInit initialize a new cluster + using embedded Etcd. + type: boolean + dataStoreCAFile: + description: DataStoreCAFile TLS Certificate Authority + file used to secure datastore backend communication. + type: string + dataStoreCertFile: + description: DataStoreCertFile TLS certification file + used to secure datastore backend communication. 
+ type: string + dataStoreEndPoint: + description: DataStoreEndPoint specify etcd, Mysql, + Postgres, or Sqlite (default) data source name. + type: string + dataStoreKeyFile: + description: DataStoreKeyFile TLS key file used to + secure datastore backend communication. + type: string + type: object + listener: + description: Listener is the listener configuration. + properties: + advertiseAddress: + description: AdvertiseAddress IP address that apiserver + uses to advertise to members of the cluster. + type: string + advertisePort: + description: 'AdvertisePort Port that apiserver uses + to advertise to members of the cluster (default: + listen-port).' + type: integer + bindAddress: + description: BindAddress k3s bind address. + type: string + httpsListenPort: + description: HTTPSListenPort HTTPS listen port. + type: integer + tlsSan: + description: TLSSan Add additional hostname or IP + as a Subject Alternative Name in the TLS cert. + type: string + type: object + networking: + description: Networking is the networking configuration. + properties: + clusterCIDR: + description: ClusterCIDR Network CIDR to use for pod + IPs. + type: string + clusterDNS: + description: ClusterDNS cluster IP for coredns service. + Should be in your service-cidr range. + type: string + clusterDomain: + description: ClusterDomain cluster Domain. + type: string + flannelBackend: + description: 'FlannelBackend One of ‘none’, ‘vxlan’, + ‘ipsec’, ‘host-gw’, or ‘wireguard’. (default: vxlan)' + type: string + serviceCIDR: + description: ServiceCIDR Network CIDR to use for services + IPs. + type: string + serviceNodePortRange: + description: ServiceNodePortRange Port range to reserve + for services with NodePort visibility. 
+ type: string + type: object + type: object + version: + description: Version specifies the k3s version + type: string + type: object + type: object + required: + - template + type: object + type: object + served: true + storage: true + subresources: {} diff --git a/bootstrap/k3s/config/crd/kustomization.yaml b/bootstrap/k3s/config/crd/kustomization.yaml new file mode 100644 index 00000000..71b422e9 --- /dev/null +++ b/bootstrap/k3s/config/crd/kustomization.yaml @@ -0,0 +1,27 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/bootstrap.cluster.x-k8s.io_k3sconfigs.yaml +- bases/bootstrap.cluster.x-k8s.io_k3sconfigtemplates.yaml +#+kubebuilder:scaffold:crdkustomizeresource + +commonLabels: + cluster.x-k8s.io/v1beta1: v1beta1 + +patchesStrategicMerge: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +- patches/webhook_in_k3sconfigs.yaml +- patches/webhook_in_k3sconfigtemplates.yaml +#+kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +- patches/cainjection_in_k3sconfigs.yaml +- patches/cainjection_in_k3sconfigtemplates.yaml +#+kubebuilder:scaffold:crdkustomizecainjectionpatch + +# the following config is for teaching kustomize how to do kustomization for CRDs. 
+configurations: +- kustomizeconfig.yaml diff --git a/bootstrap/k3s/config/crd/kustomizeconfig.yaml b/bootstrap/k3s/config/crd/kustomizeconfig.yaml new file mode 100644 index 00000000..ec5c150a --- /dev/null +++ b/bootstrap/k3s/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/bootstrap/k3s/config/crd/patches/cainjection_in_k3sconfigs.yaml b/bootstrap/k3s/config/crd/patches/cainjection_in_k3sconfigs.yaml new file mode 100644 index 00000000..89973785 --- /dev/null +++ b/bootstrap/k3s/config/crd/patches/cainjection_in_k3sconfigs.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: k3sconfigs.bootstrap.cluster.x-k8s.io diff --git a/bootstrap/k3s/config/crd/patches/cainjection_in_k3sconfigtemplates.yaml b/bootstrap/k3s/config/crd/patches/cainjection_in_k3sconfigtemplates.yaml new file mode 100644 index 00000000..5722b7b3 --- /dev/null +++ b/bootstrap/k3s/config/crd/patches/cainjection_in_k3sconfigtemplates.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: k3sconfigtemplates.bootstrap.cluster.x-k8s.io 
diff --git a/bootstrap/k3s/config/crd/patches/webhook_in_k3sconfigs.yaml b/bootstrap/k3s/config/crd/patches/webhook_in_k3sconfigs.yaml new file mode 100644 index 00000000..75b66bfe --- /dev/null +++ b/bootstrap/k3s/config/crd/patches/webhook_in_k3sconfigs.yaml @@ -0,0 +1,18 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: k3sconfigs.bootstrap.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/bootstrap/k3s/config/crd/patches/webhook_in_k3sconfigtemplates.yaml b/bootstrap/k3s/config/crd/patches/webhook_in_k3sconfigtemplates.yaml new file mode 100644 index 00000000..79e1942f --- /dev/null +++ b/bootstrap/k3s/config/crd/patches/webhook_in_k3sconfigtemplates.yaml @@ -0,0 +1,18 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: k3sconfigtemplates.bootstrap.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/bootstrap/k3s/config/default/kustomization.yaml b/bootstrap/k3s/config/default/kustomization.yaml new file mode 100644 index 00000000..105a8e5f --- /dev/null +++ 
b/bootstrap/k3s/config/default/kustomization.yaml @@ -0,0 +1,54 @@ +namePrefix: capkk-k3s-bootstrap- +namespace: capkk-k3s-bootstrap-system + +commonLabels: + cluster.x-k8s.io/provider: "bootstrap-k3s" + +resources: + - namespace.yaml + +bases: + - ../rbac + - ../manager + - ../crd + - ../certmanager + - ../webhook + +patchesStrategicMerge: + # Provide customizable hook for make targets. + - manager_image_patch.yaml + - manager_pull_policy.yaml + # Enable webhook. + - manager_webhook_patch.yaml + # Inject certificate in the webhook definition. + - webhookcainjection_patch.yaml + +configurations: + - kustomizeconfig.yaml +vars: + - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + fieldref: + fieldpath: metadata.namespace + - name: CERTIFICATE_NAME + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + - name: SERVICE_NAMESPACE # namespace of the service + objref: + kind: Service + version: v1 + name: webhook-service + fieldref: + fieldpath: metadata.namespace + - name: SERVICE_NAME + objref: + kind: Service + version: v1 + name: webhook-service diff --git a/bootstrap/k3s/config/default/kustomizeconfig.yaml b/bootstrap/k3s/config/default/kustomizeconfig.yaml new file mode 100644 index 00000000..eb191e64 --- /dev/null +++ b/bootstrap/k3s/config/default/kustomizeconfig.yaml @@ -0,0 +1,4 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +varReference: +- kind: Deployment + path: spec/template/spec/volumes/secret/secretName diff --git a/bootstrap/k3s/config/default/manager_image_patch.yaml b/bootstrap/k3s/config/default/manager_image_patch.yaml new file mode 100644 index 00000000..c17636d8 --- /dev/null +++ b/bootstrap/k3s/config/default/manager_image_patch.yaml @@ -0,0 +1,11 @@ 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - image: docker.io/kubespheredev/k3s-bootstrap-controller:main + name: manager diff --git a/bootstrap/k3s/config/default/manager_pull_policy.yaml b/bootstrap/k3s/config/default/manager_pull_policy.yaml new file mode 100644 index 00000000..74a0879c --- /dev/null +++ b/bootstrap/k3s/config/default/manager_pull_policy.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: Always diff --git a/bootstrap/k3s/config/default/manager_webhook_patch.yaml b/bootstrap/k3s/config/default/manager_webhook_patch.yaml new file mode 100644 index 00000000..bccef6d7 --- /dev/null +++ b/bootstrap/k3s/config/default/manager_webhook_patch.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + secretName: $(SERVICE_NAME)-cert diff --git a/bootstrap/k3s/config/default/namespace.yaml b/bootstrap/k3s/config/default/namespace.yaml new file mode 100644 index 00000000..8b55c3cd --- /dev/null +++ b/bootstrap/k3s/config/default/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: system diff --git a/bootstrap/k3s/config/default/webhookcainjection_patch.yaml b/bootstrap/k3s/config/default/webhookcainjection_patch.yaml new file mode 100644 index 00000000..04c08d02 --- /dev/null +++ b/bootstrap/k3s/config/default/webhookcainjection_patch.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: 
MutatingWebhookConfiguration +metadata: + name: mutating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) diff --git a/bootstrap/k3s/config/manager/kustomization.yaml b/bootstrap/k3s/config/manager/kustomization.yaml new file mode 100644 index 00000000..7394a6d0 --- /dev/null +++ b/bootstrap/k3s/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: + - manager.yaml diff --git a/bootstrap/k3s/config/manager/manager.yaml b/bootstrap/k3s/config/manager/manager.yaml new file mode 100644 index 00000000..34ae98a9 --- /dev/null +++ b/bootstrap/k3s/config/manager/manager.yaml @@ -0,0 +1,44 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - command: + - /manager + args: + - "--leader-elect" + - "--metrics-bind-addr=localhost:8080" + image: controller:latest + name: manager + ports: + - containerPort: 9440 + name: healthz + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + terminationGracePeriodSeconds: 10 + serviceAccountName: manager + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane diff --git a/bootstrap/k3s/config/rbac/kustomization.yaml b/bootstrap/k3s/config/rbac/kustomization.yaml new file mode 100644 index 00000000..92215861 --- /dev/null +++ b/bootstrap/k3s/config/rbac/kustomization.yaml @@ -0,0 +1,6 @@ +resources: 
+- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml diff --git a/bootstrap/k3s/config/rbac/leader_election_role.yaml b/bootstrap/k3s/config/rbac/leader_election_role.yaml new file mode 100644 index 00000000..4190ec80 --- /dev/null +++ b/bootstrap/k3s/config/rbac/leader_election_role.yaml @@ -0,0 +1,37 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/bootstrap/k3s/config/rbac/leader_election_role_binding.yaml b/bootstrap/k3s/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 00000000..a73dfa95 --- /dev/null +++ b/bootstrap/k3s/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: + - kind: ServiceAccount + name: manager + namespace: system diff --git a/bootstrap/k3s/config/rbac/role.yaml b/bootstrap/k3s/config/rbac/role.yaml new file mode 100644 index 00000000..4eed8bfc --- /dev/null +++ b/bootstrap/k3s/config/rbac/role.yaml @@ -0,0 +1,48 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + - events + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - bootstrap.cluster.x-k8s.io + resources: + - k3sconfigs + - k3sconfigs/finalizers + 
- k3sconfigs/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.x-k8s.io + resources: + - clusters + - clusters/status + - machinepools + - machinepools/status + - machines + - machines/status + verbs: + - get + - list + - watch diff --git a/bootstrap/k3s/config/rbac/role_binding.yaml b/bootstrap/k3s/config/rbac/role_binding.yaml new file mode 100644 index 00000000..3ffc9c2e --- /dev/null +++ b/bootstrap/k3s/config/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: + - kind: ServiceAccount + name: manager + namespace: system diff --git a/bootstrap/k3s/config/rbac/service_account.yaml b/bootstrap/k3s/config/rbac/service_account.yaml new file mode 100644 index 00000000..77f747b5 --- /dev/null +++ b/bootstrap/k3s/config/rbac/service_account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: manager + namespace: system diff --git a/bootstrap/k3s/config/samples/bootstrap_v1beta1_k3sconfig.yaml b/bootstrap/k3s/config/samples/bootstrap_v1beta1_k3sconfig.yaml new file mode 100644 index 00000000..f2b353bc --- /dev/null +++ b/bootstrap/k3s/config/samples/bootstrap_v1beta1_k3sconfig.yaml @@ -0,0 +1,6 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: K3sConfig +metadata: + name: k3sconfig-sample +spec: + # TODO(user): Add fields here diff --git a/bootstrap/k3s/config/samples/bootstrap_v1beta1_k3sconfigtemplate.yaml b/bootstrap/k3s/config/samples/bootstrap_v1beta1_k3sconfigtemplate.yaml new file mode 100644 index 00000000..c6d495d4 --- /dev/null +++ b/bootstrap/k3s/config/samples/bootstrap_v1beta1_k3sconfigtemplate.yaml @@ -0,0 +1,6 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: K3sConfigTemplate +metadata: + name: k3sconfigtemplate-sample +spec: + # TODO(user): Add fields here 
diff --git a/bootstrap/k3s/config/samples/kustomization.yaml b/bootstrap/k3s/config/samples/kustomization.yaml new file mode 100644 index 00000000..c60238ea --- /dev/null +++ b/bootstrap/k3s/config/samples/kustomization.yaml @@ -0,0 +1,5 @@ +## Append samples you want in your CSV to this file as resources ## +resources: +- bootstrap_v1beta1_k3sconfig.yaml +- bootstrap_v1beta1_k3sconfigtemplate.yaml +#+kubebuilder:scaffold:manifestskustomizesamples diff --git a/bootstrap/k3s/config/scorecard/bases/config.yaml b/bootstrap/k3s/config/scorecard/bases/config.yaml new file mode 100644 index 00000000..c7704784 --- /dev/null +++ b/bootstrap/k3s/config/scorecard/bases/config.yaml @@ -0,0 +1,7 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: [] diff --git a/bootstrap/k3s/config/scorecard/kustomization.yaml b/bootstrap/k3s/config/scorecard/kustomization.yaml new file mode 100644 index 00000000..50cd2d08 --- /dev/null +++ b/bootstrap/k3s/config/scorecard/kustomization.yaml @@ -0,0 +1,16 @@ +resources: +- bases/config.yaml +patchesJson6902: +- path: patches/basic.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +- path: patches/olm.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +#+kubebuilder:scaffold:patchesJson6902 diff --git a/bootstrap/k3s/config/scorecard/patches/basic.config.yaml b/bootstrap/k3s/config/scorecard/patches/basic.config.yaml new file mode 100644 index 00000000..4a6c8167 --- /dev/null +++ b/bootstrap/k3s/config/scorecard/patches/basic.config.yaml @@ -0,0 +1,10 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.22.2 + labels: + suite: basic + test: basic-check-spec-test diff --git 
a/bootstrap/k3s/config/scorecard/patches/olm.config.yaml b/bootstrap/k3s/config/scorecard/patches/olm.config.yaml new file mode 100644 index 00000000..c342410a --- /dev/null +++ b/bootstrap/k3s/config/scorecard/patches/olm.config.yaml @@ -0,0 +1,50 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.22.2 + labels: + suite: olm + test: olm-bundle-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.22.2 + labels: + suite: olm + test: olm-crds-have-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.22.2 + labels: + suite: olm + test: olm-crds-have-resources-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.22.2 + labels: + suite: olm + test: olm-spec-descriptors-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.22.2 + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/bootstrap/k3s/config/webhook/kustomization.yaml b/bootstrap/k3s/config/webhook/kustomization.yaml new file mode 100644 index 00000000..9cf26134 --- /dev/null +++ b/bootstrap/k3s/config/webhook/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- manifests.yaml +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/bootstrap/k3s/config/webhook/kustomizeconfig.yaml b/bootstrap/k3s/config/webhook/kustomizeconfig.yaml new file mode 100644 index 00000000..25e21e3c --- /dev/null +++ b/bootstrap/k3s/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,25 @@ +# the following config is for teaching kustomize 
where to look at when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true + +varReference: +- path: metadata/annotations diff --git a/bootstrap/k3s/config/webhook/manifests.yaml b/bootstrap/k3s/config/webhook/manifests.yaml new file mode 100644 index 00000000..8caf8749 --- /dev/null +++ b/bootstrap/k3s/config/webhook/manifests.yaml @@ -0,0 +1,100 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + creationTimestamp: null + name: mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfig + failurePolicy: Fail + name: default.k3sconfig.bootstrap.cluster.x-k8s.io + rules: + - apiGroups: + - bootstrap.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - k3sconfigs + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfigtemplate + failurePolicy: Fail + name: default.k3sconfigtemplate.bootstrap.cluster.x-k8s.io + rules: + - apiGroups: + - bootstrap.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - k3sconfigtemplates + 
sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfig + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.k3sconfig.bootstrap.cluster.x-k8s.io + rules: + - apiGroups: + - bootstrap.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - k3sconfigs + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfigtemplate + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.k3sconfigtemplate.bootstrap.cluster.x-k8s.io + rules: + - apiGroups: + - bootstrap.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - k3sconfigtemplates + sideEffects: None diff --git a/bootstrap/k3s/config/webhook/service.yaml b/bootstrap/k3s/config/webhook/service.yaml new file mode 100644 index 00000000..3f638bd9 --- /dev/null +++ b/bootstrap/k3s/config/webhook/service.yaml @@ -0,0 +1,13 @@ + +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 9443 + selector: + control-plane: controller-manager diff --git a/pkg/service/binary/doc.go b/bootstrap/k3s/controllers/doc.go similarity index 86% rename from pkg/service/binary/doc.go rename to bootstrap/k3s/controllers/doc.go index 94f031ce..98b7479a 100644 --- a/pkg/service/binary/doc.go +++ b/bootstrap/k3s/controllers/doc.go @@ -14,5 +14,5 @@ limitations under the License. */ -// Package binary define the binaries operations on the remote instance. 
-package binary +// Package controllers contains k3s config controllers. +package controllers diff --git a/bootstrap/k3s/controllers/k3sconfig_controller.go b/bootstrap/k3s/controllers/k3sconfig_controller.go new file mode 100644 index 00000000..d6d675ff --- /dev/null +++ b/bootstrap/k3s/controllers/k3sconfig_controller.go @@ -0,0 +1,764 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" + bootstraputil "k8s.io/cluster-bootstrap/token/util" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bsutil "sigs.k8s.io/cluster-api/bootstrap/util" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + 
"sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" + + infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1" + "github.com/kubesphere/kubekey/bootstrap/k3s/pkg/cloudinit" + "github.com/kubesphere/kubekey/bootstrap/k3s/pkg/locking" + k3stypes "github.com/kubesphere/kubekey/bootstrap/k3s/pkg/types" + kklog "github.com/kubesphere/kubekey/util/log" + "github.com/kubesphere/kubekey/util/secret" +) + +// InitLocker is a lock that is used around kubeadm init. +type InitLocker interface { + Lock(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool + Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool +} + +// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=k3sconfigs;k3sconfigs/status;k3sconfigs/finalizers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status;machinesets;machines;machines/status;machinepools;machinepools/status,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=secrets;events;configmaps,verbs=get;list;watch;create;update;patch;delete + +// K3sConfigReconciler reconciles a K3sConfig object +type K3sConfigReconciler struct { + client.Client + K3sInitLock InitLocker + + // WatchFilterValue is the label value used to filter events prior to reconciliation. + WatchFilterValue string +} + +// Scope is a scoped struct used during reconciliation. +type Scope struct { + logr.Logger + Config *infrabootstrapv1.K3sConfig + ConfigOwner *bsutil.ConfigOwner + Cluster *clusterv1.Cluster +} + +// SetupWithManager sets up the controller with the Manager. +func (r *K3sConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + if r.K3sInitLock == nil { + r.K3sInitLock = locking.NewControlPlaneInitMutex(mgr.GetClient()) + } + + b := ctrl.NewControllerManagedBy(mgr). + For(&infrabootstrapv1.K3sConfig{}). + WithOptions(options). 
+ Watches( + &source.Kind{Type: &clusterv1.Machine{}}, + handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc), + ).WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)) + + if feature.Gates.Enabled(feature.MachinePool) { + b = b.Watches( + &source.Kind{Type: &expv1.MachinePool{}}, + handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc), + ).WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)) + } + + c, err := b.Build(r) + if err != nil { + return errors.Wrap(err, "failed setting up with a controller manager") + } + + err = c.Watch( + &source.Kind{Type: &clusterv1.Cluster{}}, + handler.EnqueueRequestsFromMapFunc(r.ClusterToK3sConfigs), + predicates.All(ctrl.LoggerFrom(ctx), + predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), + predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), + ), + ) + if err != nil { + return errors.Wrap(err, "failed adding Watch for Clusters to controller manager") + } + + return nil +} + +// Reconcile handles K3sConfig events. +func (r *K3sConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, retErr error) { + log := ctrl.LoggerFrom(ctx) + + // Lookup the kubeadm config + config := &infrabootstrapv1.K3sConfig{} + if err := r.Client.Get(ctx, req.NamespacedName, config); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + log.Error(err, "Failed to get config") + return ctrl.Result{}, err + } + + // AddOwners adds the owners of K3sConfig as k/v pairs to the logger. + // Specifically, it will add K3sControlPlane, MachineSet and MachineDeployment. 
+ ctx, log, err := kklog.AddOwners(ctx, r.Client, config) + if err != nil { + return ctrl.Result{}, err + } + + // Look up the owner of this k3s config if there is one + configOwner, err := bsutil.GetConfigOwner(ctx, r.Client, config) + if apierrors.IsNotFound(err) { + // Could not find the owner yet, this is not an error and will rereconcile when the owner gets set. + return ctrl.Result{}, nil + } + if err != nil { + log.Error(err, "Failed to get owner") + return ctrl.Result{}, err + } + if configOwner == nil { + return ctrl.Result{}, nil + } + log = log.WithValues(configOwner.GetKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion()) + + log = log.WithValues("Cluster", klog.KRef(configOwner.GetNamespace(), configOwner.ClusterName())) + ctx = ctrl.LoggerInto(ctx, log) + + // Lookup the cluster the config owner is associated with + cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName()) + if err != nil { + if errors.Cause(err) == util.ErrNoCluster { + log.Info(fmt.Sprintf("%s does not belong to a cluster yet, waiting until it's part of a cluster", configOwner.GetKind())) + return ctrl.Result{}, nil + } + + if apierrors.IsNotFound(err) { + log.Info("Cluster does not exist yet, waiting until it is created") + return ctrl.Result{}, nil + } + log.Error(err, "Could not get cluster with metadata") + return ctrl.Result{}, err + } + + if annotations.IsPaused(cluster, config) { + log.Info("Reconciliation is paused for this object") + return ctrl.Result{}, nil + } + + scope := &Scope{ + Logger: log, + Config: config, + ConfigOwner: configOwner, + Cluster: cluster, + } + + // Initialize the patch helper. + patchHelper, err := patch.NewHelper(config, r.Client) + if err != nil { + return ctrl.Result{}, err + } + + // Attempt to Patch the K3sConfig object and status after each reconciliation if no error occurs. 
+ defer func() { + // always update the readyCondition; the summary is represented using the "1 of x completed" notation. + conditions.SetSummary(config, + conditions.WithConditions( + bootstrapv1.DataSecretAvailableCondition, + bootstrapv1.CertificatesAvailableCondition, + ), + ) + // Patch ObservedGeneration only if the reconciliation completed successfully + var patchOpts []patch.Option + if retErr == nil { + patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + } + if err := patchHelper.Patch(ctx, config, patchOpts...); err != nil { + log.Error(retErr, "Failed to patch config") + if retErr == nil { + retErr = err + } + } + }() + + switch { + // Wait for the infrastructure to be ready. + case !cluster.Status.InfrastructureReady: + log.Info("Cluster infrastructure is not ready, waiting") + conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + return ctrl.Result{}, nil + // Reconcile status for machines that already have a secret reference, but our status isn't up-to-date. + // This case solves the pivoting scenario (or a backup restore) which doesn't preserve the status subresource on objects. + case configOwner.DataSecretName() != nil && (!config.Status.Ready || config.Status.DataSecretName == nil): + config.Status.Ready = true + config.Status.DataSecretName = configOwner.DataSecretName() + conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition) + return ctrl.Result{}, nil + // Status is ready means a config has been generated. + case config.Status.Ready: + return ctrl.Result{}, nil + } + + // Note: can't use IsFalse here because we need to handle the absence of the condition as well as false. + if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + return r.handleClusterNotInitialized(ctx, scope) + } + + // Every other case it's a join scenario + // Nb. 
in this case ClusterConfiguration and InitConfiguration should not be defined by users, but in case of misconfigurations, CABPK3s simply ignore them + + // Unlock any locks that might have been set during init process + r.K3sInitLock.Unlock(ctx, cluster) + + // if the AgentConfiguration is missing, create a default one + if config.Spec.AgentConfiguration == nil { + log.Info("Creating default AgentConfiguration") + config.Spec.AgentConfiguration = &infrabootstrapv1.AgentConfiguration{} + } + + // it's a control plane join + if configOwner.IsControlPlaneMachine() { + return r.joinControlplane(ctx, scope) + } + + // It's a worker join + return r.joinWorker(ctx, scope) +} + +func (r *K3sConfigReconciler) handleClusterNotInitialized(ctx context.Context, scope *Scope) (_ ctrl.Result, retErr error) { + // initialize the DataSecretAvailableCondition if missing. + // this is required in order to avoid the condition's LastTransitionTime to flicker in case of errors surfacing + // using the DataSecretGeneratedFailedReason + if conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason { + conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") + } + + // if it's NOT a control plane machine, requeue + if !scope.ConfigOwner.IsControlPlaneMachine() { + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + // if the machine has not ClusterConfiguration and InitConfiguration, requeue + if scope.Config.Spec.ServerConfiguration == nil && scope.Config.Spec.AgentConfiguration == nil { + scope.Info("Control plane is not ready, requeing joining control planes until ready.") + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + machine := &clusterv1.Machine{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(scope.ConfigOwner.Object, machine); err != nil { + return ctrl.Result{}, 
errors.Wrapf(err, "cannot convert %s to Machine", scope.ConfigOwner.GetKind()) + } + + // acquire the init lock so that only the first machine configured + // as control plane get processed here + // if not the first, requeue + if !r.K3sInitLock.Lock(ctx, scope.Cluster, machine) { + scope.Info("A control plane is already being initialized, requeing until control plane is ready") + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + defer func() { + if retErr != nil { + if !r.K3sInitLock.Unlock(ctx, scope.Cluster) { + retErr = kerrors.NewAggregate([]error{retErr, errors.New("failed to unlock the kubeadm init lock")}) + } + } + }() + + scope.Info("Creating BootstrapData for the first control plane") + + if scope.Config.Spec.ServerConfiguration == nil { + scope.Config.Spec.ServerConfiguration = &infrabootstrapv1.ServerConfiguration{} + } + + // injects into config.ClusterConfiguration values from top level object + r.reconcileTopLevelObjectSettings(ctx, scope.Cluster, machine, scope.Config) + + certificates := secret.NewCertificatesForInitialControlPlane() + err := certificates.LookupOrGenerate( + ctx, + r.Client, + util.ObjectKey(scope.Cluster), + *metav1.NewControllerRef(scope.Config, bootstrapv1.GroupVersion.WithKind("K3sConfig")), + ) + if err != nil { + conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + return ctrl.Result{}, err + } + + conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) + + t, err := r.generateAndStoreToken(ctx, scope) + if err != nil { + return ctrl.Result{}, err + } + + initData, err := k3stypes.MarshalInitServerConfiguration(&scope.Config.Spec, t) + if err != nil { + scope.Error(err, "Failed to marshal server configuration") + return ctrl.Result{}, err + } + + files, err := r.resolveFiles(ctx, scope.Config) + if err != nil { + conditions.MarkFalse(scope.Config, 
bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + return ctrl.Result{}, err + } + + initConfigFile := bootstrapv1.File{ + Path: k3stypes.DefaultK3sConfigLocation, + Content: initData, + Owner: "root:root", + Permissions: "0640", + } + + controlPlaneInput := &cloudinit.ControlPlaneInput{ + BaseUserData: cloudinit.BaseUserData{ + AdditionalFiles: files, + PreK3sCommands: scope.Config.Spec.PreK3sCommands, + PostK3sCommands: scope.Config.Spec.PostK3sCommands, + ConfigFile: initConfigFile, + }, + Certificates: certificates, + } + + bootstrapInitData, err := cloudinit.NewInitControlPlane(controlPlaneInput) + if err != nil { + scope.Error(err, "Failed to generate user data for bootstrap control plane") + return ctrl.Result{}, err + } + + if err := r.storeBootstrapData(ctx, scope, bootstrapInitData); err != nil { + scope.Error(err, "Failed to store bootstrap data") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +func (r *K3sConfigReconciler) joinWorker(ctx context.Context, scope *Scope) (ctrl.Result, error) { + scope.Info("Creating BootstrapData for the worker node") + + // Ensure that agentConfiguration is properly set for joining node on the current cluster. 
+ if res, err := r.reconcileDiscovery(ctx, scope.Cluster, scope.Config); err != nil { + return ctrl.Result{}, err + } else if !res.IsZero() { + return res, nil + } + + joinWorkerData, err := k3stypes.MarshalJoinAgentConfiguration(scope.Config.Spec.AgentConfiguration) + if err != nil { + scope.Error(err, "Failed to marshal join configuration") + return ctrl.Result{}, err + } + + files, err := r.resolveFiles(ctx, scope.Config) + if err != nil { + conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + return ctrl.Result{}, err + } + + joinConfigFile := bootstrapv1.File{ + Path: k3stypes.DefaultK3sConfigLocation, + Content: joinWorkerData, + Owner: "root:root", + Permissions: "0640", + } + + workerJoinInput := &cloudinit.NodeInput{ + BaseUserData: cloudinit.BaseUserData{ + AdditionalFiles: files, + PreK3sCommands: scope.Config.Spec.PreK3sCommands, + PostK3sCommands: scope.Config.Spec.PostK3sCommands, + ConfigFile: joinConfigFile, + }, + } + + cloudInitData, err := cloudinit.NewNode(workerJoinInput) + if err != nil { + scope.Error(err, "Failed to generate user data for bootstrap control plane") + return ctrl.Result{}, err + } + + if err := r.storeBootstrapData(ctx, scope, cloudInitData); err != nil { + scope.Error(err, "Failed to store bootstrap data") + return ctrl.Result{}, err + } + return ctrl.Result{}, nil +} + +func (r *K3sConfigReconciler) joinControlplane(ctx context.Context, scope *Scope) (ctrl.Result, error) { + scope.Info("Creating BootstrapData for the joining control plane") + + if !scope.ConfigOwner.IsControlPlaneMachine() { + return ctrl.Result{}, fmt.Errorf("%s is not a valid control plane kind, only Machine is supported", scope.ConfigOwner.GetKind()) + } + + if scope.Config.Spec.Cluster == nil { + scope.Config.Spec.Cluster = &infrabootstrapv1.Cluster{} + } + + // Ensure that joinConfiguration.Discovery is properly set for joining node 
on the current cluster. + if res, err := r.reconcileDiscovery(ctx, scope.Cluster, scope.Config); err != nil { + return ctrl.Result{}, err + } else if !res.IsZero() { + return res, nil + } + + joinData, err := k3stypes.MarshalJoinServerConfiguration(scope.Config.Spec.ServerConfiguration) + if err != nil { + scope.Error(err, "Failed to marshal join configuration") + return ctrl.Result{}, err + } + + files, err := r.resolveFiles(ctx, scope.Config) + if err != nil { + conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + return ctrl.Result{}, err + } + + joinConfigFile := bootstrapv1.File{ + Path: k3stypes.DefaultK3sConfigLocation, + Content: joinData, + Owner: "root:root", + Permissions: "0640", + } + + controlPlaneJoinInput := &cloudinit.ControlPlaneInput{ + BaseUserData: cloudinit.BaseUserData{ + AdditionalFiles: files, + PreK3sCommands: scope.Config.Spec.PreK3sCommands, + PostK3sCommands: scope.Config.Spec.PostK3sCommands, + ConfigFile: joinConfigFile, + }, + } + + cloudInitData, err := cloudinit.NewJoinControlPlane(controlPlaneJoinInput) + if err != nil { + scope.Error(err, "Failed to generate user data for bootstrap control plane") + return ctrl.Result{}, err + } + + if err := r.storeBootstrapData(ctx, scope, cloudInitData); err != nil { + scope.Error(err, "Failed to store bootstrap data") + return ctrl.Result{}, err + } + return ctrl.Result{}, nil +} + +func (r *K3sConfigReconciler) generateAndStoreToken(ctx context.Context, scope *Scope) (string, error) { + t, err := bootstraputil.GenerateBootstrapToken() + if err != nil { + return "", errors.Wrap(err, "unable to generate bootstrap token") + } + + s := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-token", scope.Cluster.Name), + Namespace: scope.Config.Namespace, + Labels: map[string]string{ + clusterv1.ClusterLabelName: scope.Cluster.Name, + }, + OwnerReferences: 
[]metav1.OwnerReference{ + { + APIVersion: bootstrapv1.GroupVersion.String(), + Kind: "K3sConfig", + Name: scope.Config.Name, + UID: scope.Config.UID, + Controller: pointer.Bool(true), + }, + }, + }, + Data: map[string][]byte{ + "value": []byte(t), + }, + Type: clusterv1.ClusterSecretType, + } + + // as secret creation and scope.Config status patch are not atomic operations + // it is possible that secret creation happens but the config.Status patches are not applied + if err := r.Client.Create(ctx, s); err != nil { + if !apierrors.IsAlreadyExists(err) { + return "", errors.Wrapf(err, "failed to create token for K3sConfig %s/%s", scope.Config.Namespace, scope.Config.Name) + } + if err := r.Client.Update(ctx, s); err != nil { + return "", errors.Wrapf(err, "failed to update bootstrap token secret for K3sConfig %s/%s", scope.Config.Namespace, scope.Config.Name) + } + } + + return t, nil +} + +// resolveFiles maps .Spec.Files into cloudinit.Files, resolving any object references +// along the way. +func (r *K3sConfigReconciler) resolveFiles(ctx context.Context, cfg *infrabootstrapv1.K3sConfig) ([]bootstrapv1.File, error) { + collected := make([]bootstrapv1.File, 0, len(cfg.Spec.Files)) + + for i := range cfg.Spec.Files { + in := cfg.Spec.Files[i] + if in.ContentFrom != nil { + data, err := r.resolveSecretFileContent(ctx, cfg.Namespace, in) + if err != nil { + return nil, errors.Wrapf(err, "failed to resolve file source") + } + in.ContentFrom = nil + in.Content = string(data) + } + collected = append(collected, in) + } + + return collected, nil +} + +// resolveSecretFileContent returns file content fetched from a referenced secret object. 
+func (r *K3sConfigReconciler) resolveSecretFileContent(ctx context.Context, ns string, source bootstrapv1.File) ([]byte, error) { + s := &corev1.Secret{} + key := types.NamespacedName{Namespace: ns, Name: source.ContentFrom.Secret.Name} + if err := r.Client.Get(ctx, key, s); err != nil { + if apierrors.IsNotFound(err) { + return nil, errors.Wrapf(err, "secret not found: %s", key) + } + return nil, errors.Wrapf(err, "failed to retrieve Secret %q", key) + } + data, ok := s.Data[source.ContentFrom.Secret.Key] + if !ok { + return nil, errors.Errorf("secret references non-existent secret key: %q", source.ContentFrom.Secret.Key) + } + return data, nil +} + +// storeBootstrapData creates a new secret with the data passed in as input, +// sets the reference in the configuration status and ready to true. +func (r *K3sConfigReconciler) storeBootstrapData(ctx context.Context, scope *Scope, data []byte) error { + log := ctrl.LoggerFrom(ctx) + + s := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: scope.Config.Name, + Namespace: scope.Config.Namespace, + Labels: map[string]string{ + clusterv1.ClusterLabelName: scope.Cluster.Name, + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: bootstrapv1.GroupVersion.String(), + Kind: "K3sConfig", + Name: scope.Config.Name, + UID: scope.Config.UID, + Controller: pointer.Bool(true), + }, + }, + }, + Data: map[string][]byte{ + "value": data, + }, + Type: clusterv1.ClusterSecretType, + } + + // as secret creation and scope.Config status patch are not atomic operations + // it is possible that secret creation happens but the config.Status patches are not applied + if err := r.Client.Create(ctx, s); err != nil { + if !apierrors.IsAlreadyExists(err) { + return errors.Wrapf(err, "failed to create bootstrap data secret for K3sConfig %s/%s", scope.Config.Namespace, scope.Config.Name) + } + log.Info("bootstrap data secret for K3sConfig already exists, updating", "Secret", klog.KObj(s)) + if err := r.Client.Update(ctx, s); 
err != nil { + return errors.Wrapf(err, "failed to update bootstrap data secret for K3sConfig %s/%s", scope.Config.Namespace, scope.Config.Name) + } + } + scope.Config.Status.DataSecretName = pointer.String(s.Name) + scope.Config.Status.Ready = true + conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition) + return nil +} + +func (r *K3sConfigReconciler) reconcileDiscovery(ctx context.Context, cluster *clusterv1.Cluster, config *infrabootstrapv1.K3sConfig) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + + // if config already contains a file discovery configuration, respect it without further validations + if config.Spec.Cluster.TokenFile != "" { + return ctrl.Result{}, nil + } + + // if BootstrapToken already contains an APIServerEndpoint, respect it; otherwise inject the APIServerEndpoint endpoint defined in cluster status + apiServerEndpoint := config.Spec.Cluster.Server + if apiServerEndpoint == "" { + if !cluster.Spec.ControlPlaneEndpoint.IsValid() { + log.V(1).Info("Waiting for Cluster Controller to set Cluster.Server") + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + + apiServerEndpoint = cluster.Spec.ControlPlaneEndpoint.String() + config.Spec.Cluster.Server = fmt.Sprintf("https://%s", apiServerEndpoint) + log.V(3).Info("Altering Cluster.Server", "Server", apiServerEndpoint) + } + + // if BootstrapToken already contains a token, respect it; otherwise create a new bootstrap token for the node to join + if config.Spec.Cluster.Token == "" { + s := &corev1.Secret{} + obj := client.ObjectKey{ + Namespace: config.Namespace, + Name: fmt.Sprintf("%s-token", cluster.Name), + } + + if err := r.Client.Get(ctx, obj, s); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to get token for K3sConfig %s/%s", config.Namespace, config.Name) + } + + config.Spec.Cluster.Token = string(s.Data["value"]) + log.V(3).Info("Altering Cluster.Token") + } + + return ctrl.Result{}, nil +} + +// MachineToBootstrapMapFunc is a 
handler.ToRequestsFunc to be used to enqueue +// request for reconciliation of K3sConfig. +func (r *K3sConfigReconciler) MachineToBootstrapMapFunc(o client.Object) []ctrl.Request { + m, ok := o.(*clusterv1.Machine) + if !ok { + panic(fmt.Sprintf("Expected a Machine but got a %T", o)) + } + + var result []ctrl.Request + if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind() == bootstrapv1.GroupVersion.WithKind("K3sConfig") { + name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name} + result = append(result, ctrl.Request{NamespacedName: name}) + } + return result +} + +// MachinePoolToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue +// request for reconciliation of K3sConfig. +func (r *K3sConfigReconciler) MachinePoolToBootstrapMapFunc(o client.Object) []ctrl.Request { + m, ok := o.(*expv1.MachinePool) + if !ok { + panic(fmt.Sprintf("Expected a MachinePool but got a %T", o)) + } + + var result []ctrl.Request + configRef := m.Spec.Template.Spec.Bootstrap.ConfigRef + if configRef != nil && configRef.GroupVersionKind().GroupKind() == bootstrapv1.GroupVersion.WithKind("K3sConfig").GroupKind() { + name := client.ObjectKey{Namespace: m.Namespace, Name: configRef.Name} + result = append(result, ctrl.Request{NamespacedName: name}) + } + return result +} + +// ClusterToK3sConfigs is a handler.ToRequestsFunc to be used to enqueue +// requests for reconciliation of K3sConfig. 
+func (r *K3sConfigReconciler) ClusterToK3sConfigs(o client.Object) []ctrl.Request { + var result []ctrl.Request + + c, ok := o.(*clusterv1.Cluster) + if !ok { + panic(fmt.Sprintf("Expected a Cluster but got a %T", o)) + } + + selectors := []client.ListOption{ + client.InNamespace(c.Namespace), + client.MatchingLabels{ + clusterv1.ClusterLabelName: c.Name, + }, + } + + machineList := &clusterv1.MachineList{} + if err := r.Client.List(context.TODO(), machineList, selectors...); err != nil { + return nil + } + + for _, m := range machineList.Items { + if m.Spec.Bootstrap.ConfigRef != nil && + m.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == bootstrapv1.GroupVersion.WithKind("K3sConfig").GroupKind() { + name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name} + result = append(result, ctrl.Request{NamespacedName: name}) + } + } + + if feature.Gates.Enabled(feature.MachinePool) { + machinePoolList := &expv1.MachinePoolList{} + if err := r.Client.List(context.TODO(), machinePoolList, selectors...); err != nil { + return nil + } + + for _, mp := range machinePoolList.Items { + if mp.Spec.Template.Spec.Bootstrap.ConfigRef != nil && + mp.Spec.Template.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == bootstrapv1.GroupVersion.WithKind("K3sConfig").GroupKind() { + name := client.ObjectKey{Namespace: mp.Namespace, Name: mp.Spec.Template.Spec.Bootstrap.ConfigRef.Name} + result = append(result, ctrl.Request{NamespacedName: name}) + } + } + } + + return result +} + +// reconcileTopLevelObjectSettings injects into config.ClusterConfiguration values from top level objects like cluster and machine. +// The implementation func respect user provided config values, but in case some of them are missing, values from top level objects are used. 
+func (r *K3sConfigReconciler) reconcileTopLevelObjectSettings(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, config *infrabootstrapv1.K3sConfig) { + log := ctrl.LoggerFrom(ctx) + + // If there are no Network settings defined in ClusterConfiguration, use ClusterNetwork settings, if defined + if cluster.Spec.ClusterNetwork != nil { + if config.Spec.ServerConfiguration.Networking.ClusterDomain == "" && cluster.Spec.ClusterNetwork.ServiceDomain != "" { + config.Spec.ServerConfiguration.Networking.ClusterDomain = cluster.Spec.ClusterNetwork.ServiceDomain + log.V(3).Info("Altering ServerConfiguration.Networking.ClusterDomain", "ClusterDomain", config.Spec.ServerConfiguration.Networking.ClusterDomain) + } + if config.Spec.ServerConfiguration.Networking.ServiceCIDR == "" && + cluster.Spec.ClusterNetwork.Services != nil && + len(cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 { + config.Spec.ServerConfiguration.Networking.ServiceCIDR = cluster.Spec.ClusterNetwork.Services.String() + log.V(3).Info("Altering ServerConfiguration.Networking.ServiceCIDR", "ServiceCIDR", config.Spec.ServerConfiguration.Networking.ServiceCIDR) + } + if config.Spec.ServerConfiguration.Networking.ClusterCIDR == "" && + cluster.Spec.ClusterNetwork.Pods != nil && + len(cluster.Spec.ClusterNetwork.Pods.CIDRBlocks) > 0 { + config.Spec.ServerConfiguration.Networking.ClusterCIDR = cluster.Spec.ClusterNetwork.Pods.String() + log.V(3).Info("Altering ServerConfiguration.Networking.ClusterCIDR", "ClusterCIDR", config.Spec.ServerConfiguration.Networking.ClusterCIDR) + } + } + + // If there are no Version settings defined, use Version from machine, if defined + if config.Spec.Version == "" && machine.Spec.Version != nil { + config.Spec.Version = *machine.Spec.Version + log.V(3).Info("Altering Spec.Version", "Version", config.Spec.Version) + } +} diff --git a/bootstrap/k3s/controllers/suite_test.go b/bootstrap/k3s/controllers/suite_test.go new file mode 100644 index 
00000000..76505fda --- /dev/null +++ b/bootstrap/k3s/controllers/suite_test.go @@ -0,0 +1,29 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +//var ( +// env *envtest.Environment +// ctx = ctrl.SetupSignalHandler() +//) +// +//func TestMain(m *testing.M) { +// os.Exit(envtest.Run(ctx, envtest.RunInput{ +// M: m, +// SetupEnv: func(e *envtest.Environment) { env = e }, +// })) +//} diff --git a/bootstrap/k3s/hack/boilerplate.go.txt b/bootstrap/k3s/hack/boilerplate.go.txt new file mode 100644 index 00000000..62802d18 --- /dev/null +++ b/bootstrap/k3s/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2022 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/bootstrap/k3s/main.go b/bootstrap/k3s/main.go new file mode 100644 index 00000000..56759d74 --- /dev/null +++ b/bootstrap/k3s/main.go @@ -0,0 +1,193 @@ +/* +Copyright 2022. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package main +package main + +import ( + "flag" + "fmt" + "math/rand" + "os" + "time" + + "github.com/spf13/pflag" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/klog/v2" + "k8s.io/klog/v2/klogr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/remote" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/feature" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/healthz" + + infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1" + "github.com/kubesphere/kubekey/bootstrap/k3s/controllers" + infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1" + //+kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(clusterv1.AddToScheme(scheme)) + utilruntime.Must(expv1.AddToScheme(scheme)) + utilruntime.Must(infrabootstrapv1.AddToScheme(scheme)) + utilruntime.Must(infracontrolplanev1.AddToScheme(scheme)) + 
//+kubebuilder:scaffold:scheme +} + +var ( + metricsAddr string + enableLeaderElection bool + leaderElectionLeaseDuration time.Duration + leaderElectionRenewDeadline time.Duration + leaderElectionRetryPeriod time.Duration + k3sConfigConcurrency int + healthAddr string + watchFilterValue string + watchNamespace string + syncPeriod time.Duration + webhookPort int + webhookCertDir string +) + +func main() { + klog.InitFlags(nil) + + rand.Seed(time.Now().UnixNano()) + initFlags(pflag.CommandLine) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + ctrl.SetLogger(klogr.New()) + + ctx := ctrl.SetupSignalHandler() + + restConfig := ctrl.GetConfigOrDie() + restConfig.UserAgent = remote.DefaultClusterAPIUserAgent("cluster-api-k3s-bootstrap-manager") + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "k3s-bootstrap-manager-leader-election-capkk", + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + LeaseDuration: &leaderElectionLeaseDuration, + RenewDeadline: &leaderElectionRenewDeadline, + RetryPeriod: &leaderElectionRetryPeriod, + SyncPeriod: &syncPeriod, + ClientDisableCacheFor: []client.Object{ + &corev1.ConfigMap{}, + &corev1.Secret{}, + }, + Namespace: watchNamespace, + Port: webhookPort, + HealthProbeBindAddress: healthAddr, + CertDir: webhookCertDir, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + if err := (&controllers.K3sConfigReconciler{ + Client: mgr.GetClient(), + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, concurrency(k3sConfigConcurrency)); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "K3sConfig") + os.Exit(1) + } + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", 
healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + if err = (&infrabootstrapv1.K3sConfig{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "K3sConfig") + os.Exit(1) + } + if err = (&infrabootstrapv1.K3sConfigTemplate{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "K3sConfigTemplate") + os.Exit(1) + } + // +kubebuilder:scaffold:builder + setupLog.Info("starting manager") + if err := mgr.Start(ctx); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} + +func initFlags(fs *pflag.FlagSet) { + fs.StringVar(&metricsAddr, "metrics-bind-addr", "localhost:8080", + "The address the metric endpoint binds to.") + + fs.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + + fs.DurationVar(&leaderElectionLeaseDuration, "leader-elect-lease-duration", 15*time.Second, + "Interval at which non-leader candidates will wait to force acquire leadership (duration string)") + + fs.DurationVar(&leaderElectionRenewDeadline, "leader-elect-renew-deadline", 10*time.Second, + "Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)") + + fs.DurationVar(&leaderElectionRetryPeriod, "leader-elect-retry-period", 2*time.Second, + "Duration the LeaderElector clients should wait between tries of actions (duration string)") + + fs.StringVar(&watchNamespace, "namespace", "", + "Namespace that the controller watches to reconcile cluster-api objects. 
If unspecified, the controller watches for cluster-api objects across all namespaces.") + + fs.StringVar(&healthAddr, "health-addr", ":9440", + "The address the health endpoint binds to.") + + fs.IntVar(&k3sConfigConcurrency, "k3sconfig-concurrency", 10, + "Number of kubeadm configs to process simultaneously") + + fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, + "The minimum interval at which watched resources are reconciled (e.g. 15m)") + + fs.StringVar(&watchFilterValue, "watch-filter", "", + fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel)) + + fs.IntVar(&webhookPort, "webhook-port", 9443, + "Webhook Server port") + + fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/", + "Webhook cert dir, only used when webhook-port is specified.") + + feature.MutableGates.AddFlag(fs) +} + +func concurrency(c int) controller.Options { + return controller.Options{MaxConcurrentReconciles: c} +} diff --git a/bootstrap/k3s/pkg/cloudinit/cloudinit.go b/bootstrap/k3s/pkg/cloudinit/cloudinit.go new file mode 100644 index 00000000..09312308 --- /dev/null +++ b/bootstrap/k3s/pkg/cloudinit/cloudinit.go @@ -0,0 +1,99 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cloudinit + +import ( + "bytes" + _ "embed" + "text/template" + + "github.com/pkg/errors" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" +) + +const ( + // sentinelFileCommand writes a file to /run/cluster-api to signal successful Kubernetes bootstrapping in a way that + // works both for Linux and Windows OS. + sentinelFileCommand = "echo success > /run/cluster-api/bootstrap-success.complete" + cloudConfigHeader = `## template: jinja +#cloud-config +` +) + +// BaseUserData is shared across all the various types of files written to disk. +type BaseUserData struct { + Header string + PreK3sCommands []string + PostK3sCommands []string + AdditionalFiles []bootstrapv1.File + WriteFiles []bootstrapv1.File + ConfigFile bootstrapv1.File + SentinelFileCommand string +} + +func (input *BaseUserData) prepare() error { + input.Header = cloudConfigHeader + input.WriteFiles = append(input.WriteFiles, input.AdditionalFiles...) + k3sScriptFile, err := generateBootstrapScript(input) + if err != nil { + return errors.Wrap(err, "failed to generate user data for machine install k3s") + } + input.WriteFiles = append(input.WriteFiles, *k3sScriptFile) + input.SentinelFileCommand = sentinelFileCommand + return nil +} + +func generate(kind string, tpl string, data interface{}) ([]byte, error) { + tm := template.New(kind).Funcs(defaultTemplateFuncMap) + if _, err := tm.Parse(filesTemplate); err != nil { + return nil, errors.Wrap(err, "failed to parse files template") + } + + if _, err := tm.Parse(commandsTemplate); err != nil { + return nil, errors.Wrap(err, "failed to parse commands template") + } + + t, err := tm.Parse(tpl) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s template", kind) + } + + var out bytes.Buffer + if err := t.Execute(&out, data); err != nil { + return nil, errors.Wrapf(err, "failed to generate %s template", kind) + } + + return out.Bytes(), nil +} + +var ( + //go:embed k3s-install.sh + k3sBootstrapScript 
string +) + +func generateBootstrapScript(input interface{}) (*bootstrapv1.File, error) { + k3sScript, err := generate("K3sInstallScript", k3sBootstrapScript, input) + if err != nil { + return nil, errors.Wrap(err, "failed to bootstrap script for machine joins") + } + return &bootstrapv1.File{ + Path: "/usr/local/bin/k3s-install.sh", + Owner: "root", + Permissions: "0755", + Content: string(k3sScript), + }, nil +} diff --git a/bootstrap/k3s/pkg/cloudinit/commands.go b/bootstrap/k3s/pkg/cloudinit/commands.go new file mode 100644 index 00000000..9fefb6d9 --- /dev/null +++ b/bootstrap/k3s/pkg/cloudinit/commands.go @@ -0,0 +1,26 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cloudinit + +const ( + commandsTemplate = `{{- define "commands" -}} +{{ range . }} + - {{printf "%q" .}} +{{- end -}} +{{- end -}} +` +) diff --git a/bootstrap/k3s/pkg/cloudinit/controlplane_init.go b/bootstrap/k3s/pkg/cloudinit/controlplane_init.go new file mode 100644 index 00000000..3781bb16 --- /dev/null +++ b/bootstrap/k3s/pkg/cloudinit/controlplane_init.go @@ -0,0 +1,64 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cloudinit + +import ( + "github.com/pkg/errors" + + "github.com/kubesphere/kubekey/util/secret" +) + +const ( + controlPlaneCloudInit = `{{.Header}} +{{template "files" .WriteFiles}} +- path: /run/cluster-api/placeholder + owner: root:root + permissions: '0640' + content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)" +runcmd: +{{- template "commands" .PreK3sCommands }} + - 'INSTALL_K3S_SKIP_DOWNLOAD=true /usr/local/bin/k3s-install.sh' +{{- template "commands" .PostK3sCommands }} +` +) + +// ControlPlaneInput defines the context to generate a controlplane instance user data. +type ControlPlaneInput struct { + BaseUserData + secret.Certificates + + ServerConfiguration string +} + +// NewInitControlPlane returns the clouding string to be used on initializing a controlplane instance. +func NewInitControlPlane(input *ControlPlaneInput) ([]byte, error) { + input.Header = cloudConfigHeader + input.WriteFiles = input.Certificates.AsFiles() + input.WriteFiles = append(input.WriteFiles, input.AdditionalFiles...) 
+ k3sScriptFile, err := generateBootstrapScript(input) + if err != nil { + return nil, errors.Wrap(err, "failed to generate user data for machine install k3s") + } + input.WriteFiles = append(input.WriteFiles, *k3sScriptFile) + input.SentinelFileCommand = sentinelFileCommand + userData, err := generate("InitControlplane", controlPlaneCloudInit, input) + if err != nil { + return nil, err + } + + return userData, nil +} diff --git a/bootstrap/k3s/pkg/cloudinit/controlplane_join.go b/bootstrap/k3s/pkg/cloudinit/controlplane_join.go new file mode 100644 index 00000000..707337a8 --- /dev/null +++ b/bootstrap/k3s/pkg/cloudinit/controlplane_join.go @@ -0,0 +1,48 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cloudinit + +import ( + "github.com/pkg/errors" +) + +const ( + controlPlaneJoinCloudInit = `{{.Header}} +{{template "files" .WriteFiles}} +- path: /run/cluster-api/placeholder + owner: root:root + permissions: '0640' + content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)" +runcmd: +{{- template "commands" .PreK3sCommands }} + - 'INSTALL_K3S_SKIP_DOWNLOAD=true /usr/local/bin/k3s-install.sh' +{{- template "commands" .PostK3sCommands }} +` +) + +// NewJoinControlPlane returns the cloudinit string to be used on joining a control plane instance. 
+func NewJoinControlPlane(input *ControlPlaneInput) ([]byte, error) { + if err := input.prepare(); err != nil { + return nil, err + } + userData, err := generate("JoinControlplane", controlPlaneJoinCloudInit, input) + if err != nil { + return nil, errors.Wrapf(err, "failed to generate user data for machine joining control plane") + } + + return userData, err +} diff --git a/bootstrap/k3s/pkg/cloudinit/doc.go b/bootstrap/k3s/pkg/cloudinit/doc.go new file mode 100644 index 00000000..1a473c7d --- /dev/null +++ b/bootstrap/k3s/pkg/cloudinit/doc.go @@ -0,0 +1,18 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package cloudinit implements kubeadm cloudinit functionality. +package cloudinit diff --git a/bootstrap/k3s/pkg/cloudinit/files.go b/bootstrap/k3s/pkg/cloudinit/files.go new file mode 100644 index 00000000..fd0c3f89 --- /dev/null +++ b/bootstrap/k3s/pkg/cloudinit/files.go @@ -0,0 +1,40 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
 See the License for the specific language governing permissions and
 limitations under the License.
*/

package cloudinit

const (
	// filesTemplate renders the cloud-init `write_files:` section for a list
	// of file entries. Optional attributes (encoding, owner, permissions,
	// append) are emitted only when set; content is indented as a YAML block
	// scalar via the Indent template func.
	filesTemplate = `{{ define "files" -}}
write_files:{{ range . }}
-   path: {{.Path}}
    {{ if ne .Encoding "" -}}
    encoding: "{{.Encoding}}"
    {{ end -}}
    {{ if ne .Owner "" -}}
    owner: {{.Owner}}
    {{ end -}}
    {{ if ne .Permissions "" -}}
    permissions: '{{.Permissions}}'
    {{ end -}}
    {{ if .Append -}}
    append: true
    {{ end -}}
    content: |
{{.Content | Indent 6}}
{{- end -}}
{{- end -}}
`
)
#!/bin/sh
#
# Copyright 2022 The KubeSphere Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

set -e
set -o noglob

# Usage:
#   curl ... | ENV_VAR=... sh -
#       or
#   ENV_VAR=... ./install.sh
#
# Example:
#   Installing a server without traefik:
#     curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh -
#   Installing an agent to point at a server:
#     curl ... | K3S_TOKEN=xxx K3S_URL=https://server-url:6443 sh -
#
# Environment variables:
#   - K3S_*
#     Environment variables which begin with K3S_ will be preserved for the
#     systemd service to use.
Setting K3S_URL without explicitly setting +# a systemd exec command will default the command to "agent", and we +# enforce that K3S_TOKEN or K3S_CLUSTER_SECRET is also set. +# +# - INSTALL_K3S_SKIP_DOWNLOAD +# If set to true will not download k3s hash or binary. +# +# - INSTALL_K3S_FORCE_RESTART +# If set to true will always restart the K3s service +# +# - INSTALL_K3S_SYMLINK +# If set to 'skip' will not create symlinks, 'force' will overwrite, +# default will symlink if command does not exist in path. +# +# - INSTALL_K3S_SKIP_ENABLE +# If set to true will not enable or start k3s service. +# +# - INSTALL_K3S_SKIP_START +# If set to true will not start k3s service. +# +# - INSTALL_K3S_VERSION +# Version of k3s to download from github. Will attempt to download from the +# stable channel if not specified. +# +# - INSTALL_K3S_COMMIT +# Commit of k3s to download from temporary cloud storage. +# * (for developer & QA use) +# +# - INSTALL_K3S_BIN_DIR +# Directory to install k3s binary, links, and uninstall script to, or use +# /usr/local/bin as the default +# +# - INSTALL_K3S_BIN_DIR_READ_ONLY +# If set to true will not write files to INSTALL_K3S_BIN_DIR, forces +# setting INSTALL_K3S_SKIP_DOWNLOAD=true +# +# - INSTALL_K3S_SYSTEMD_DIR +# Directory to install systemd service and environment files to, or use +# /etc/systemd/system as the default +# +# - INSTALL_K3S_EXEC or script arguments +# Command with flags to use for launching k3s in the systemd service, if +# the command is not specified will default to "agent" if K3S_URL is set +# or "server" if not. The final systemd command resolves to a combination +# of EXEC and script args ($@). +# +# The following commands result in the same behavior: +# curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh -s - +# curl ... | INSTALL_K3S_EXEC="server --disable=traefik" sh -s - +# curl ... | INSTALL_K3S_EXEC="server" sh -s - --disable=traefik +# curl ... | sh -s - server --disable=traefik +# curl ... 
| sh -s - --disable=traefik +# +# - INSTALL_K3S_NAME +# Name of systemd service to create, will default from the k3s exec command +# if not specified. If specified the name will be prefixed with 'k3s-'. +# +# - INSTALL_K3S_TYPE +# Type of systemd service to create, will default from the k3s exec command +# if not specified. +# +# - INSTALL_K3S_SELINUX_WARN +# If set to true will continue if k3s-selinux policy is not found. +# +# - INSTALL_K3S_SKIP_SELINUX_RPM +# If set to true will skip automatic installation of the k3s RPM. +# +# - INSTALL_K3S_CHANNEL_URL +# Channel URL for fetching k3s download URL. +# Defaults to 'https://update.k3s.io/v1-release/channels'. +# +# - INSTALL_K3S_CHANNEL +# Channel to use for fetching k3s download URL. +# Defaults to 'stable'. + +GITHUB_URL=https://github.com/k3s-io/k3s/releases +STORAGE_URL=https://storage.googleapis.com/k3s-ci-builds +DOWNLOADER= + +# --- helper functions for logs --- +info() +{ + echo '[INFO] ' "$@" +} +warn() +{ + echo '[WARN] ' "$@" >&2 +} +fatal() +{ + echo '[ERROR] ' "$@" >&2 + exit 1 +} + +# --- fatal if no systemd or openrc --- +verify_system() { + if [ -x /sbin/openrc-run ]; then + HAS_OPENRC=true + return + fi + if [ -x /bin/systemctl ] || type systemctl > /dev/null 2>&1; then + HAS_SYSTEMD=true + return + fi + fatal 'Can not find systemd or openrc to use as a process supervisor for k3s' +} + +# --- add quotes to command arguments --- +quote() { + for arg in "$@"; do + printf '%s\n' "$arg" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/'/" + done +} + +# --- add indentation and trailing slash to quoted args --- +quote_indent() { + printf ' \\\n' + for arg in "$@"; do + printf '\t%s \\\n' "$(quote "$arg")" + done +} + +# --- escape most punctuation characters, except quotes, forward slash, and space --- +escape() { + printf '%s' "$@" | sed -e 's/\([][!#$%&()*;<=>?\_`{|}]\)/\\\1/g;' +} + +# --- escape double quotes --- +escape_dq() { + printf '%s' "$@" | sed -e 's/"/\\"/g' +} + +# --- ensures $K3S_URL is empty or 
begins with https://, exiting fatally otherwise --- +verify_k3s_url() { + case "${K3S_URL}" in + "") + ;; + https://*) + ;; + *) + fatal "Only https:// URLs are supported for K3S_URL (have ${K3S_URL})" + ;; + esac +} + +# --- define needed environment variables --- +setup_env() { + # --- use command args if passed or create default --- + case "$1" in + # --- if we only have flags discover if command should be server or agent --- + (-*|"") + if [ -z "${K3S_URL}" ]; then + CMD_K3S=server + else + if [ -z "${K3S_TOKEN}" ] && [ -z "${K3S_TOKEN_FILE}" ] && [ -z "${K3S_CLUSTER_SECRET}" ]; then + fatal "Defaulted k3s exec command to 'agent' because K3S_URL is defined, but K3S_TOKEN, K3S_TOKEN_FILE or K3S_CLUSTER_SECRET is not defined." + fi + CMD_K3S=agent + fi + ;; + # --- command is provided --- + (*) + CMD_K3S=$1 + shift + ;; + esac + + verify_k3s_url + + CMD_K3S_EXEC="${CMD_K3S}$(quote_indent "$@")" + + # --- use systemd name if defined or create default --- + if [ -n "${INSTALL_K3S_NAME}" ]; then + SYSTEM_NAME=k3s-${INSTALL_K3S_NAME} + else + if [ "${CMD_K3S}" = server ]; then + SYSTEM_NAME=k3s + else + SYSTEM_NAME=k3s-${CMD_K3S} + fi + fi + + # --- check for invalid characters in system name --- + valid_chars=$(printf '%s' "${SYSTEM_NAME}" | sed -e 's/[][!#$%&()*;<=>?\_`{|}/[:space:]]/^/g;' ) + if [ "${SYSTEM_NAME}" != "${valid_chars}" ]; then + invalid_chars=$(printf '%s' "${valid_chars}" | sed -e 's/[^^]/ /g') + fatal "Invalid characters for system name: + ${SYSTEM_NAME} + ${invalid_chars}" + fi + + # --- use sudo if we are not already root --- + SUDO=sudo + if [ $(id -u) -eq 0 ]; then + SUDO= + fi + + # --- use systemd type if defined or create default --- + if [ -n "${INSTALL_K3S_TYPE}" ]; then + SYSTEMD_TYPE=${INSTALL_K3S_TYPE} + else + SYSTEMD_TYPE=notify + fi + + # --- use binary install directory if defined or create default --- + if [ -n "${INSTALL_K3S_BIN_DIR}" ]; then + BIN_DIR=${INSTALL_K3S_BIN_DIR} + else + # --- use /usr/local/bin if root can write to 
it, otherwise use /opt/bin if it exists + BIN_DIR=/usr/local/bin + if ! $SUDO sh -c "touch ${BIN_DIR}/k3s-ro-test && rm -rf ${BIN_DIR}/k3s-ro-test"; then + if [ -d /opt/bin ]; then + BIN_DIR=/opt/bin + fi + fi + fi + + # --- use systemd directory if defined or create default --- + if [ -n "${INSTALL_K3S_SYSTEMD_DIR}" ]; then + SYSTEMD_DIR="${INSTALL_K3S_SYSTEMD_DIR}" + else + SYSTEMD_DIR=/etc/systemd/system + fi + + # --- set related files from system name --- + SERVICE_K3S=${SYSTEM_NAME}.service + UNINSTALL_K3S_SH=${UNINSTALL_K3S_SH:-${BIN_DIR}/${SYSTEM_NAME}-uninstall.sh} + KILLALL_K3S_SH=${KILLALL_K3S_SH:-${BIN_DIR}/k3s-killall.sh} + + # --- use service or environment location depending on systemd/openrc --- + if [ "${HAS_SYSTEMD}" = true ]; then + FILE_K3S_SERVICE=${SYSTEMD_DIR}/${SERVICE_K3S} + FILE_K3S_ENV=${SYSTEMD_DIR}/${SERVICE_K3S}.env + elif [ "${HAS_OPENRC}" = true ]; then + $SUDO mkdir -p /etc/rancher/k3s + FILE_K3S_SERVICE=/etc/init.d/${SYSTEM_NAME} + FILE_K3S_ENV=/etc/rancher/k3s/${SYSTEM_NAME}.env + fi + + # --- get hash of config & exec for currently installed k3s --- + PRE_INSTALL_HASHES=$(get_installed_hashes) + + # --- if bin directory is read only skip download --- + if [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ]; then + INSTALL_K3S_SKIP_DOWNLOAD=true + fi + + # --- setup channel values + INSTALL_K3S_CHANNEL_URL=${INSTALL_K3S_CHANNEL_URL:-'https://update.k3s.io/v1-release/channels'} + INSTALL_K3S_CHANNEL=${INSTALL_K3S_CHANNEL:-'stable'} +} + +# --- check if skip download environment variable set --- +can_skip_download_binary() { + if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ] && [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != binary ]; then + return 1 + fi +} + +can_skip_download_selinux() { + if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ] && [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != selinux ]; then + return 1 + fi +} + +# --- verify an executable k3s binary is installed --- +verify_k3s_is_executable() { + if [ ! 
-x ${BIN_DIR}/k3s ]; then + fatal "Executable k3s binary not found at ${BIN_DIR}/k3s" + fi +} + +# --- set arch and suffix, fatal if architecture not supported --- +setup_verify_arch() { + if [ -z "$ARCH" ]; then + ARCH=$(uname -m) + fi + case $ARCH in + amd64) + ARCH=amd64 + SUFFIX= + ;; + x86_64) + ARCH=amd64 + SUFFIX= + ;; + arm64) + ARCH=arm64 + SUFFIX=-${ARCH} + ;; + s390x) + ARCH=s390x + SUFFIX=-${ARCH} + ;; + aarch64) + ARCH=arm64 + SUFFIX=-${ARCH} + ;; + arm*) + ARCH=arm + SUFFIX=-${ARCH}hf + ;; + *) + fatal "Unsupported architecture $ARCH" + esac +} + +# --- verify existence of network downloader executable --- +verify_downloader() { + # Return failure if it doesn't exist or is no executable + [ -x "$(command -v $1)" ] || return 1 + + # Set verified executable as our downloader program and return success + DOWNLOADER=$1 + return 0 +} + +# --- create temporary directory and cleanup when done --- +setup_tmp() { + TMP_DIR=$(mktemp -d -t k3s-install.XXXXXXXXXX) + TMP_HASH=${TMP_DIR}/k3s.hash + TMP_BIN=${TMP_DIR}/k3s.bin + cleanup() { + code=$? 
+ set +e + trap - EXIT + rm -rf ${TMP_DIR} + exit $code + } + trap cleanup INT EXIT +} + +# --- use desired k3s version if defined or find version from channel --- +get_release_version() { + if [ -n "${INSTALL_K3S_COMMIT}" ]; then + VERSION_K3S="commit ${INSTALL_K3S_COMMIT}" + elif [ -n "${INSTALL_K3S_VERSION}" ]; then + VERSION_K3S=${INSTALL_K3S_VERSION} + else + info "Finding release for channel ${INSTALL_K3S_CHANNEL}" + version_url="${INSTALL_K3S_CHANNEL_URL}/${INSTALL_K3S_CHANNEL}" + case $DOWNLOADER in + curl) + VERSION_K3S=$(curl -w '%{url_effective}' -L -s -S ${version_url} -o /dev/null | sed -e 's|.*/||') + ;; + wget) + VERSION_K3S=$(wget -SqO /dev/null ${version_url} 2>&1 | grep -i Location | sed -e 's|.*/||') + ;; + *) + fatal "Incorrect downloader executable '$DOWNLOADER'" + ;; + esac + fi + info "Using ${VERSION_K3S} as release" +} + +# --- download from github url --- +download() { + [ $# -eq 2 ] || fatal 'download needs exactly 2 arguments' + + case $DOWNLOADER in + curl) + curl -o $1 -sfL $2 + ;; + wget) + wget -qO $1 $2 + ;; + *) + fatal "Incorrect executable '$DOWNLOADER'" + ;; + esac + + # Abort if download command failed + [ $? 
-eq 0 ] || fatal 'Download failed' +} + +# --- download hash from github url --- +download_hash() { + if [ -n "${INSTALL_K3S_COMMIT}" ]; then + HASH_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}.sha256sum + else + HASH_URL=${GITHUB_URL}/download/${VERSION_K3S}/sha256sum-${ARCH}.txt + fi + info "Downloading hash ${HASH_URL}" + download ${TMP_HASH} ${HASH_URL} + HASH_EXPECTED=$(grep " k3s${SUFFIX}$" ${TMP_HASH}) + HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*} +} + +# --- check hash against installed version --- +installed_hash_matches() { + if [ -x ${BIN_DIR}/k3s ]; then + HASH_INSTALLED=$(sha256sum ${BIN_DIR}/k3s) + HASH_INSTALLED=${HASH_INSTALLED%%[[:blank:]]*} + if [ "${HASH_EXPECTED}" = "${HASH_INSTALLED}" ]; then + return + fi + fi + return 1 +} + +# --- download binary from github url --- +download_binary() { + if [ -n "${INSTALL_K3S_COMMIT}" ]; then + BIN_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT} + else + BIN_URL=${GITHUB_URL}/download/${VERSION_K3S}/k3s${SUFFIX} + fi + info "Downloading binary ${BIN_URL}" + download ${TMP_BIN} ${BIN_URL} +} + +# --- verify downloaded binary hash --- +verify_binary() { + info "Verifying binary download" + HASH_BIN=$(sha256sum ${TMP_BIN}) + HASH_BIN=${HASH_BIN%%[[:blank:]]*} + if [ "${HASH_EXPECTED}" != "${HASH_BIN}" ]; then + fatal "Download sha256 does not match ${HASH_EXPECTED}, got ${HASH_BIN}" + fi +} + +# --- setup permissions and move binary to system directory --- +setup_binary() { + chmod 755 ${TMP_BIN} + info "Installing k3s to ${BIN_DIR}/k3s" + $SUDO chown root:root ${TMP_BIN} + $SUDO mv -f ${TMP_BIN} ${BIN_DIR}/k3s +} + +# --- setup selinux policy --- +setup_selinux() { + case ${INSTALL_K3S_CHANNEL} in + *testing) + rpm_channel=testing + ;; + *latest) + rpm_channel=latest + ;; + *) + rpm_channel=stable + ;; + esac + + rpm_site="rpm.rancher.io" + if [ "${rpm_channel}" = "testing" ]; then + rpm_site="rpm-testing.rancher.io" + fi + + [ -r /etc/os-release ] && . 
/etc/os-release + if [ "${ID_LIKE%%[ ]*}" = "suse" ]; then + rpm_target=sle + rpm_site_infix=microos + package_installer=zypper + elif [ "${VERSION_ID%%.*}" = "7" ]; then + rpm_target=el7 + rpm_site_infix=centos/7 + package_installer=yum + else + rpm_target=el8 + rpm_site_infix=centos/8 + package_installer=yum + fi + + if [ "${package_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then + package_installer=dnf + fi + + policy_hint="please install: + ${package_installer} install -y container-selinux + ${package_installer} install -y https://${rpm_site}/k3s/${rpm_channel}/common/${rpm_site_infix}/noarch/k3s-selinux-0.4-1.${rpm_target}.noarch.rpm +" + + if [ "$INSTALL_K3S_SKIP_SELINUX_RPM" = true ] || can_skip_download_selinux || [ ! -d /usr/share/selinux ]; then + info "Skipping installation of SELinux RPM" + elif [ "${ID_LIKE:-}" != coreos ] && [ "${VARIANT_ID:-}" != coreos ]; then + install_selinux_rpm ${rpm_site} ${rpm_channel} ${rpm_target} ${rpm_site_infix} + fi + + policy_error=fatal + if [ "$INSTALL_K3S_SELINUX_WARN" = true ] || [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then + policy_error=warn + fi + + if ! $SUDO chcon -u system_u -r object_r -t container_runtime_exec_t ${BIN_DIR}/k3s >/dev/null 2>&1; then + if $SUDO grep '^\s*SELINUX=enforcing' /etc/selinux/config >/dev/null 2>&1; then + $policy_error "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}" + fi + elif [ ! -f /usr/share/selinux/packages/k3s.pp ]; then + if [ -x /usr/sbin/transactional-update ]; then + warn "Please reboot your machine to activate the changes and avoid data loss." 
+ else + $policy_error "Failed to find the k3s-selinux policy, ${policy_hint}" + fi + fi +} + +install_selinux_rpm() { + if [ -r /etc/redhat-release ] || [ -r /etc/centos-release ] || [ -r /etc/oracle-release ] || [ "${ID_LIKE%%[ ]*}" = "suse" ]; then + repodir=/etc/yum.repos.d + if [ -d /etc/zypp/repos.d ]; then + repodir=/etc/zypp/repos.d + fi + set +o noglob + $SUDO rm -f ${repodir}/rancher-k3s-common*.repo + set -o noglob + if [ -r /etc/redhat-release ] && [ "${3}" = "el7" ]; then + $SUDO yum install -y yum-utils + $SUDO yum-config-manager --enable rhel-7-server-extras-rpms + fi + $SUDO tee ${repodir}/rancher-k3s-common.repo >/dev/null << EOF +[rancher-k3s-common-${2}] +name=Rancher K3s Common (${2}) +baseurl=https://${1}/k3s/${2}/common/${4}/noarch +enabled=1 +gpgcheck=1 +repo_gpgcheck=0 +gpgkey=https://${1}/public.key +EOF + case ${3} in + sle) + rpm_installer="zypper --gpg-auto-import-keys" + if [ "${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then + rpm_installer="transactional-update --no-selfupdate -d run ${rpm_installer}" + : "${INSTALL_K3S_SKIP_START:=true}" + fi + ;; + *) + rpm_installer="yum" + ;; + esac + if [ "${rpm_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then + rpm_installer=dnf + fi + # shellcheck disable=SC2086 + $SUDO ${rpm_installer} install -y "k3s-selinux" + fi + return +} + +# --- download and verify k3s --- +download_and_verify() { + if can_skip_download_binary; then + info 'Skipping k3s download and verify' + verify_k3s_is_executable + return + fi + + setup_verify_arch + verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files' + setup_tmp + get_release_version + download_hash + + if installed_hash_matches; then + info 'Skipping binary downloaded, installed k3s matches hash' + return + fi + + download_binary + verify_binary + setup_binary +} + +# --- add additional utility links --- +create_symlinks() { + [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = 
true ] && return + [ "${INSTALL_K3S_SYMLINK}" = skip ] && return + + for cmd in kubectl crictl ctr; do + if [ ! -e ${BIN_DIR}/${cmd} ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then + which_cmd=$(command -v ${cmd} 2>/dev/null || true) + if [ -z "${which_cmd}" ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then + info "Creating ${BIN_DIR}/${cmd} symlink to k3s" + $SUDO ln -sf k3s ${BIN_DIR}/${cmd} + else + info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, command exists in PATH at ${which_cmd}" + fi + else + info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, already exists" + fi + done +} + +# --- create killall script --- +create_killall() { + [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return + info "Creating killall script ${KILLALL_K3S_SH}" + $SUDO tee ${KILLALL_K3S_SH} >/dev/null << \EOF +#!/bin/sh +[ $(id -u) -eq 0 ] || exec sudo $0 $@ + +for bin in /var/lib/rancher/k3s/data/**/bin/; do + [ -d $bin ] && export PATH=$PATH:$bin:$bin/aux +done + +set -x + +for service in /etc/systemd/system/k3s*.service; do + [ -s $service ] && systemctl stop $(basename $service) +done + +for service in /etc/init.d/k3s*; do + [ -x $service ] && $service stop +done + +pschildren() { + ps -e -o ppid= -o pid= | \ + sed -e 's/^\s*//g; s/\s\s*/\t/g;' | \ + grep -w "^$1" | \ + cut -f2 +} + +pstree() { + for pid in $@; do + echo $pid + for child in $(pschildren $pid); do + pstree $child + done + done +} + +killtree() { + kill -9 $( + { set +x; } 2>/dev/null; + pstree $@; + set -x; + ) 2>/dev/null +} + +getshims() { + ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1 +} + +killtree $({ set +x; } 2>/dev/null; getshims; set -x) + +do_unmount_and_remove() { + set +x + while read -r _ path _; do + case "$path" in $1*) echo "$path" ;; esac + done < /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount "$0" && rm -rf "$0"' + set -x +} + +do_unmount_and_remove '/run/k3s' +do_unmount_and_remove '/var/lib/rancher/k3s' 
+do_unmount_and_remove '/var/lib/kubelet/pods' +do_unmount_and_remove '/var/lib/kubelet/plugins' +do_unmount_and_remove '/run/netns/cni-' + +# Remove CNI namespaces +ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete + +# Delete network interface(s) that match 'master cni0' +ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do + iface=${iface%%@*} + [ -z "$iface" ] || ip link delete $iface +done +ip link delete cni0 +ip link delete flannel.1 +ip link delete flannel-v6.1 +ip link delete kube-ipvs0 +ip link delete flannel-wg +ip link delete flannel-wg-v6 +rm -rf /var/lib/cni/ +iptables-save | grep -v KUBE- | grep -v CNI- | grep -v flannel | iptables-restore +ip6tables-save | grep -v KUBE- | grep -v CNI- | grep -v flannel | ip6tables-restore +EOF + $SUDO chmod 755 ${KILLALL_K3S_SH} + $SUDO chown root:root ${KILLALL_K3S_SH} +} + +# --- create uninstall script --- +create_uninstall() { + [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return + info "Creating uninstall script ${UNINSTALL_K3S_SH}" + $SUDO tee ${UNINSTALL_K3S_SH} >/dev/null << EOF +#!/bin/sh +set -x +[ \$(id -u) -eq 0 ] || exec sudo \$0 \$@ + +${KILLALL_K3S_SH} + +if command -v systemctl; then + systemctl disable ${SYSTEM_NAME} + systemctl reset-failed ${SYSTEM_NAME} + systemctl daemon-reload +fi +if command -v rc-update; then + rc-update delete ${SYSTEM_NAME} default +fi + +rm -f ${FILE_K3S_SERVICE} +rm -f ${FILE_K3S_ENV} + +remove_uninstall() { + rm -f ${UNINSTALL_K3S_SH} +} +trap remove_uninstall EXIT + +if (ls ${SYSTEMD_DIR}/k3s*.service || ls /etc/init.d/k3s*) >/dev/null 2>&1; then + set +x; echo 'Additional k3s services installed, skipping uninstall of k3s'; set -x + exit +fi + +for cmd in kubectl crictl ctr; do + if [ -L ${BIN_DIR}/\$cmd ]; then + rm -f ${BIN_DIR}/\$cmd + fi +done + +rm -rf /etc/rancher/k3s +rm -rf /run/k3s +rm -rf /run/flannel +rm -rf /var/lib/rancher/k3s +rm -rf /var/lib/kubelet +rm -f ${BIN_DIR}/k3s +rm -f ${KILLALL_K3S_SH} + 
+if type yum >/dev/null 2>&1; then + yum remove -y k3s-selinux + rm -f /etc/yum.repos.d/rancher-k3s-common*.repo +elif type zypper >/dev/null 2>&1; then + uninstall_cmd="zypper remove -y k3s-selinux" + if [ "\${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then + uninstall_cmd="transactional-update --no-selfupdate -d run \$uninstall_cmd" + fi + \$uninstall_cmd + rm -f /etc/zypp/repos.d/rancher-k3s-common*.repo +fi +EOF + $SUDO chmod 755 ${UNINSTALL_K3S_SH} + $SUDO chown root:root ${UNINSTALL_K3S_SH} +} + +# --- disable current service if loaded -- +systemd_disable() { + $SUDO systemctl disable ${SYSTEM_NAME} >/dev/null 2>&1 || true + $SUDO rm -f /etc/systemd/system/${SERVICE_K3S} || true + $SUDO rm -f /etc/systemd/system/${SERVICE_K3S}.env || true +} + +# --- capture current env and create file containing k3s_ variables --- +create_env_file() { + info "env: Creating environment file ${FILE_K3S_ENV}" + $SUDO touch ${FILE_K3S_ENV} + $SUDO chmod 0600 ${FILE_K3S_ENV} + sh -c export | while read x v; do echo $v; done | grep -E '^(K3S|CONTAINERD)_' | $SUDO tee ${FILE_K3S_ENV} >/dev/null + sh -c export | while read x v; do echo $v; done | grep -Ei '^(NO|HTTP|HTTPS)_PROXY' | $SUDO tee -a ${FILE_K3S_ENV} >/dev/null +} + +# --- write systemd service file --- +create_systemd_service_file() { + info "systemd: Creating service file ${FILE_K3S_SERVICE}" + $SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF +[Unit] +Description=Lightweight Kubernetes +Documentation=https://k3s.io +Wants=network-online.target +After=network-online.target + +[Install] +WantedBy=multi-user.target + +[Service] +Type=${SYSTEMD_TYPE} +EnvironmentFile=-/etc/default/%N +EnvironmentFile=-/etc/sysconfig/%N +EnvironmentFile=-${FILE_K3S_ENV} +KillMode=process +Delegate=yes +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. 
+LimitNOFILE=1048576 +LimitNPROC=infinity +LimitCORE=infinity +TasksMax=infinity +TimeoutStartSec=0 +Restart=always +RestartSec=5s +ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service' +ExecStartPre=-/sbin/modprobe br_netfilter +ExecStartPre=-/sbin/modprobe overlay +ExecStart=${BIN_DIR}/k3s \\ + ${CMD_K3S_EXEC} + +EOF +} + +# --- write openrc service file --- +create_openrc_service_file() { + LOG_FILE=/var/log/${SYSTEM_NAME}.log + + info "openrc: Creating service file ${FILE_K3S_SERVICE}" + $SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF +#!/sbin/openrc-run + +depend() { + after network-online + want cgroups +} + +start_pre() { + rm -f /tmp/k3s.* +} + +supervisor=supervise-daemon +name=${SYSTEM_NAME} +command="${BIN_DIR}/k3s" +command_args="$(escape_dq "${CMD_K3S_EXEC}") + >>${LOG_FILE} 2>&1" + +output_log=${LOG_FILE} +error_log=${LOG_FILE} + +pidfile="/var/run/${SYSTEM_NAME}.pid" +respawn_delay=5 +respawn_max=0 + +set -o allexport +if [ -f /etc/environment ]; then source /etc/environment; fi +if [ -f ${FILE_K3S_ENV} ]; then source ${FILE_K3S_ENV}; fi +set +o allexport +EOF + $SUDO chmod 0755 ${FILE_K3S_SERVICE} + + $SUDO tee /etc/logrotate.d/${SYSTEM_NAME} >/dev/null << EOF +${LOG_FILE} { + missingok + notifempty + copytruncate +} +EOF +} + +# --- write systemd or openrc service file --- +create_service_file() { + [ "${HAS_SYSTEMD}" = true ] && create_systemd_service_file + [ "${HAS_OPENRC}" = true ] && create_openrc_service_file + return 0 +} + +# --- get hashes of the current k3s bin and service files +get_installed_hashes() { + $SUDO sha256sum ${BIN_DIR}/k3s ${FILE_K3S_SERVICE} ${FILE_K3S_ENV} 2>&1 || true +} + +# --- enable and start systemd service --- +systemd_enable() { + info "systemd: Enabling ${SYSTEM_NAME} unit" + $SUDO systemctl enable ${FILE_K3S_SERVICE} >/dev/null + $SUDO systemctl daemon-reload >/dev/null +} + +systemd_start() { + info "systemd: Starting ${SYSTEM_NAME}" + $SUDO systemctl restart ${SYSTEM_NAME} +} 
+ +# --- enable and start openrc service --- +openrc_enable() { + info "openrc: Enabling ${SYSTEM_NAME} service for default runlevel" + $SUDO rc-update add ${SYSTEM_NAME} default >/dev/null +} + +openrc_start() { + info "openrc: Starting ${SYSTEM_NAME}" + $SUDO ${FILE_K3S_SERVICE} restart +} + +# --- startup systemd or openrc service --- +service_enable_and_start() { + if [ -f "/proc/cgroups" ] && [ "$(grep memory /proc/cgroups | while read -r n n n enabled; do echo $enabled; done)" -eq 0 ]; + then + info 'Failed to find memory cgroup, you may need to add "cgroup_memory=1 cgroup_enable=memory" to your linux cmdline (/boot/cmdline.txt on a Raspberry Pi)' + fi + + [ "${INSTALL_K3S_SKIP_ENABLE}" = true ] && return + + [ "${HAS_SYSTEMD}" = true ] && systemd_enable + [ "${HAS_OPENRC}" = true ] && openrc_enable + + [ "${INSTALL_K3S_SKIP_START}" = true ] && return + + POST_INSTALL_HASHES=$(get_installed_hashes) + if [ "${PRE_INSTALL_HASHES}" = "${POST_INSTALL_HASHES}" ] && [ "${INSTALL_K3S_FORCE_RESTART}" != true ]; then + info 'No change detected so skipping service start' + return + fi + + [ "${HAS_SYSTEMD}" = true ] && systemd_start + [ "${HAS_OPENRC}" = true ] && openrc_start + return 0 +} + +# --- re-evaluate args to include env command --- +eval set -- $(escape "${INSTALL_K3S_EXEC}") $(quote "$@") + +# --- run the install process -- +{ + verify_system + setup_env "$@" + download_and_verify + setup_selinux + create_symlinks + create_killall + create_uninstall + systemd_disable + create_env_file + create_service_file + service_enable_and_start +} \ No newline at end of file diff --git a/bootstrap/k3s/pkg/cloudinit/node.go b/bootstrap/k3s/pkg/cloudinit/node.go new file mode 100644 index 00000000..46df6399 --- /dev/null +++ b/bootstrap/k3s/pkg/cloudinit/node.go @@ -0,0 +1,53 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cloudinit + +import ( + "github.com/pkg/errors" +) + +const ( + workerCloudInit = `{{.Header}} +{{template "files" .WriteFiles}} +- path: /run/cluster-api/placeholder + owner: root:root + permissions: '0640' + content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)" +runcmd: +{{- template "commands" .PreK3sCommands }} + - 'INSTALL_K3S_SKIP_DOWNLOAD=true /usr/local/bin/k3s-install.sh' +{{- template "commands" .PostK3sCommands }} +` +) + +// NodeInput defines the context to generate an agent node cloud-init. +type NodeInput struct { + BaseUserData +} + +// NewNode returns the cloud-init for joining a node instance. +func NewNode(input *NodeInput) ([]byte, error) { + if err := input.prepare(); err != nil { + return nil, err + } + userData, err := generate("JoinWorker", workerCloudInit, input) + if err != nil { + return nil, errors.Wrapf(err, "failed to generate user data for machine joining worker node") + } + + return userData, err +} diff --git a/bootstrap/k3s/pkg/cloudinit/utils.go b/bootstrap/k3s/pkg/cloudinit/utils.go new file mode 100644 index 00000000..1bfd0037 --- /dev/null +++ b/bootstrap/k3s/pkg/cloudinit/utils.go @@ -0,0 +1,34 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cloudinit + +import ( + "strings" + "text/template" +) + +var ( + defaultTemplateFuncMap = template.FuncMap{ + "Indent": templateYAMLIndent, + } +) + +func templateYAMLIndent(i int, input string) string { + split := strings.Split(input, "\n") + ident := "\n" + strings.Repeat(" ", i) + return strings.Repeat(" ", i) + strings.Join(split, ident) +} diff --git a/bootstrap/k3s/pkg/locking/control_plane_init_mutex.go b/bootstrap/k3s/pkg/locking/control_plane_init_mutex.go new file mode 100644 index 00000000..fdc71822 --- /dev/null +++ b/bootstrap/k3s/pkg/locking/control_plane_init_mutex.go @@ -0,0 +1,190 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package locking implements locking functionality. 
+package locking + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const semaphoreInformationKey = "lock-information" + +// ControlPlaneInitMutex uses a ConfigMap to synchronize cluster initialization. +type ControlPlaneInitMutex struct { + client client.Client +} + +// NewControlPlaneInitMutex returns a lock that can be held by a control plane node before init. +func NewControlPlaneInitMutex(client client.Client) *ControlPlaneInitMutex { + return &ControlPlaneInitMutex{ + client: client, + } +} + +// Lock allows a control plane node to be the first and only node to run kubeadm init. +func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool { + sema := newSemaphore() + cmName := configMapName(cluster.Name) + log := ctrl.LoggerFrom(ctx, "ConfigMap", klog.KRef(cluster.Namespace, cmName)) + err := c.client.Get(ctx, client.ObjectKey{ + Namespace: cluster.Namespace, + Name: cmName, + }, sema.ConfigMap) + switch { + case apierrors.IsNotFound(err): + break + case err != nil: + log.Error(err, "Failed to acquire init lock") + return false + default: // Successfully found an existing config map. + info, err := sema.information() + if err != nil { + log.Error(err, "Failed to get information about the existing init lock") + return false + } + // The machine requesting the lock is the machine that created the lock, therefore the lock is acquired. + if info.MachineName == machine.Name { + return true + } + + // If the machine that created the lock can not be found unlock the mutex. 
+ if err := c.client.Get(ctx, client.ObjectKey{ + Namespace: cluster.Namespace, + Name: info.MachineName, + }, &clusterv1.Machine{}); err != nil { + log.Error(err, "Failed to get machine holding init lock") + if apierrors.IsNotFound(err) { + c.Unlock(ctx, cluster) + } + } + log.Info(fmt.Sprintf("Waiting for Machine %s to initialize", info.MachineName)) + return false + } + + // Adds owner reference, namespace and name + sema.setMetadata(cluster) + // Adds the additional information + if err := sema.setInformation(&information{MachineName: machine.Name}); err != nil { + log.Error(err, "Failed to acquire init lock while setting semaphore information") + return false + } + + log.Info("Attempting to acquire the lock") + err = c.client.Create(ctx, sema.ConfigMap) + switch { + case apierrors.IsAlreadyExists(err): + log.Info("Cannot acquire the init lock. The init lock has been acquired by someone else") + return false + case err != nil: + log.Error(err, "Error acquiring the init lock") + return false + default: + return true + } +} + +// Unlock releases the lock. 
+func (c *ControlPlaneInitMutex) Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool { + sema := newSemaphore() + cmName := configMapName(cluster.Name) + log := ctrl.LoggerFrom(ctx, "ConfigMap", klog.KRef(cluster.Namespace, cmName)) + err := c.client.Get(ctx, client.ObjectKey{ + Namespace: cluster.Namespace, + Name: cmName, + }, sema.ConfigMap) + switch { + case apierrors.IsNotFound(err): + log.Info("Control plane init lock not found, it may have been released already") + return true + case err != nil: + log.Error(err, "Error unlocking the control plane init lock") + return false + default: + // Delete the config map semaphore if there is no error fetching it + if err := c.client.Delete(ctx, sema.ConfigMap); err != nil { + if apierrors.IsNotFound(err) { + return true + } + log.Error(err, "Error deleting the config map underlying the control plane init lock") + return false + } + return true + } +} + +type information struct { + MachineName string `json:"machineName"` +} + +type semaphore struct { + *corev1.ConfigMap +} + +func newSemaphore() *semaphore { + return &semaphore{&corev1.ConfigMap{}} +} + +func configMapName(clusterName string) string { + return fmt.Sprintf("%s-lock", clusterName) +} + +func (s semaphore) information() (*information, error) { + li := &information{} + if err := json.Unmarshal([]byte(s.Data[semaphoreInformationKey]), li); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal semaphore information") + } + return li, nil +} + +func (s semaphore) setInformation(information *information) error { + b, err := json.Marshal(information) + if err != nil { + return errors.Wrap(err, "failed to marshal semaphore information") + } + s.Data = map[string]string{} + s.Data[semaphoreInformationKey] = string(b) + return nil +} + +func (s *semaphore) setMetadata(cluster *clusterv1.Cluster) { + s.ObjectMeta = metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: configMapName(cluster.Name), + Labels: map[string]string{ + 
clusterv1.ClusterLabelName: cluster.Name, + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: cluster.APIVersion, + Kind: cluster.Kind, + Name: cluster.Name, + UID: cluster.UID, + }, + }, + } +} diff --git a/bootstrap/k3s/pkg/locking/control_plane_init_mutex_test.go b/bootstrap/k3s/pkg/locking/control_plane_init_mutex_test.go new file mode 100644 index 00000000..f0d7d665 --- /dev/null +++ b/bootstrap/k3s/pkg/locking/control_plane_init_mutex_test.go @@ -0,0 +1,308 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package locking + +import ( + "context" + "errors" + "fmt" + "testing" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const ( + clusterName = "test-cluster" + clusterNamespace = "test-namespace" +) + +var ( + ctx = ctrl.SetupSignalHandler() +) + +func TestControlPlaneInitMutex_Lock(t *testing.T) { + g := NewWithT(t) + + scheme := runtime.NewScheme() + g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) + g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) + + uid := types.UID("test-uid") + tests := []struct { + name string + client client.Client + shouldAcquire bool + }{ + { + name: "should successfully acquire lock if the config cannot be found", + client: &fakeClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + getError: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)), + }, + shouldAcquire: true, + }, + { + name: "should not acquire lock if already exits", + client: &fakeClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName(clusterName), + Namespace: clusterNamespace, + }, + }).Build(), + }, + shouldAcquire: false, + }, + { + name: "should not acquire lock if cannot create config map", + client: &fakeClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + getError: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, configMapName(clusterName)), + createError: errors.New("create error"), + }, + shouldAcquire: false, + }, + { + name: "should not acquire lock if config map already exists while creating", + 
client: &fakeClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + getError: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)), + createError: apierrors.NewAlreadyExists(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)), + }, + shouldAcquire: false, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + gs := NewWithT(t) + + l := &ControlPlaneInitMutex{ + client: tc.client, + } + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: clusterNamespace, + Name: clusterName, + UID: uid, + }, + } + machine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("machine-%s", cluster.Name), + }, + } + + gs.Expect(l.Lock(ctx, cluster, machine)).To(Equal(tc.shouldAcquire)) + }) + } +} + +func TestControlPlaneInitMutex_LockWithMachineDeletion(t *testing.T) { + g := NewWithT(t) + + scheme := runtime.NewScheme() + g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) + g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) + + newMachineName := "new-machine" + tests := []struct { + name string + client client.Client + expectedMachineName string + }{ + { + name: "should not give the lock to new machine if the machine that created it does exist", + client: &fakeClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects( + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName(clusterName), + Namespace: clusterNamespace}, + Data: map[string]string{ + "lock-information": "{\"machineName\":\"existent-machine\"}", + }}, + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existent-machine", + Namespace: clusterNamespace, + }, + }, + ).Build(), + }, + expectedMachineName: "existent-machine", + }, + { + name: "should give the lock to new machine if the machine that created it does not exist", + client: &fakeClient{ + Client: 
fake.NewClientBuilder().WithScheme(scheme).WithObjects( + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName(clusterName), + Namespace: clusterNamespace}, + Data: map[string]string{ + "lock-information": "{\"machineName\":\"non-existent-machine\"}", + }}, + ).Build(), + }, + expectedMachineName: newMachineName, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + l := &ControlPlaneInitMutex{ + client: tc.client, + } + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: clusterNamespace, + Name: clusterName, + }, + } + machine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: newMachineName, + }, + } + + g.Eventually(func(g Gomega) error { + l.Lock(ctx, cluster, machine) + + cm := &corev1.ConfigMap{} + g.Expect(tc.client.Get(ctx, client.ObjectKey{ + Name: configMapName(clusterName), + Namespace: cluster.Namespace, + }, cm)).To(Succeed()) + + info, err := semaphore{cm}.information() + g.Expect(err).To(BeNil()) + + g.Expect(info.MachineName).To(Equal(tc.expectedMachineName)) + return nil + }, "20s").Should(Succeed()) + }) + } +} + +func TestControlPlaneInitMutex_UnLock(t *testing.T) { + uid := types.UID("test-uid") + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName(clusterName), + Namespace: clusterNamespace, + }, + } + tests := []struct { + name string + client client.Client + shouldRelease bool + }{ + { + name: "should release lock by deleting config map", + client: &fakeClient{ + Client: fake.NewClientBuilder().Build(), + }, + shouldRelease: true, + }, + { + name: "should not release lock if cannot delete config map", + client: &fakeClient{ + Client: fake.NewClientBuilder().WithObjects(configMap).Build(), + deleteError: errors.New("delete error"), + }, + shouldRelease: false, + }, + { + name: "should release lock if config map does not exist", + client: &fakeClient{ + Client: fake.NewClientBuilder().Build(), + getError: 
apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)), + }, + shouldRelease: true, + }, + { + name: "should not release lock if error while getting config map", + client: &fakeClient{ + Client: fake.NewClientBuilder().Build(), + getError: errors.New("get error"), + }, + shouldRelease: false, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + gs := NewWithT(t) + + l := &ControlPlaneInitMutex{ + client: tc.client, + } + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: clusterNamespace, + Name: clusterName, + UID: uid, + }, + } + + gs.Expect(l.Unlock(ctx, cluster)).To(Equal(tc.shouldRelease)) + }) + } +} + +type fakeClient struct { + client.Client + getError error + createError error + deleteError error +} + +func (fc *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { + if fc.getError != nil { + return fc.getError + } + return fc.Client.Get(ctx, key, obj) +} + +func (fc *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + if fc.createError != nil { + return fc.createError + } + return fc.Client.Create(ctx, obj, opts...) +} + +func (fc *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + if fc.deleteError != nil { + return fc.deleteError + } + return fc.Client.Delete(ctx, obj, opts...) +} diff --git a/bootstrap/k3s/pkg/types/config.go b/bootstrap/k3s/pkg/types/config.go new file mode 100644 index 00000000..ca7d0afd --- /dev/null +++ b/bootstrap/k3s/pkg/types/config.go @@ -0,0 +1,104 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +// DefaultK3sConfigLocation is the default location for the k3s config file. +const DefaultK3sConfigLocation = "/etc/rancher/k3s/config.yaml" + +// K3sServerConfiguration is the configuration for the k3s server. +type K3sServerConfiguration struct { + // Database + DataStoreEndPoint string `json:"datastore-endpoint,omitempty"` + DataStoreCAFile string `json:"datastore-cafile,omitempty"` + DataStoreCertFile string `json:"datastore-certfile,omitempty"` + DataStoreKeyFile string `json:"datastore-keyfile,omitempty"` + + // Cluster + Token string `json:"token,omitempty"` + TokenFile string `json:"token-file,omitempty"` + Server string `json:"server,omitempty"` + CloudInit bool `json:"cloud-init,omitempty"` + + // Listener + // BindAddress k3s bind address. + BindAddress string `json:"bind-address,omitempty"` + // HTTPSListenPort HTTPS listen port. + HTTPSListenPort int `json:"https-listen-port,omitempty"` + // AdvertiseAddress IP address that apiserver uses to advertise to members of the cluster. + AdvertiseAddress string `json:"advertise-address,omitempty"` + // AdvertisePort Port that apiserver uses to advertise to members of the cluster (default: listen-port). + AdvertisePort int `json:"advertise-port,omitempty"` + // TLSSan Add additional hostname or IP as a Subject Alternative Name in the TLS cert. + TLSSan string `json:"tls-san,omitempty"` + + // Networking + // ClusterCIDR Network CIDR to use for pod IPs. + ClusterCIDR string `json:"cluster-cidr,omitempty"` + // ServiceCIDR Network CIDR to use for services IPs. 
+ ServiceCIDR string `json:"service-cidr,omitempty"` + // ServiceNodePortRange Port range to reserve for services with NodePort visibility. + ServiceNodePortRange string `json:"service-node-port-range,omitempty"` + // ClusterDNS cluster IP for coredns service. Should be in your service-cidr range. + ClusterDNS string `json:"cluster-dns,omitempty"` + // ClusterDomain cluster Domain. + ClusterDomain string `json:"cluster-domain,omitempty"` + // FlannelBackend One of ‘none’, ‘vxlan’, ‘ipsec’, ‘host-gw’, or ‘wireguard’. (default: vxlan) + FlannelBackend string `json:"flannel-backend,omitempty"` + + // Agent + K3sAgentConfiguration `json:",inline"` +} + +// K3sAgentConfiguration is the configuration for the k3s agent. +type K3sAgentConfiguration struct { + // Cluster + Token string `json:"token,omitempty"` + TokenFile string `json:"token-file,omitempty"` + Server string `json:"server,omitempty"` + + // NodeName k3s node name. + NodeName string `json:"node-name,omitempty"` + // NodeLabels registering and starting kubelet with set of labels. + NodeLabels []string `json:"node-label,omitempty"` + // NodeTaints registering and starting kubelet with set of taints. + NodeTaints []string `json:"node-taint,omitempty"` + // SeLinux Enable SELinux in containerd + SeLinux bool `json:"selinux,omitempty"` + // LBServerPort + // Local port for supervisor client load-balancer. + // If the supervisor and apiserver are not colocated an additional port 1 less than this port + // will also be used for the apiserver client load-balancer. (default: 6444) + LBServerPort int `json:"lb-server-port,omitempty"` + // DataDir Folder to hold state. + DataDir string `json:"data-dir,omitempty"` + + // Runtime + // ContainerRuntimeEndpoint Disable embedded containerd and use alternative CRI implementation. + ContainerRuntimeEndpoint string `json:"container-runtime-endpoint,omitempty"` + // PauseImage Customized pause image for containerd or Docker sandbox. 
+ PauseImage string `json:"pause-image,omitempty"` + // PrivateRegistry Path to a private registry configuration file. + PrivateRegistry string `json:"private-registry,omitempty"` + + // Networking + // NodeIP IP address to advertise for node. + NodeIP string `json:"node-ip,omitempty"` + // NodeExternalIP External IP address to advertise for node. + NodeExternalIP string `json:"node-external-ip,omitempty"` + // ResolvConf Path to Kubelet resolv.conf file. + ResolvConf string `json:"resolv-conf,omitempty"` +} diff --git a/bootstrap/k3s/pkg/types/doc.go b/bootstrap/k3s/pkg/types/doc.go new file mode 100644 index 00000000..7a55aa89 --- /dev/null +++ b/bootstrap/k3s/pkg/types/doc.go @@ -0,0 +1,18 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package types contains k3s config types. +package types diff --git a/bootstrap/k3s/pkg/types/util.go b/bootstrap/k3s/pkg/types/util.go new file mode 100644 index 00000000..3e2b8528 --- /dev/null +++ b/bootstrap/k3s/pkg/types/util.go @@ -0,0 +1,101 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "github.com/jinzhu/copier" + kubeyaml "sigs.k8s.io/yaml" + + infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1" +) + +// MarshalInitServerConfiguration marshals the ServerConfiguration object into a string. +func MarshalInitServerConfiguration(spec *infrabootstrapv1.K3sConfigSpec, token string) (string, error) { + obj := spec.ServerConfiguration + serverConfig := &K3sServerConfiguration{} + if err := copier.Copy(serverConfig, obj); err != nil { + return "", err + } + + serverConfig.Token = token + + serverConfig.CloudInit = spec.ServerConfiguration.Database.ClusterInit + + serverConfig.K3sAgentConfiguration = K3sAgentConfiguration{ + NodeName: obj.Agent.Node.NodeName, + NodeLabels: obj.Agent.Node.NodeLabels, + NodeTaints: obj.Agent.Node.NodeTaints, + SeLinux: obj.Agent.Node.SeLinux, + LBServerPort: obj.Agent.Node.LBServerPort, + DataDir: obj.Agent.Node.DataDir, + ContainerRuntimeEndpoint: obj.Agent.Runtime.ContainerRuntimeEndpoint, + PauseImage: obj.Agent.Runtime.PauseImage, + PrivateRegistry: obj.Agent.Runtime.PrivateRegistry, + NodeIP: obj.Agent.Networking.NodeIP, + NodeExternalIP: obj.Agent.Networking.NodeExternalIP, + ResolvConf: obj.Agent.Networking.ResolvConf, + } + + b, err := kubeyaml.Marshal(serverConfig) + if err != nil { + return "", err + } + return string(b), nil +} + +// MarshalJoinServerConfiguration marshals the join ServerConfiguration object into a string. 
+func MarshalJoinServerConfiguration(obj *infrabootstrapv1.ServerConfiguration) (string, error) { + serverConfig := &K3sServerConfiguration{} + if err := copier.Copy(serverConfig, obj); err != nil { + return "", err + } + + serverConfig.K3sAgentConfiguration = K3sAgentConfiguration{ + NodeName: obj.Agent.Node.NodeName, + NodeLabels: obj.Agent.Node.NodeLabels, + NodeTaints: obj.Agent.Node.NodeTaints, + SeLinux: obj.Agent.Node.SeLinux, + LBServerPort: obj.Agent.Node.LBServerPort, + DataDir: obj.Agent.Node.DataDir, + ContainerRuntimeEndpoint: obj.Agent.Runtime.ContainerRuntimeEndpoint, + PauseImage: obj.Agent.Runtime.PauseImage, + PrivateRegistry: obj.Agent.Runtime.PrivateRegistry, + NodeIP: obj.Agent.Networking.NodeIP, + NodeExternalIP: obj.Agent.Networking.NodeExternalIP, + ResolvConf: obj.Agent.Networking.ResolvConf, + } + + b, err := kubeyaml.Marshal(serverConfig) + if err != nil { + return "", err + } + return string(b), nil +} + +// MarshalJoinAgentConfiguration marshals the join AgentConfiguration object into a string. +func MarshalJoinAgentConfiguration(obj *infrabootstrapv1.AgentConfiguration) (string, error) { + serverConfig := &K3sAgentConfiguration{} + if err := copier.Copy(serverConfig, obj); err != nil { + return "", err + } + + b, err := kubeyaml.Marshal(serverConfig) + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/cmd/kk/pkg/kubernetes/task_test.go b/cmd/kk/pkg/kubernetes/task_test.go new file mode 100644 index 00000000..8fdf21da --- /dev/null +++ b/cmd/kk/pkg/kubernetes/task_test.go @@ -0,0 +1,73 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package kubernetes + +import ( + "testing" +) + +func Test_calculateNextStr(t *testing.T) { + tests := []struct { + currentVersion string + desiredVersion string + want string + wantErr bool + errMsg string + }{ + { + currentVersion: "v1.21.5", + desiredVersion: "v1.22.5", + want: "v1.22.5", + wantErr: false, + }, + { + currentVersion: "v1.21.5", + desiredVersion: "v1.23.5", + want: "v1.22.12", + wantErr: false, + }, + { + currentVersion: "v1.17.5", + desiredVersion: "v1.18.5", + want: "", + wantErr: true, + errMsg: "the target version v1.18.5 is not supported", + }, + { + currentVersion: "v1.17.5", + desiredVersion: "v1.21.5", + want: "", + wantErr: true, + errMsg: "Kubernetes minor version v1.18.x is not supported", + }, + } + for _, tt := range tests { + t.Run("", func(t *testing.T) { + got, err := calculateNextStr(tt.currentVersion, tt.desiredVersion) + if (err != nil) != tt.wantErr { + t.Errorf("calculateNextStr() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("calculateNextStr() got = %v, want %v", got, tt.want) + } + if err != nil && err.Error() != tt.errMsg { + t.Errorf("calculateNextStr() error = %v, want %v", err, tt.errMsg) + } + }) + } +} diff --git a/cmd/kk/pkg/kubernetes/tasks.go b/cmd/kk/pkg/kubernetes/tasks.go index be4182ec..18b2cb24 100644 --- a/cmd/kk/pkg/kubernetes/tasks.go +++ b/cmd/kk/pkg/kubernetes/tasks.go @@ -616,12 +616,15 @@ func (c *CalculateNextVersion) Execute(_ connector.Runtime) error { if !ok { return errors.New("get upgrade plan Kubernetes version failed by pipeline cache") } - 
nextVersionStr := calculateNextStr(currentVersion, planVersion) + nextVersionStr, err := calculateNextStr(currentVersion, planVersion) + if err != nil { + return errors.Wrap(err, "calculate next version failed") + } c.KubeConf.Cluster.Kubernetes.Version = nextVersionStr return nil } -func calculateNextStr(currentVersion, desiredVersion string) string { +func calculateNextStr(currentVersion, desiredVersion string) (string, error) { current := versionutil.MustParseSemantic(currentVersion) target := versionutil.MustParseSemantic(desiredVersion) var nextVersionMinor uint @@ -632,7 +635,10 @@ func calculateNextStr(currentVersion, desiredVersion string) string { } if nextVersionMinor == target.Minor() { - return desiredVersion + if _, ok := files.FileSha256["kubeadm"]["amd64"][desiredVersion]; !ok { + return "", errors.Errorf("the target version %s is not supported", desiredVersion) + } + return desiredVersion, nil } else { nextVersionPatchList := make([]int, 0) for supportVersionStr := range files.FileSha256["kubeadm"]["amd64"] { @@ -644,9 +650,12 @@ func calculateNextStr(currentVersion, desiredVersion string) string { sort.Ints(nextVersionPatchList) nextVersion := current.WithMinor(nextVersionMinor) + if len(nextVersionPatchList) == 0 { + return "", errors.Errorf("Kubernetes minor version v%d.%d.x is not supported", nextVersion.Major(), nextVersion.Minor()) + } nextVersion = nextVersion.WithPatch(uint(nextVersionPatchList[len(nextVersionPatchList)-1])) - return fmt.Sprintf("v%s", nextVersion.String()) + return fmt.Sprintf("v%s", nextVersion.String()), nil } } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_kkclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_kkclusters.yaml index 30a59673..f2adfbb5 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_kkclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_kkclusters.yaml @@ -128,6 +128,10 @@ spec: description: The hostname on which the API server is serving. 
type: string type: object + distribution: + description: Distribution represents the Kubernetes distribution type + of the cluster. + type: string nodes: description: Nodes represents the information about the nodes available to the cluster diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_kkclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_kkclustertemplates.yaml index 8547fe36..6d9f5c97 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_kkclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_kkclustertemplates.yaml @@ -149,6 +149,10 @@ spec: description: The hostname on which the API server is serving. type: string type: object + distribution: + description: Distribution represents the Kubernetes distribution + type of the cluster. + type: string nodes: description: Nodes represents the information about the nodes available to the cluster diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 41e91ecc..f56714da 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -15,20 +15,20 @@ commonLabels: patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD -#- patches/webhook_in_kkclusters.yaml -#- patches/webhook_in_kkclustertemplates.yaml -#- patches/webhook_in_kkmachines.yaml -#- patches/webhook_in_kkmachinetemplates.yaml -#- patches/webhook_in_kkinstances.yaml +- patches/webhook_in_kkclusters.yaml +- patches/webhook_in_kkclustertemplates.yaml +- patches/webhook_in_kkmachines.yaml +- patches/webhook_in_kkmachinetemplates.yaml +- patches/webhook_in_kkinstances.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 
# patches here are for enabling the CA injection for each CRD -#- patches/cainjection_in_kkclusters.yaml -#- patches/cainjection_in_kkclustertemplates.yaml -#- patches/cainjection_in_kkmachines.yaml -#- patches/cainjection_in_kkmachinetemplates.yaml -#- patches/cainjection_in_kkinstances.yaml +- patches/cainjection_in_kkclusters.yaml +- patches/cainjection_in_kkclustertemplates.yaml +- patches/cainjection_in_kkmachines.yaml +- patches/cainjection_in_kkmachinetemplates.yaml +- patches/cainjection_in_kkinstances.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_kkclusters.yaml b/config/crd/patches/cainjection_in_kkclusters.yaml index 002a0917..aabf0075 100644 --- a/config/crd/patches/cainjection_in_kkclusters.yaml +++ b/config/crd/patches/cainjection_in_kkclusters.yaml @@ -4,4 +4,4 @@ kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) - name: kkclusters.infrastructure.kubekey.kubesphere.io + name: kkclusters.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/webhook_in_kkclusters.yaml b/config/crd/patches/webhook_in_kkclusters.yaml index 998c70c1..3ca82219 100644 --- a/config/crd/patches/webhook_in_kkclusters.yaml +++ b/config/crd/patches/webhook_in_kkclusters.yaml @@ -2,15 +2,17 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: kkclusters.infrastructure.kubekey.kubesphere.io + name: kkclusters.infrastructure.cluster.x-k8s.io spec: conversion: strategy: Webhook webhook: + conversionReviewVersions: ["v1", "v1beta1"] clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== service: namespace: system name: webhook-service 
path: /convert - conversionReviewVersions: - - v1 diff --git a/config/crd/patches/webhook_in_kkclustertemplates.yaml b/config/crd/patches/webhook_in_kkclustertemplates.yaml index 45e30660..466cb622 100644 --- a/config/crd/patches/webhook_in_kkclustertemplates.yaml +++ b/config/crd/patches/webhook_in_kkclustertemplates.yaml @@ -7,10 +7,12 @@ spec: conversion: strategy: Webhook webhook: + conversionReviewVersions: ["v1", "v1beta1"] clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== service: namespace: system name: webhook-service path: /convert - conversionReviewVersions: - - v1 diff --git a/config/crd/patches/webhook_in_kkinstances.yaml b/config/crd/patches/webhook_in_kkinstances.yaml index 32a05b9b..e5be13f1 100644 --- a/config/crd/patches/webhook_in_kkinstances.yaml +++ b/config/crd/patches/webhook_in_kkinstances.yaml @@ -7,10 +7,12 @@ spec: conversion: strategy: Webhook webhook: + conversionReviewVersions: ["v1", "v1beta1"] clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== service: namespace: system name: webhook-service path: /convert - conversionReviewVersions: - - v1 diff --git a/config/crd/patches/webhook_in_kkmachines.yaml b/config/crd/patches/webhook_in_kkmachines.yaml index dce1a090..22058138 100644 --- a/config/crd/patches/webhook_in_kkmachines.yaml +++ b/config/crd/patches/webhook_in_kkmachines.yaml @@ -7,10 +7,12 @@ spec: conversion: strategy: Webhook webhook: + conversionReviewVersions: ["v1", "v1beta1"] clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the 
cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== service: namespace: system name: webhook-service path: /convert - conversionReviewVersions: - - v1 diff --git a/config/crd/patches/webhook_in_kkmachinetemplates.yaml b/config/crd/patches/webhook_in_kkmachinetemplates.yaml index 1e101d5f..008ed4a1 100644 --- a/config/crd/patches/webhook_in_kkmachinetemplates.yaml +++ b/config/crd/patches/webhook_in_kkmachinetemplates.yaml @@ -7,10 +7,12 @@ spec: conversion: strategy: Webhook webhook: + conversionReviewVersions: ["v1", "v1beta1"] clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== service: namespace: system name: webhook-service path: /convert - conversionReviewVersions: - - v1 diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml index 7d8f734e..1cba52d7 100644 --- a/config/default/manager_image_patch.yaml +++ b/config/default/manager_image_patch.yaml @@ -7,5 +7,5 @@ spec: template: spec: containers: - - image: docker.io/kubespheredev/capkk-manager:main + - image: docker.io/kubespheredev/capkk-controller:main name: manager diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 11b15b7b..32d7e141 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -35,14 +35,19 @@ spec: livenessProbe: httpGet: path: /healthz - port: 8081 + port: 9440 initialDelaySeconds: 15 periodSeconds: 20 readinessProbe: httpGet: path: /readyz - port: 8081 + port: 9440 initialDelaySeconds: 5 periodSeconds: 10 serviceAccountName: controller-manager terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane diff --git a/controllers/kkinstance_controller.go 
b/controllers/kkinstance_controller.go index 809e896e..52b3a30c 100644 --- a/controllers/kkinstance_controller.go +++ b/controllers/kkinstance_controller.go @@ -70,7 +70,7 @@ type KKInstanceReconciler struct { sshClientFactory func(scope *scope.InstanceScope) ssh.Interface bootstrapFactory func(sshClient ssh.Interface, scope scope.LBScope, instanceScope *scope.InstanceScope) service.Bootstrap repositoryFactory func(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope) service.Repository - binaryFactory func(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope) service.BinaryService + binaryFactory func(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope, distribution string) service.BinaryService containerManagerFactory func(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope) service.ContainerManager provisioningFactory func(sshClient ssh.Interface, format bootstrapv1.Format) service.Provisioning WatchFilterValue string @@ -101,11 +101,11 @@ func (r *KKInstanceReconciler) getRepositoryService(sshClient ssh.Interface, sco return repository.NewService(sshClient, scope, instanceScope) } -func (r *KKInstanceReconciler) getBinaryService(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope) service.BinaryService { +func (r *KKInstanceReconciler) getBinaryService(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope, distribution string) service.BinaryService { if r.binaryFactory != nil { - return r.binaryFactory(sshClient, scope, instanceScope) + return r.binaryFactory(sshClient, scope, instanceScope, distribution) } - return binary.NewService(sshClient, scope, instanceScope) + return binary.NewService(sshClient, scope, instanceScope, distribution) } func (r *KKInstanceReconciler) getContainerManager(sshClient ssh.Interface, scope scope.KKInstanceScope, 
instanceScope *scope.InstanceScope) service.ContainerManager { @@ -330,14 +330,7 @@ func (r *KKInstanceReconciler) reconcileNormal(ctx context.Context, instanceScop sshClient := r.getSSHClient(instanceScope) - phases := []func(context.Context, ssh.Interface, *scope.InstanceScope, scope.KKInstanceScope, scope.LBScope) error{ - r.reconcileBootstrap, - r.reconcileRepository, - r.reconcileBinaryService, - r.reconcileContainerManager, - r.reconcileProvisioning, - } - + phases := r.phaseFactory(kkInstanceScope) for _, phase := range phases { pollErr := wait.PollImmediate(r.WaitKKInstanceInterval, r.WaitKKInstanceTimeout, func() (done bool, err error) { if err := phase(ctx, sshClient, instanceScope, kkInstanceScope, lbScope); err != nil { diff --git a/controllers/kkinstance_controller_phase.go b/controllers/kkinstance_controller_phase.go index 5d4fe5c1..4e7876da 100644 --- a/controllers/kkinstance_controller_phase.go +++ b/controllers/kkinstance_controller_phase.go @@ -32,6 +32,29 @@ import ( "github.com/kubesphere/kubekey/pkg/service" ) +func (r *KKInstanceReconciler) phaseFactory(kkInstanceScope scope.KKInstanceScope) []func(context.Context, ssh.Interface, + *scope.InstanceScope, scope.KKInstanceScope, scope.LBScope) error { + var phases []func(context.Context, ssh.Interface, *scope.InstanceScope, scope.KKInstanceScope, scope.LBScope) error + switch kkInstanceScope.Distribution() { + case infrav1.KUBERNETES: + phases = append(phases, + r.reconcileBootstrap, + r.reconcileRepository, + r.reconcileBinaryService, + r.reconcileContainerManager, + r.reconcileProvisioning, + ) + case infrav1.K3S: + phases = append(phases, + r.reconcileBootstrap, + r.reconcileRepository, + r.reconcileBinaryService, + r.reconcileProvisioning, + ) + } + return phases +} + func (r *KKInstanceReconciler) reconcilePing(_ context.Context, instanceScope *scope.InstanceScope) error { instanceScope.Info("Reconcile ping") @@ -189,14 +212,8 @@ func (r *KKInstanceReconciler) reconcileBinaryService(_ 
context.Context, sshClie instanceScope.Info("Reconcile binary service") - svc := r.getBinaryService(sshClient, kkInstanceScope, instanceScope) - if err := svc.DownloadAll(r.WaitKKInstanceTimeout); err != nil { - return err - } - if err := svc.ConfigureKubelet(); err != nil { - return err - } - if err := svc.ConfigureKubeadm(); err != nil { + svc := r.getBinaryService(sshClient, kkInstanceScope, instanceScope, kkInstanceScope.Distribution()) + if err := svc.Download(r.WaitKKInstanceTimeout); err != nil { return err } return nil diff --git a/controllers/suite_test.go b/controllers/suite_test.go index dc55e363..fd32528e 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -16,64 +16,47 @@ limitations under the License. package controllers -import ( - "path/filepath" - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - infrav1 "github.com/kubesphere/kubekey/api/v1beta1" - //+kubebuilder:scaffold:imports -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
- -// var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment - -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) -} - -var _ = BeforeSuite(func() { - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) - - By("bootstrapping test environment") - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, - ErrorIfCRDPathMissing: true, - } - - cfg, err := testEnv.Start() - Expect(err).NotTo(HaveOccurred()) - Expect(cfg).NotTo(BeNil()) - - err = infrav1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - //+kubebuilder:scaffold:scheme - - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - Expect(err).NotTo(HaveOccurred()) - Expect(k8sClient).NotTo(BeNil()) - -}, 60) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).NotTo(HaveOccurred()) -}) +//// These tests use Ginkgo (BDD-style Go testing framework). Refer to +//// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+// +//// var cfg *rest.Config +//var k8sClient client.Client +//var testEnv *envtest.Environment +// +//func TestAPIs(t *testing.T) { +// RegisterFailHandler(Fail) +// +// RunSpecsWithDefaultAndCustomReporters(t, +// "Controller Suite", +// []Reporter{printer.NewlineReporter{}}) +//} +// +//var _ = BeforeSuite(func() { +// logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +// +// By("bootstrapping test environment") +// testEnv = &envtest.Environment{ +// CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, +// ErrorIfCRDPathMissing: true, +// } +// +// cfg, err := testEnv.Start() +// Expect(err).NotTo(HaveOccurred()) +// Expect(cfg).NotTo(BeNil()) +// +// err = infrav1.AddToScheme(scheme.Scheme) +// Expect(err).NotTo(HaveOccurred()) +// +// //+kubebuilder:scaffold:scheme +// +// k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) +// Expect(err).NotTo(HaveOccurred()) +// Expect(k8sClient).NotTo(BeNil()) +// +//}, 60) +// +//var _ = AfterSuite(func() { +// By("tearing down the test environment") +// err := testEnv.Stop() +// Expect(err).NotTo(HaveOccurred()) +//}) diff --git a/controlplane/k3s/PROJECT b/controlplane/k3s/PROJECT new file mode 100644 index 00000000..6d7261b7 --- /dev/null +++ b/controlplane/k3s/PROJECT @@ -0,0 +1,36 @@ +domain: cluster.x-k8s.io +layout: +- go.kubebuilder.io/v3 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} +projectName: k3s +repo: github.com/kubesphere/kubekey/controlplane/k3s +resources: +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cluster.x-k8s.io + group: controlplane + kind: K3sControlPlane + path: github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1 + version: v1beta1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cluster.x-k8s.io + group: controlplane + kind: K3sControlPlaneTemplate 
+  path: github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1
+  version: v1beta1
+  webhooks:
+    defaulting: true
+    validation: true
+    webhookVersion: v1
+version: "3"
diff --git a/controlplane/k3s/api/v1beta1/condition_consts.go b/controlplane/k3s/api/v1beta1/condition_consts.go
new file mode 100644
index 00000000..effbf65d
--- /dev/null
+++ b/controlplane/k3s/api/v1beta1/condition_consts.go
@@ -0,0 +1,135 @@
+/*
+ Copyright 2022 The KubeSphere Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+// Conditions and condition Reasons for the KubeadmControlPlane object.
+
+const (
+	// MachinesReadyCondition reports an aggregate of current status of the machines controlled by the KubeadmControlPlane.
+	MachinesReadyCondition clusterv1.ConditionType = "MachinesReady"
+)
+
+const (
+	// CertificatesAvailableCondition documents that cluster certificates were generated as part of the
+	// processing of a KubeadmControlPlane object.
+	CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable"
+
+	// CertificatesGenerationFailedReason (Severity=Warning) documents a KubeadmControlPlane controller detecting
+	// an error while generating certificates; those kinds of errors are usually temporary and the controller
+	// automatically recovers from them.
+ CertificatesGenerationFailedReason = "CertificatesGenerationFailed" +) + +const ( + // AvailableCondition documents that the first control plane instance has completed the kubeadm init operation + // and so the control plane is available and an API server instance is ready for processing requests. + AvailableCondition clusterv1.ConditionType = "Available" + + // WaitingForKubeadmInitReason (Severity=Info) documents a KubeadmControlPlane object waiting for the first + // control plane instance to complete the kubeadm init operation. + WaitingForKubeadmInitReason = "WaitingForKubeadmInit" +) + +const ( + // MachinesSpecUpToDateCondition documents that the spec of the machines controlled by the K3sControlPlane + // is up to date. When this condition is false, the KubeadmControlPlane is executing a rolling upgrade. + MachinesSpecUpToDateCondition clusterv1.ConditionType = "MachinesSpecUpToDate" + + // RollingUpdateInProgressReason (Severity=Warning) documents a KubeadmControlPlane object executing a + // rolling upgrade for aligning the machines spec to the desired state. + RollingUpdateInProgressReason = "RollingUpdateInProgress" +) + +const ( + // ResizedCondition documents a KubeadmControlPlane that is resizing the set of controlled machines. + ResizedCondition clusterv1.ConditionType = "Resized" + + // ScalingUpReason (Severity=Info) documents a KubeadmControlPlane that is increasing the number of replicas. + ScalingUpReason = "ScalingUp" + + // ScalingDownReason (Severity=Info) documents a KubeadmControlPlane that is decreasing the number of replicas. + ScalingDownReason = "ScalingDown" +) + +const ( + // ControlPlaneComponentsHealthyCondition reports the overall status of control plane components + // implemented as static pods generated by kubeadm including kube-api-server, kube-controller manager, + // kube-scheduler and etcd if managed. 
+	ControlPlaneComponentsHealthyCondition clusterv1.ConditionType = "ControlPlaneComponentsHealthy"
+
+	// ControlPlaneComponentsUnhealthyReason (Severity=Error) documents a control plane component not healthy.
+	ControlPlaneComponentsUnhealthyReason = "ControlPlaneComponentsUnhealthy"
+
+	// ControlPlaneComponentsUnknownReason reports a control plane component in unknown status.
+	ControlPlaneComponentsUnknownReason = "ControlPlaneComponentsUnknown"
+
+	// ControlPlaneComponentsInspectionFailedReason documents a failure in inspecting the control plane component status.
+	ControlPlaneComponentsInspectionFailedReason = "ControlPlaneComponentsInspectionFailed"
+
+	// MachineAgentHealthyCondition reports a machine's agent operational status.
+	// NOTE: This condition exists only if a stacked etcd cluster is used.
+	MachineAgentHealthyCondition clusterv1.ConditionType = "AgentHealthy"
+
+	// PodProvisioningReason (Severity=Info) documents a pod waiting to be provisioned i.e., Pod is in "Pending" phase.
+	PodProvisioningReason = "PodProvisioning"
+
+	// PodMissingReason (Severity=Error) documents that a pod does not exist.
+	PodMissingReason = "PodMissing"
+
+	// PodFailedReason (Severity=Error) documents if a pod failed during provisioning e.g., CrashLoopBackOff, ImagePullBackOff
+	// or if all the containers in a pod have terminated.
+	PodFailedReason = "PodFailed"
+
+	// PodInspectionFailedReason documents a failure in inspecting the pod status.
+	PodInspectionFailedReason = "PodInspectionFailed"
+)
+
+const (
+	// EtcdClusterHealthyCondition documents the overall etcd cluster's health.
+	EtcdClusterHealthyCondition clusterv1.ConditionType = "EtcdClusterHealthyCondition"
+
+	// EtcdClusterInspectionFailedReason documents a failure in inspecting the etcd cluster status.
+	EtcdClusterInspectionFailedReason = "EtcdClusterInspectionFailed"
+
+	// MachineEtcdMemberHealthyCondition reports the machine's etcd member's health status.
+ // NOTE: This conditions exists only if a stacked etcd cluster is used. + MachineEtcdMemberHealthyCondition clusterv1.ConditionType = "EtcdMemberHealthy" + + // EtcdMemberInspectionFailedReason documents a failure in inspecting the etcd member status. + EtcdMemberInspectionFailedReason = "MemberInspectionFailed" + + // MachinesCreatedCondition documents that the machines controlled by the K3sControlPlane are created. + // When this condition is false, it indicates that there was an error when cloning the infrastructure/bootstrap template or + // when generating the machine object. + MachinesCreatedCondition clusterv1.ConditionType = "MachinesCreated" + + // InfrastructureTemplateCloningFailedReason (Severity=Error) documents a KubeadmControlPlane failing to + // clone the infrastructure template. + InfrastructureTemplateCloningFailedReason = "InfrastructureTemplateCloningFailed" + + // BootstrapTemplateCloningFailedReason (Severity=Error) documents a KubeadmControlPlane failing to + // clone the bootstrap template. + BootstrapTemplateCloningFailedReason = "BootstrapTemplateCloningFailed" + + // MachineGenerationFailedReason (Severity=Error) documents a KubeadmControlPlane failing to + // generate a machine object. + MachineGenerationFailedReason = "MachineGenerationFailed" +) diff --git a/controlplane/k3s/api/v1beta1/groupversion_info.go b/controlplane/k3s/api/v1beta1/groupversion_info.go new file mode 100644 index 00000000..2e71079c --- /dev/null +++ b/controlplane/k3s/api/v1beta1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta1 contains API Schema definitions for the controlplane v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=controlplane.cluster.x-k8s.io +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "controlplane.cluster.x-k8s.io", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/controlplane/k3s/api/v1beta1/k3scontrolplane_types.go b/controlplane/k3s/api/v1beta1/k3scontrolplane_types.go new file mode 100644 index 00000000..68ce9bb1 --- /dev/null +++ b/controlplane/k3s/api/v1beta1/k3scontrolplane_types.go @@ -0,0 +1,242 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/errors" + + infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1" +) + +// RolloutStrategyType defines the rollout strategies for a KubeadmControlPlane. +type RolloutStrategyType string + +const ( + // RollingUpdateStrategyType replaces the old control planes by new one using rolling update + // i.e. gradually scale up or down the old control planes and scale up or down the new one. + RollingUpdateStrategyType RolloutStrategyType = "RollingUpdate" +) + +const ( + // K3sControlPlaneFinalizer is the finalizer applied to K3sControlPlane resources + // by its managing controller. + K3sControlPlaneFinalizer = "k3s.controlplane.cluster.x-k8s.io" + + // SkipCoreDNSAnnotation annotation explicitly skips reconciling CoreDNS if set. + SkipCoreDNSAnnotation = "controlplane.cluster.x-k8s.io/skip-coredns" + + // SkipKubeProxyAnnotation annotation explicitly skips reconciling kube-proxy if set. + SkipKubeProxyAnnotation = "controlplane.cluster.x-k8s.io/skip-kube-proxy" + + // K3sServerConfigurationAnnotation is a machine annotation that stores the json-marshalled string of K3SCP ClusterConfiguration. + // This annotation is used to detect any changes in ClusterConfiguration and trigger machine rollout in K3SCP. + K3sServerConfigurationAnnotation = "controlplane.cluster.x-k8s.io/k3s-server-configuration" +) + +// K3sControlPlaneSpec defines the desired state of K3sControlPlane +type K3sControlPlaneSpec struct { + // Number of desired machines. Defaults to 1. When stacked etcd is used only + // odd numbers are permitted, as per [etcd best practice](https://etcd.io/docs/v3.3.12/faq/#why-an-odd-number-of-cluster-members). + // This is a pointer to distinguish between explicit zero and not specified. 
+ // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // Version defines the desired K3s version. + Version string `json:"version"` + + // MachineTemplate contains information about how machines + // should be shaped when creating or updating a control plane. + MachineTemplate K3sControlPlaneMachineTemplate `json:"machineTemplate"` + + // K3sConfigSpec is a K3sConfigSpec + // to use for initializing and joining machines to the control plane. + // +optional + K3sConfigSpec infrabootstrapv1.K3sConfigSpec `json:"k3sConfigSpec,omitempty"` + + // RolloutAfter is a field to indicate a rollout should be performed + // after the specified time even if no changes have been made to the + // KubeadmControlPlane. + // + // +optional + RolloutAfter *metav1.Time `json:"rolloutAfter,omitempty"` + + // The RolloutStrategy to use to replace control plane machines with + // new ones. + // +optional + // +kubebuilder:default={type: "RollingUpdate", rollingUpdate: {maxSurge: 1}} + RolloutStrategy *RolloutStrategy `json:"rolloutStrategy,omitempty"` +} + +// K3sControlPlaneMachineTemplate defines the template for Machines +// in a K3sControlPlane object. +type K3sControlPlaneMachineTemplate struct { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` + + // InfrastructureRef is a required reference to a custom resource + // offered by an infrastructure provider. + InfrastructureRef corev1.ObjectReference `json:"infrastructureRef"` + + // NodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node + // The default value is 0, meaning that the node can be drained without any time limitations. 
+ // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + // +optional + NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` + + // NodeDeletionTimeout defines how long the machine controller will attempt to delete the Node that the Machine + // hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + // If no value is provided, the default value for this property of the Machine resource will be used. + // +optional + NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` +} + +// RolloutStrategy describes how to replace existing machines +// with new ones. +type RolloutStrategy struct { + // Type of rollout. Currently the only supported strategy is + // "RollingUpdate". + // Default is RollingUpdate. + // +optional + Type RolloutStrategyType `json:"type,omitempty"` + + // Rolling update config params. Present only if + // RolloutStrategyType = RollingUpdate. + // +optional + RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"` +} + +// RollingUpdate is used to control the desired behavior of rolling update. +type RollingUpdate struct { + // The maximum number of control planes that can be scheduled above or under the + // desired number of control planes. + // Value can be an absolute number 1 or 0. + // Defaults to 1. + // Example: when this is set to 1, the control plane can be scaled + // up immediately when the rolling update starts. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` +} + +// K3sControlPlaneStatus defines the observed state of K3sControlPlane +type K3sControlPlaneStatus struct { + // Selector is the label selector in string format to avoid introspection + // by clients, and is used to provide the CRD-based integration for the + // scale subresource and additional integrations for things like kubectl + // describe.. The string will be in the same format as the query-param syntax. 
+ // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector string `json:"selector,omitempty"` + + // Total number of non-terminated machines targeted by this control plane + // (their labels match the selector). + // +optional + Replicas int32 `json:"replicas"` + + // Version represents the minimum Kubernetes version for the control plane machines + // in the cluster. + // +optional + Version *string `json:"version,omitempty"` + + // Total number of non-terminated machines targeted by this control plane + // that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas"` + + // Total number of fully running and ready control plane machines. + // +optional + ReadyReplicas int32 `json:"readyReplicas"` + + // Total number of unavailable machines targeted by this control plane. + // This is the total number of machines that are still required for + // the deployment to have 100% available capacity. They may either + // be machines that are running but not yet ready or machines + // that still have not been created. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas"` + + // Initialized denotes whether or not the control plane has the + // uploaded kubeadm-config configmap. + // +optional + Initialized bool `json:"initialized"` + + // Ready denotes that the KubeadmControlPlane API Server is ready to + // receive requests. + // +optional + Ready bool `json:"ready"` + + // FailureReason indicates that there is a terminal problem reconciling the + // state, and will be set to a token value suitable for + // programmatic interpretation. + // +optional + FailureReason errors.KubeadmControlPlaneStatusError `json:"failureReason,omitempty"` + + // ErrorMessage indicates that there is a terminal problem reconciling the + // state, and will be set to a descriptive error message. 
+ // +optional + FailureMessage *string `json:"failureMessage,omitempty"` + + // ObservedGeneration is the latest generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions defines current service state of the KubeadmControlPlane. + // +optional + Conditions clusterv1.Conditions `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=k3scontrolplanes,shortName=k3scp,scope=Namespaced,categories=cluster-api +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// K3sControlPlane is the Schema for the k3scontrolplanes API +type K3sControlPlane struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec K3sControlPlaneSpec `json:"spec,omitempty"` + Status K3sControlPlaneStatus `json:"status,omitempty"` +} + +// GetConditions returns the set of conditions for this object. +func (in *K3sControlPlane) GetConditions() clusterv1.Conditions { + return in.Status.Conditions +} + +// SetConditions sets the conditions on this object. +func (in *K3sControlPlane) SetConditions(conditions clusterv1.Conditions) { + in.Status.Conditions = conditions +} + +//+kubebuilder:object:root=true + +// K3sControlPlaneList contains a list of K3sControlPlane +type K3sControlPlaneList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []K3sControlPlane `json:"items"` +} + +func init() { + SchemeBuilder.Register(&K3sControlPlane{}, &K3sControlPlaneList{}) +} diff --git a/controlplane/k3s/api/v1beta1/k3scontrolplane_webhook.go b/controlplane/k3s/api/v1beta1/k3scontrolplane_webhook.go new file mode 100644 index 00000000..b5166576 --- /dev/null +++ b/controlplane/k3s/api/v1beta1/k3scontrolplane_webhook.go @@ -0,0 +1,461 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/blang/semver" + jsonpatch "github.com/evanphx/json-patch" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/cluster-api/util/version" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1" +) + +func (in *K3sControlPlane) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(in). 
+	Complete()
+}
+
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-controlplane-cluster-x-k8s-io-v1beta1-k3scontrolplane,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=k3scontrolplanes,versions=v1beta1,name=default.k3scontrolplane.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/validate-controlplane-cluster-x-k8s-io-v1beta1-k3scontrolplane,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=k3scontrolplanes,versions=v1beta1,name=validation.k3scontrolplane.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+
+var _ webhook.Defaulter = &K3sControlPlane{}
+var _ webhook.Validator = &K3sControlPlane{}
+
+// Default implements webhook.Defaulter so a webhook will be registered for the type
+func (in *K3sControlPlane) Default() {
+	defaultK3sControlPlaneSpec(&in.Spec, in.Namespace)
+}
+
+func defaultK3sControlPlaneSpec(s *K3sControlPlaneSpec, namespace string) {
+	if s.Replicas == nil {
+		replicas := int32(1)
+		s.Replicas = &replicas
+	}
+
+	if s.MachineTemplate.InfrastructureRef.Namespace == "" {
+		s.MachineTemplate.InfrastructureRef.Namespace = namespace
+	}
+
+	if !strings.HasPrefix(s.Version, "v") {
+		s.Version = "v" + s.Version
+	}
+
+	// Default to embedded etcd (cluster-init) when no external datastore endpoint is configured.
+	if s.K3sConfigSpec.ServerConfiguration != nil && s.K3sConfigSpec.ServerConfiguration.Database.DataStoreEndPoint == "" && !s.K3sConfigSpec.ServerConfiguration.Database.ClusterInit {
+		s.K3sConfigSpec.ServerConfiguration.Database.ClusterInit = true
+	}
+
+	infrabootstrapv1.DefaultK3sConfigSpec(&s.K3sConfigSpec)
+
+	s.RolloutStrategy = defaultRolloutStrategy(s.RolloutStrategy)
+}
+
+func defaultRolloutStrategy(rolloutStrategy *RolloutStrategy) *RolloutStrategy {
+	ios1 := intstr.FromInt(1)
+
+	if rolloutStrategy == nil {
+		rolloutStrategy = &RolloutStrategy{}
+	}
+
+	// Enforce RollingUpdate strategy and default MaxSurge if not set.
+	if rolloutStrategy != nil {
+		if len(rolloutStrategy.Type) == 0 {
+			rolloutStrategy.Type = RollingUpdateStrategyType
+		}
+		if rolloutStrategy.Type == RollingUpdateStrategyType {
+			if rolloutStrategy.RollingUpdate == nil {
+				rolloutStrategy.RollingUpdate = &RollingUpdate{}
+			}
+			rolloutStrategy.RollingUpdate.MaxSurge = intstr.ValueOrDefault(rolloutStrategy.RollingUpdate.MaxSurge, ios1)
+		}
+	}
+
+	return rolloutStrategy
+}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
+func (in *K3sControlPlane) ValidateCreate() error {
+	spec := in.Spec
+	allErrs := validateK3sControlPlaneSpec(spec, in.Namespace, field.NewPath("spec"))
+	allErrs = append(allErrs, validateServerConfiguration(spec.K3sConfigSpec.ServerConfiguration, nil, field.NewPath("spec", "k3sConfigSpec", "serverConfiguration"))...)
+	allErrs = append(allErrs, spec.K3sConfigSpec.Validate(field.NewPath("spec", "k3sConfigSpec"))...)
+	if len(allErrs) > 0 {
+		return apierrors.NewInvalid(GroupVersion.WithKind("K3sControlPlane").GroupKind(), in.Name, allErrs)
+	}
+	return nil
+}
+
+const (
+	spec            = "spec"
+	k3sConfigSpec   = "k3sConfigSpec"
+	preK3sCommands  = "preK3sCommands"
+	postK3sCommands = "postK3sCommands"
+	files           = "files"
+)
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
+func (in *K3sControlPlane) ValidateUpdate(old runtime.Object) error {
+	// add a * to indicate everything beneath is ok.
+	// For example, {"spec", "*"} will allow any path under "spec" to change.
+ allowedPaths := [][]string{ + {"metadata", "*"}, + {spec, k3sConfigSpec, preK3sCommands}, + {spec, k3sConfigSpec, postK3sCommands}, + {spec, k3sConfigSpec, files}, + {spec, "machineTemplate", "metadata", "*"}, + {spec, "machineTemplate", "infrastructureRef", "apiVersion"}, + {spec, "machineTemplate", "infrastructureRef", "name"}, + {spec, "machineTemplate", "infrastructureRef", "kind"}, + {spec, "machineTemplate", "nodeDrainTimeout"}, + {spec, "machineTemplate", "nodeDeletionTimeout"}, + {spec, "replicas"}, + {spec, "version"}, + {spec, "rolloutAfter"}, + {spec, "rolloutStrategy", "*"}, + } + + allErrs := validateK3sControlPlaneSpec(in.Spec, in.Namespace, field.NewPath("spec")) + + prev, ok := old.(*K3sControlPlane) + if !ok { + return apierrors.NewBadRequest(fmt.Sprintf("expecting K3sControlPlane but got a %T", old)) + } + + originalJSON, err := json.Marshal(prev) + if err != nil { + return apierrors.NewInternalError(err) + } + modifiedJSON, err := json.Marshal(in) + if err != nil { + return apierrors.NewInternalError(err) + } + + diff, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) + if err != nil { + return apierrors.NewInternalError(err) + } + jsonPatch := map[string]interface{}{} + if err := json.Unmarshal(diff, &jsonPatch); err != nil { + return apierrors.NewInternalError(err) + } + // Build a list of all paths that are trying to change + diffpaths := paths([]string{}, jsonPatch) + // Every path in the diff must be valid for the update function to work. + for _, path := range diffpaths { + // Ignore paths that are empty + if len(path) == 0 { + continue + } + if !allowed(allowedPaths, path) { + if len(path) == 1 { + allErrs = append(allErrs, field.Forbidden(field.NewPath(path[0]), "cannot be modified")) + continue + } + allErrs = append(allErrs, field.Forbidden(field.NewPath(path[0], path[1:]...), "cannot be modified")) + } + } + + allErrs = append(allErrs, in.validateVersion(prev.Spec.Version)...) 
+	allErrs = append(allErrs, validateServerConfiguration(in.Spec.K3sConfigSpec.ServerConfiguration, prev.Spec.K3sConfigSpec.ServerConfiguration, field.NewPath("spec", "k3sConfigSpec", "serverConfiguration"))...)
+	allErrs = append(allErrs, in.Spec.K3sConfigSpec.Validate(field.NewPath("spec", "k3sConfigSpec"))...)
+
+	if len(allErrs) > 0 {
+		return apierrors.NewInvalid(GroupVersion.WithKind("K3sControlPlane").GroupKind(), in.Name, allErrs)
+	}
+
+	return nil
+}
+
+func validateK3sControlPlaneSpec(s K3sControlPlaneSpec, namespace string, pathPrefix *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if s.Replicas == nil {
+		allErrs = append(
+			allErrs,
+			field.Required(
+				pathPrefix.Child("replicas"),
+				"is required",
+			),
+		)
+	} else if *s.Replicas <= 0 {
+		// The use of the scale subresource should provide a guarantee that negative values
+		// should not be accepted for this field, but since we have to validate that Replicas != 0
+		// it doesn't hurt to also additionally validate for negative numbers here as well.
+ allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("replicas"), + "cannot be less than or equal to 0", + ), + ) + } + + if s.MachineTemplate.InfrastructureRef.APIVersion == "" { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("machineTemplate", "infrastructure", "apiVersion"), + s.MachineTemplate.InfrastructureRef.APIVersion, + "cannot be empty", + ), + ) + } + if s.MachineTemplate.InfrastructureRef.Kind == "" { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("machineTemplate", "infrastructure", "kind"), + s.MachineTemplate.InfrastructureRef.Kind, + "cannot be empty", + ), + ) + } + if s.MachineTemplate.InfrastructureRef.Name == "" { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("machineTemplate", "infrastructure", "name"), + s.MachineTemplate.InfrastructureRef.Name, + "cannot be empty", + ), + ) + } + if s.MachineTemplate.InfrastructureRef.Namespace != namespace { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("machineTemplate", "infrastructure", "namespace"), + s.MachineTemplate.InfrastructureRef.Namespace, + "must match metadata.namespace", + ), + ) + } + + if !version.KubeSemver.MatchString(s.Version) { + allErrs = append(allErrs, field.Invalid(pathPrefix.Child("version"), s.Version, "must be a valid semantic version")) + } + + allErrs = append(allErrs, validateRolloutStrategy(s.RolloutStrategy, s.Replicas, pathPrefix.Child("rolloutStrategy"))...) 
+ + return allErrs +} + +func validateRolloutStrategy(rolloutStrategy *RolloutStrategy, replicas *int32, pathPrefix *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if rolloutStrategy == nil { + return allErrs + } + + if rolloutStrategy.Type != RollingUpdateStrategyType { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("type"), + "only RollingUpdateStrategyType is supported", + ), + ) + } + + ios1 := intstr.FromInt(1) + ios0 := intstr.FromInt(0) + + if *rolloutStrategy.RollingUpdate.MaxSurge == ios0 && (replicas != nil && *replicas < int32(3)) { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("rollingUpdate"), + "when KubeadmControlPlane is configured to scale-in, replica count needs to be at least 3", + ), + ) + } + + if *rolloutStrategy.RollingUpdate.MaxSurge != ios1 && *rolloutStrategy.RollingUpdate.MaxSurge != ios0 { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("rollingUpdate", "maxSurge"), + "value must be 1 or 0", + ), + ) + } + + return allErrs +} + +func validateServerConfiguration(newServerConfiguration, oldServerConfiguration *infrabootstrapv1.ServerConfiguration, pathPrefix *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if newServerConfiguration == nil { + return allErrs + } + + if newServerConfiguration.Database.ClusterInit && newServerConfiguration.Database.DataStoreEndPoint != "" { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("database", "clusterInit"), + "cannot have both external and local etcd", + ), + ) + } + + // update validations + if oldServerConfiguration != nil { + if newServerConfiguration.Database.ClusterInit && oldServerConfiguration.Database.DataStoreEndPoint != "" { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("database", "clusterInit"), + "cannot change between external and local etcd", + ), + ) + } + + if newServerConfiguration.Database.DataStoreEndPoint != "" && 
oldServerConfiguration.Database.ClusterInit { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("database", "dataStoreEndPoint"), + "cannot change between external and local etcd", + ), + ) + } + } + + return allErrs +} + +func allowed(allowList [][]string, path []string) bool { + for _, allowed := range allowList { + if pathsMatch(allowed, path) { + return true + } + } + return false +} + +func pathsMatch(allowed, path []string) bool { + // if either are empty then no match can be made + if len(allowed) == 0 || len(path) == 0 { + return false + } + i := 0 + for i = range path { + // reached the end of the allowed path and no match was found + if i > len(allowed)-1 { + return false + } + if allowed[i] == "*" { + return true + } + if path[i] != allowed[i] { + return false + } + } + // path has been completely iterated and has not matched the end of the path. + // e.g. allowed: []string{"a","b","c"}, path: []string{"a"} + return i >= len(allowed)-1 +} + +// paths builds a slice of paths that are being modified. +func paths(path []string, diff map[string]interface{}) [][]string { + allPaths := [][]string{} + for key, m := range diff { + nested, ok := m.(map[string]interface{}) + if !ok { + // We have to use a copy of path, because otherwise the slice we append to + // allPaths would be overwritten in another iteration. + tmp := make([]string, len(path)) + copy(tmp, path) + allPaths = append(allPaths, append(tmp, key)) + continue + } + allPaths = append(allPaths, paths(append(path, key), nested)...) 
+ } + return allPaths +} + +func (in *K3sControlPlane) validateVersion(previousVersion string) (allErrs field.ErrorList) { + fromVersion, err := version.ParseMajorMinorPatch(previousVersion) + if err != nil { + allErrs = append(allErrs, + field.InternalError( + field.NewPath("spec", "version"), + errors.Wrapf(err, "failed to parse current k3scontrolplane version: %s", previousVersion), + ), + ) + return allErrs + } + + toVersion, err := version.ParseMajorMinorPatch(in.Spec.Version) + if err != nil { + allErrs = append(allErrs, + field.InternalError( + field.NewPath("spec", "version"), + errors.Wrapf(err, "failed to parse updated k3scontrolplane version: %s", in.Spec.Version), + ), + ) + return allErrs + } + + // Check if we're trying to upgrade to Kubernetes v1.19.0, which is not supported. + // + // See https://github.com/kubernetes-sigs/cluster-api/issues/3564 + if fromVersion.NE(toVersion) && toVersion.Equals(semver.MustParse("1.19.0")) { + allErrs = append(allErrs, + field.Forbidden( + field.NewPath("spec", "version"), + "cannot update Kubernetes version to v1.19.0, for more information see https://github.com/kubernetes-sigs/cluster-api/issues/3564", + ), + ) + return allErrs + } + + // Since upgrades to the next minor version are allowed, irrespective of the patch version. 
+ ceilVersion := semver.Version{ + Major: fromVersion.Major, + Minor: fromVersion.Minor + 2, + Patch: 0, + } + if toVersion.GTE(ceilVersion) { + allErrs = append(allErrs, + field.Forbidden( + field.NewPath("spec", "version"), + fmt.Sprintf("cannot update Kubernetes version from %s to %s", previousVersion, in.Spec.Version), + ), + ) + } + + return allErrs +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (in *K3sControlPlane) ValidateDelete() error { + return nil +} diff --git a/controlplane/k3s/api/v1beta1/k3scontrolplanetemplate_types.go b/controlplane/k3s/api/v1beta1/k3scontrolplanetemplate_types.go new file mode 100644 index 00000000..b1c6b2ba --- /dev/null +++ b/controlplane/k3s/api/v1beta1/k3scontrolplanetemplate_types.go @@ -0,0 +1,106 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1" +) + +// K3sControlPlaneTemplateSpec defines the desired state of K3sControlPlaneTemplate +type K3sControlPlaneTemplateSpec struct { + Template K3sControlPlaneTemplateResource `json:"template"` +} + +// K3sControlPlaneTemplateResource describes the data needed to create a K3sControlPlane from a template. 
+type K3sControlPlaneTemplateResource struct { + Spec K3sControlPlaneTemplateResourceSpec `json:"spec"` +} + +// K3sControlPlaneTemplateResourceSpec defines the desired state of KubeadmControlPlane. +// NOTE: K3sControlPlaneTemplateResourceSpec is similar to K3sControlPlaneSpec but +// omits Replicas and Version fields. These fields do not make sense on the K3sControlPlaneTemplate, +// because they are calculated by the Cluster topology reconciler during reconciliation and thus cannot +// be configured on the K3sControlPlaneTemplate. +type K3sControlPlaneTemplateResourceSpec struct { + // MachineTemplate contains information about how machines + // should be shaped when creating or updating a control plane. + // +optional + MachineTemplate *K3sControlPlaneTemplateMachineTemplate `json:"machineTemplate,omitempty"` + + // K3sConfigSpec is a K3sConfigSpec + // to use for initializing and joining machines to the control plane. + K3sConfigSpec infrabootstrapv1.K3sConfigSpec `json:"k3sConfigSpec"` + + // RolloutAfter is a field to indicate a rollout should be performed + // after the specified time even if no changes have been made to the + // KubeadmControlPlane. + // + // +optional + RolloutAfter *metav1.Time `json:"rolloutAfter,omitempty"` + + // The RolloutStrategy to use to replace control plane machines with + // new ones. + // +optional + // +kubebuilder:default={type: "RollingUpdate", rollingUpdate: {maxSurge: 1}} + RolloutStrategy *RolloutStrategy `json:"rolloutStrategy,omitempty"` +} + +// K3sControlPlaneTemplateMachineTemplate defines the template for Machines +// in a KubeadmControlPlaneTemplate object. +// NOTE: KubeadmControlPlaneTemplateMachineTemplate is similar to KubeadmControlPlaneMachineTemplate but +// omits ObjectMeta and InfrastructureRef fields. 
These fields do not make sense on the KubeadmControlPlaneTemplate, +// because they are calculated by the Cluster topology reconciler during reconciliation and thus cannot +// be configured on the KubeadmControlPlaneTemplate. +type K3sControlPlaneTemplateMachineTemplate struct { + // NodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node + // The default value is 0, meaning that the node can be drained without any time limitations. + // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + // +optional + NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` + + // NodeDeletionTimeout defines how long the machine controller will attempt to delete the Node that the Machine + // hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + // If no value is provided, the default value for this property of the Machine resource will be used. + // +optional + NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// K3sControlPlaneTemplate is the Schema for the k3scontrolplanetemplates API +type K3sControlPlaneTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec K3sControlPlaneTemplateSpec `json:"spec,omitempty"` +} + +//+kubebuilder:object:root=true + +// K3sControlPlaneTemplateList contains a list of K3sControlPlaneTemplate +type K3sControlPlaneTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []K3sControlPlaneTemplate `json:"items"` +} + +func init() { + SchemeBuilder.Register(&K3sControlPlaneTemplate{}, &K3sControlPlaneTemplateList{}) +} diff --git a/controlplane/k3s/api/v1beta1/k3scontrolplanetemplate_webhook.go b/controlplane/k3s/api/v1beta1/k3scontrolplanetemplate_webhook.go new file mode 100644 index 00000000..a2c3d94b --- /dev/null 
+++ b/controlplane/k3s/api/v1beta1/k3scontrolplanetemplate_webhook.go @@ -0,0 +1,106 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + "reflect" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/cluster-api/feature" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1" +) + +const k3sControlPlaneTemplateImmutableMsg = "K3sControlPlaneTemplate spec.template.spec field is immutable. Please create new resource instead." + +func (r *K3sControlPlaneTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/mutate-controlplane-cluster-x-k8s-io-v1beta1-k3scontrolplanetemplate,mutating=true,failurePolicy=fail,groups=controlplane.cluster.x-k8s.io,resources=k3scontrolplanetemplates,versions=v1beta1,name=default.k3scontrolplanetemplate.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var _ webhook.Defaulter = &K3sControlPlaneTemplate{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *K3sControlPlaneTemplate) Default() { + infrabootstrapv1.DefaultK3sConfigSpec(&r.Spec.Template.Spec.K3sConfigSpec) + + r.Spec.Template.Spec.RolloutStrategy = defaultRolloutStrategy(r.Spec.Template.Spec.RolloutStrategy) +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-controlplane-cluster-x-k8s-io-v1beta1-k3scontrolplanetemplate,mutating=false,failurePolicy=fail,groups=controlplane.cluster.x-k8s.io,resources=k3scontrolplanetemplates,versions=v1beta1,name=validation.k3scontrolplanetemplate.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var _ webhook.Validator = &K3sControlPlaneTemplate{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *K3sControlPlaneTemplate) ValidateCreate() error { + // NOTE: KubeadmControlPlaneTemplate is behind ClusterTopology feature gate flag; the web hook + // must prevent creating new objects in case the feature flag is disabled. 
+ if !feature.Gates.Enabled(feature.ClusterTopology) { + return field.Forbidden( + field.NewPath("spec"), + "can be set only if the ClusterTopology feature flag is enabled", + ) + } + + spec := r.Spec.Template.Spec + allErrs := validateK3sControlPlaneTemplateResourceSpec(spec, field.NewPath("spec", "template", "spec")) + allErrs = append(allErrs, validateServerConfiguration(spec.K3sConfigSpec.ServerConfiguration, nil, field.NewPath("spec", "template", "spec", "k3sConfigSpec", "serverConfiguration"))...) + allErrs = append(allErrs, spec.K3sConfigSpec.Validate(field.NewPath("spec", "template", "spec", "k3sConfigSpec"))...) + if len(allErrs) > 0 { + return apierrors.NewInvalid(GroupVersion.WithKind("K3sControlPlaneTemplate").GroupKind(), r.Name, allErrs) + } + return nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *K3sControlPlaneTemplate) ValidateUpdate(oldRaw runtime.Object) error { + var allErrs field.ErrorList + old, ok := oldRaw.(*K3sControlPlaneTemplate) + if !ok { + return apierrors.NewBadRequest(fmt.Sprintf("expected a K3sControlPlaneTemplate but got a %T", oldRaw)) + } + + if !reflect.DeepEqual(r.Spec.Template.Spec, old.Spec.Template.Spec) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "template", "spec"), r, k3sControlPlaneTemplateImmutableMsg), + ) + } + + if len(allErrs) == 0 { + return nil + } + return apierrors.NewInvalid(GroupVersion.WithKind("K3sControlPlaneTemplate").GroupKind(), r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *K3sControlPlaneTemplate) ValidateDelete() error { + return nil +} + +// validateK3sControlPlaneTemplateResourceSpec is a copy of validateK3sControlPlaneSpec which +// only validates the fields in K3sControlPlaneTemplateResourceSpec we care about. 
+func validateK3sControlPlaneTemplateResourceSpec(s K3sControlPlaneTemplateResourceSpec, pathPrefix *field.Path) field.ErrorList { + return validateRolloutStrategy(s.RolloutStrategy, nil, pathPrefix.Child("rolloutStrategy")) +} diff --git a/controlplane/k3s/api/v1beta1/zz_generated.deepcopy.go b/controlplane/k3s/api/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..4364ce5b --- /dev/null +++ b/controlplane/k3s/api/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,363 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2022 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sControlPlane) DeepCopyInto(out *K3sControlPlane) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlane. 
+func (in *K3sControlPlane) DeepCopy() *K3sControlPlane { + if in == nil { + return nil + } + out := new(K3sControlPlane) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *K3sControlPlane) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sControlPlaneList) DeepCopyInto(out *K3sControlPlaneList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]K3sControlPlane, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneList. +func (in *K3sControlPlaneList) DeepCopy() *K3sControlPlaneList { + if in == nil { + return nil + } + out := new(K3sControlPlaneList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *K3sControlPlaneList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *K3sControlPlaneMachineTemplate) DeepCopyInto(out *K3sControlPlaneMachineTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.InfrastructureRef = in.InfrastructureRef + if in.NodeDrainTimeout != nil { + in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout + *out = new(v1.Duration) + **out = **in + } + if in.NodeDeletionTimeout != nil { + in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout + *out = new(v1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneMachineTemplate. +func (in *K3sControlPlaneMachineTemplate) DeepCopy() *K3sControlPlaneMachineTemplate { + if in == nil { + return nil + } + out := new(K3sControlPlaneMachineTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sControlPlaneSpec) DeepCopyInto(out *K3sControlPlaneSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.MachineTemplate.DeepCopyInto(&out.MachineTemplate) + in.K3sConfigSpec.DeepCopyInto(&out.K3sConfigSpec) + if in.RolloutAfter != nil { + in, out := &in.RolloutAfter, &out.RolloutAfter + *out = (*in).DeepCopy() + } + if in.RolloutStrategy != nil { + in, out := &in.RolloutStrategy, &out.RolloutStrategy + *out = new(RolloutStrategy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneSpec. +func (in *K3sControlPlaneSpec) DeepCopy() *K3sControlPlaneSpec { + if in == nil { + return nil + } + out := new(K3sControlPlaneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *K3sControlPlaneStatus) DeepCopyInto(out *K3sControlPlaneStatus) { + *out = *in + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.FailureMessage != nil { + in, out := &in.FailureMessage, &out.FailureMessage + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(apiv1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneStatus. +func (in *K3sControlPlaneStatus) DeepCopy() *K3sControlPlaneStatus { + if in == nil { + return nil + } + out := new(K3sControlPlaneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sControlPlaneTemplate) DeepCopyInto(out *K3sControlPlaneTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneTemplate. +func (in *K3sControlPlaneTemplate) DeepCopy() *K3sControlPlaneTemplate { + if in == nil { + return nil + } + out := new(K3sControlPlaneTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *K3sControlPlaneTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *K3sControlPlaneTemplateList) DeepCopyInto(out *K3sControlPlaneTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]K3sControlPlaneTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneTemplateList. +func (in *K3sControlPlaneTemplateList) DeepCopy() *K3sControlPlaneTemplateList { + if in == nil { + return nil + } + out := new(K3sControlPlaneTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *K3sControlPlaneTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sControlPlaneTemplateMachineTemplate) DeepCopyInto(out *K3sControlPlaneTemplateMachineTemplate) { + *out = *in + if in.NodeDrainTimeout != nil { + in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout + *out = new(v1.Duration) + **out = **in + } + if in.NodeDeletionTimeout != nil { + in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout + *out = new(v1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneTemplateMachineTemplate. +func (in *K3sControlPlaneTemplateMachineTemplate) DeepCopy() *K3sControlPlaneTemplateMachineTemplate { + if in == nil { + return nil + } + out := new(K3sControlPlaneTemplateMachineTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *K3sControlPlaneTemplateResource) DeepCopyInto(out *K3sControlPlaneTemplateResource) { + *out = *in + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneTemplateResource. +func (in *K3sControlPlaneTemplateResource) DeepCopy() *K3sControlPlaneTemplateResource { + if in == nil { + return nil + } + out := new(K3sControlPlaneTemplateResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sControlPlaneTemplateResourceSpec) DeepCopyInto(out *K3sControlPlaneTemplateResourceSpec) { + *out = *in + if in.MachineTemplate != nil { + in, out := &in.MachineTemplate, &out.MachineTemplate + *out = new(K3sControlPlaneTemplateMachineTemplate) + (*in).DeepCopyInto(*out) + } + in.K3sConfigSpec.DeepCopyInto(&out.K3sConfigSpec) + if in.RolloutAfter != nil { + in, out := &in.RolloutAfter, &out.RolloutAfter + *out = (*in).DeepCopy() + } + if in.RolloutStrategy != nil { + in, out := &in.RolloutStrategy, &out.RolloutStrategy + *out = new(RolloutStrategy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneTemplateResourceSpec. +func (in *K3sControlPlaneTemplateResourceSpec) DeepCopy() *K3sControlPlaneTemplateResourceSpec { + if in == nil { + return nil + } + out := new(K3sControlPlaneTemplateResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K3sControlPlaneTemplateSpec) DeepCopyInto(out *K3sControlPlaneTemplateSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneTemplateSpec. 
+func (in *K3sControlPlaneTemplateSpec) DeepCopy() *K3sControlPlaneTemplateSpec { + if in == nil { + return nil + } + out := new(K3sControlPlaneTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) { + *out = *in + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate. +func (in *RollingUpdate) DeepCopy() *RollingUpdate { + if in == nil { + return nil + } + out := new(RollingUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutStrategy) DeepCopyInto(out *RolloutStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutStrategy. +func (in *RolloutStrategy) DeepCopy() *RolloutStrategy { + if in == nil { + return nil + } + out := new(RolloutStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/controlplane/k3s/config/certmanager/certificate.yaml b/controlplane/k3s/config/certmanager/certificate.yaml new file mode 100644 index 00000000..0f645290 --- /dev/null +++ b/controlplane/k3s/config/certmanager/certificate.yaml @@ -0,0 +1,25 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. 
+# More document can be found at https://docs.cert-manager.io +# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for breaking changes +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize \ No newline at end of file diff --git a/controlplane/k3s/config/certmanager/kustomization.yaml b/controlplane/k3s/config/certmanager/kustomization.yaml new file mode 100644 index 00000000..bebea5a5 --- /dev/null +++ b/controlplane/k3s/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/controlplane/k3s/config/certmanager/kustomizeconfig.yaml b/controlplane/k3s/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 00000000..28a895a4 --- /dev/null +++ b/controlplane/k3s/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: +- kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: +- kind: Certificate + group: cert-manager.io + path: spec/commonName +- kind: Certificate + group: cert-manager.io + path: spec/dnsNames +- kind: Certificate + group: cert-manager.io + path: spec/secretName diff --git 
a/controlplane/k3s/config/crd/bases/controlplane.cluster.x-k8s.io_k3scontrolplanes.yaml b/controlplane/k3s/config/crd/bases/controlplane.cluster.x-k8s.io_k3scontrolplanes.yaml new file mode 100644 index 00000000..729e85a1 --- /dev/null +++ b/controlplane/k3s/config/crd/bases/controlplane.cluster.x-k8s.io_k3scontrolplanes.yaml @@ -0,0 +1,585 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.1 + creationTimestamp: null + name: k3scontrolplanes.controlplane.cluster.x-k8s.io +spec: + group: controlplane.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: K3sControlPlane + listKind: K3sControlPlaneList + plural: k3scontrolplanes + shortNames: + - k3scp + singular: k3scontrolplane + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: K3sControlPlane is the Schema for the k3scontrolplanes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: K3sControlPlaneSpec defines the desired state of K3sControlPlane + properties: + k3sConfigSpec: + description: K3sConfigSpec is a K3sConfigSpec to use for initializing + and joining machines to the control plane. 
+ properties: + agentConfiguration: + description: AgentConfiguration defines the k3s agent configuration. + properties: + networking: + description: Networking defines the k3s agent networking configuration. + properties: + nodeExternalIP: + description: NodeExternalIP External IP address to advertise + for node. + type: string + nodeIP: + description: NodeIP IP address to advertise for node. + type: string + resolvConf: + description: ResolvConf Path to Kubelet resolv.conf file. + type: string + type: object + node: + description: Node defines the k3s agent node configuration. + properties: + dataDir: + description: DataDir Folder to hold state. + type: string + lbServerPort: + description: 'LBServerPort Local port for supervisor client + load-balancer. If the supervisor and apiserver are not + colocated an additional port 1 less than this port will + also be used for the apiserver client load-balancer. + (default: 6444)' + type: integer + nodeLabels: + description: NodeLabels registering and starting kubelet + with set of labels. + items: + type: string + type: array + nodeName: + description: NodeName k3s node name. + type: string + nodeTaints: + description: NodeTaints registering and starting kubelet + with set of taints. + items: + type: string + type: array + seLinux: + description: SeLinux Enable SELinux in containerd + type: boolean + type: object + runtime: + description: Runtime defines the k3s agent runtime configuration. + properties: + containerRuntimeEndpoint: + description: ContainerRuntimeEndpoint Disable embedded + containerd and use alternative CRI implementation. + type: string + pauseImage: + description: PauseImage Customized pause image for containerd + or Docker sandbox. + type: string + privateRegistry: + description: PrivateRegistry Path to a private registry + configuration file. + type: string + type: object + type: object + cluster: + description: Cluster defines the k3s cluster Options. 
+ properties: + server: + description: Server which server to connect to, used to join + a cluster. + type: string + token: + description: Token shared secret used to join a server or + agent to a cluster. + type: string + tokenFile: + description: TokenFile file containing the cluster-secret/token. + type: string + type: object + files: + description: Files specifies extra files to be passed to user_data + upon creation. + items: + description: File defines the input for generating write_files + in cloud-init. + properties: + append: + description: Append specifies whether to append Content + to existing file if Path exists. + type: boolean + content: + description: Content is the actual content of the file. + type: string + contentFrom: + description: ContentFrom is a referenced source of content + to populate the file. + properties: + secret: + description: Secret represents a secret that should + populate this file. + properties: + key: + description: Key is the key in the secret's data + map for this value. + type: string + name: + description: Name of the secret in the KubeadmBootstrapConfig's + namespace to use. + type: string + required: + - key + - name + type: object + required: + - secret + type: object + encoding: + description: Encoding specifies the encoding of the file + contents. + enum: + - base64 + - gzip + - gzip+base64 + type: string + owner: + description: Owner specifies the ownership of the file, + e.g. "root:root". + type: string + path: + description: Path specifies the full path on disk where + to store the file. + type: string + permissions: + description: Permissions specifies the permissions to assign + to the file, e.g. "0640". 
+ type: string + required: + - path + type: object + type: array + postK3sCommands: + description: PostK3sCommands specifies extra commands to run after + k3s setup runs + items: + type: string + type: array + preK3sCommands: + description: PreK3sCommands specifies extra commands to run before + k3s setup runs + items: + type: string + type: array + serverConfiguration: + description: ServerConfiguration defines the k3s server configuration. + properties: + agent: + description: Agent is the agent configuration. + properties: + networking: + description: Networking defines the k3s agent networking + configuration. + properties: + nodeExternalIP: + description: NodeExternalIP External IP address to + advertise for node. + type: string + nodeIP: + description: NodeIP IP address to advertise for node. + type: string + resolvConf: + description: ResolvConf Path to Kubelet resolv.conf + file. + type: string + type: object + node: + description: Node defines the k3s agent node configuration. + properties: + dataDir: + description: DataDir Folder to hold state. + type: string + lbServerPort: + description: 'LBServerPort Local port for supervisor + client load-balancer. If the supervisor and apiserver + are not colocated an additional port 1 less than + this port will also be used for the apiserver client + load-balancer. (default: 6444)' + type: integer + nodeLabels: + description: NodeLabels registering and starting kubelet + with set of labels. + items: + type: string + type: array + nodeName: + description: NodeName k3s node name. + type: string + nodeTaints: + description: NodeTaints registering and starting kubelet + with set of taints. + items: + type: string + type: array + seLinux: + description: SeLinux Enable SELinux in containerd + type: boolean + type: object + runtime: + description: Runtime defines the k3s agent runtime configuration. 
+ properties: + containerRuntimeEndpoint: + description: ContainerRuntimeEndpoint Disable embedded + containerd and use alternative CRI implementation. + type: string + pauseImage: + description: PauseImage Customized pause image for + containerd or Docker sandbox. + type: string + privateRegistry: + description: PrivateRegistry Path to a private registry + configuration file. + type: string + type: object + type: object + database: + description: Database is the database configuration. + properties: + clusterInit: + description: ClusterInit initialize a new cluster using + embedded Etcd. + type: boolean + dataStoreCAFile: + description: DataStoreCAFile TLS Certificate Authority + file used to secure datastore backend communication. + type: string + dataStoreCertFile: + description: DataStoreCertFile TLS certification file + used to secure datastore backend communication. + type: string + dataStoreEndPoint: + description: DataStoreEndPoint specify etcd, Mysql, Postgres, + or Sqlite (default) data source name. + type: string + dataStoreKeyFile: + description: DataStoreKeyFile TLS key file used to secure + datastore backend communication. + type: string + type: object + listener: + description: Listener is the listener configuration. + properties: + advertiseAddress: + description: AdvertiseAddress IP address that apiserver + uses to advertise to members of the cluster. + type: string + advertisePort: + description: 'AdvertisePort Port that apiserver uses to + advertise to members of the cluster (default: listen-port).' + type: integer + bindAddress: + description: BindAddress k3s bind address. + type: string + httpsListenPort: + description: HTTPSListenPort HTTPS listen port. + type: integer + tlsSan: + description: TLSSan Add additional hostname or IP as a + Subject Alternative Name in the TLS cert. + type: string + type: object + networking: + description: Networking is the networking configuration. 
+ properties: + clusterCIDR: + description: ClusterCIDR Network CIDR to use for pod IPs. + type: string + clusterDNS: + description: ClusterDNS cluster IP for coredns service. + Should be in your service-cidr range. + type: string + clusterDomain: + description: ClusterDomain cluster Domain. + type: string + flannelBackend: + description: 'FlannelBackend One of ‘none’, ‘vxlan’, ‘ipsec’, + ‘host-gw’, or ‘wireguard’. (default: vxlan)' + type: string + serviceCIDR: + description: ServiceCIDR Network CIDR to use for services + IPs. + type: string + serviceNodePortRange: + description: ServiceNodePortRange Port range to reserve + for services with NodePort visibility. + type: string + type: object + type: object + version: + description: Version specifies the k3s version + type: string + type: object + machineTemplate: + description: MachineTemplate contains information about how machines + should be shaped when creating or updating a control plane. + properties: + infrastructureRef: + description: InfrastructureRef is a required reference to a custom + resource offered by an infrastructure provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part + of an object. TODO: this design is not final and this field + is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map + stored with a resource that may be set by external tools + to store and retrieve arbitrary metadata. They are not queryable + and should be preserved when modifying objects. More info: + http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used + to organize and categorize (scope and select) objects. May + match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + type: object + nodeDeletionTimeout: + description: NodeDeletionTimeout defines how long the machine + controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of + 0 will retry deletion indefinitely. 
+ If no value is provided, + the default value for this property of the Machine resource + will be used. + type: string + nodeDrainTimeout: + description: 'NodeDrainTimeout is the total amount of time that + the controller will spend on draining a controlplane node The + default value is 0, meaning that the node can be drained without + any time limitations. NOTE: NodeDrainTimeout is different from + `kubectl drain --timeout`' + type: string + required: + - infrastructureRef + type: object + replicas: + description: Number of desired machines. Defaults to 1. When stacked + etcd is used only odd numbers are permitted, as per [etcd best practice](https://etcd.io/docs/v3.3.12/faq/#why-an-odd-number-of-cluster-members). + This is a pointer to distinguish between explicit zero and not specified. + format: int32 + type: integer + rolloutAfter: + description: RolloutAfter is a field to indicate a rollout should + be performed after the specified time even if no changes have been + made to the K3sControlPlane. + format: date-time + type: string + rolloutStrategy: + default: + rollingUpdate: + maxSurge: 1 + type: RollingUpdate + description: The RolloutStrategy to use to replace control plane machines + with new ones. + properties: + rollingUpdate: + description: Rolling update config params. Present only if RolloutStrategyType + = RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: 'The maximum number of control planes that can + be scheduled above or under the desired number of control + planes. Value can be an absolute number 1 or 0. Defaults + to 1. Example: when this is set to 1, the control plane + can be scaled up immediately when the rolling update starts.' + x-kubernetes-int-or-string: true + type: object + type: + description: Type of rollout. Currently the only supported strategy + is "RollingUpdate". Default is RollingUpdate.
+ type: string + type: object + version: + description: Version defines the desired K3s version. + type: string + required: + - machineTemplate + - version + type: object + status: + description: K3sControlPlaneStatus defines the observed state of K3sControlPlane + properties: + conditions: + description: Conditions defines current service state of the K3sControlPlane. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. This should be when the underlying condition changed. + If that is not known, then using the time when the API field + changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. The specific API may choose whether or not this + field is considered a guaranteed API. This field may not be + empty. + type: string + severity: + description: Severity provides an explicit classification of + Reason code, so the users or machines can immediately understand + the current situation and act accordingly. The Severity field + MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important.
+ type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + failureMessage: + description: FailureMessage indicates that there is a terminal problem + reconciling the state, and will be set to a descriptive error message. + type: string + failureReason: + description: FailureReason indicates that there is a terminal problem + reconciling the state, and will be set to a token value suitable + for programmatic interpretation. + type: string + initialized: + description: Initialized denotes whether or not the control plane + has the uploaded kubeadm-config configmap. + type: boolean + observedGeneration: + description: ObservedGeneration is the latest generation observed + by the controller. + format: int64 + type: integer + ready: + description: Ready denotes that the K3sControlPlane API Server + is ready to receive requests. + type: boolean + readyReplicas: + description: Total number of fully running and ready control plane + machines. + format: int32 + type: integer + replicas: + description: Total number of non-terminated machines targeted by this + control plane (their labels match the selector). + format: int32 + type: integer + selector: + description: 'Selector is the label selector in string format to avoid + introspection by clients, and is used to provide the CRD-based integration + for the scale subresource and additional integrations for things + like kubectl describe. The string will be in the same format as + the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors' + type: string + unavailableReplicas: + description: Total number of unavailable machines targeted by this + control plane. This is the total number of machines that are still + required for the deployment to have 100% available capacity. They + may either be machines that are running but not yet ready or machines + that still have not been created.
+ format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated machines targeted by this + control plane that have the desired template spec. + format: int32 + type: integer + version: + description: Version represents the minimum Kubernetes version for + the control plane machines in the cluster. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/controlplane/k3s/config/crd/bases/controlplane.cluster.x-k8s.io_k3scontrolplanetemplates.yaml b/controlplane/k3s/config/crd/bases/controlplane.cluster.x-k8s.io_k3scontrolplanetemplates.yaml new file mode 100644 index 00000000..954f98d3 --- /dev/null +++ b/controlplane/k3s/config/crd/bases/controlplane.cluster.x-k8s.io_k3scontrolplanetemplates.yaml @@ -0,0 +1,445 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.1 + creationTimestamp: null + name: k3scontrolplanetemplates.controlplane.cluster.x-k8s.io +spec: + group: controlplane.cluster.x-k8s.io + names: + kind: K3sControlPlaneTemplate + listKind: K3sControlPlaneTemplateList + plural: k3scontrolplanetemplates + singular: k3scontrolplanetemplate + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: K3sControlPlaneTemplate is the Schema for the k3scontrolplanetemplates + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: K3sControlPlaneTemplateSpec defines the desired state of + K3sControlPlaneTemplate + properties: + template: + description: K3sControlPlaneTemplateResource describes the data needed + to create a K3sControlPlane from a template. + properties: + spec: + description: 'K3sControlPlaneTemplateResourceSpec defines the + desired state of KubeadmControlPlane. NOTE: K3sControlPlaneTemplateResourceSpec + is similar to K3sControlPlaneSpec but omits Replicas and Version + fields. These fields do not make sense on the K3sControlPlaneTemplate, + because they are calculated by the Cluster topology reconciler + during reconciliation and thus cannot be configured on the K3sControlPlaneTemplate.' + properties: + k3sConfigSpec: + description: K3sConfigSpec is a K3sConfigSpec to use for initializing + and joining machines to the control plane. + properties: + agentConfiguration: + description: AgentConfiguration defines the k3s agent + configuration. + properties: + networking: + description: Networking defines the k3s agent networking + configuration. + properties: + nodeExternalIP: + description: NodeExternalIP External IP address + to advertise for node. + type: string + nodeIP: + description: NodeIP IP address to advertise for + node. + type: string + resolvConf: + description: ResolvConf Path to Kubelet resolv.conf + file. + type: string + type: object + node: + description: Node defines the k3s agent node configuration. + properties: + dataDir: + description: DataDir Folder to hold state. + type: string + lbServerPort: + description: 'LBServerPort Local port for supervisor + client load-balancer. If the supervisor and + apiserver are not colocated an additional port + 1 less than this port will also be used for + the apiserver client load-balancer. 
(default: + 6444)' + type: integer + nodeLabels: + description: NodeLabels registering and starting + kubelet with set of labels. + items: + type: string + type: array + nodeName: + description: NodeName k3s node name. + type: string + nodeTaints: + description: NodeTaints registering and starting + kubelet with set of taints. + items: + type: string + type: array + seLinux: + description: SeLinux Enable SELinux in containerd + type: boolean + type: object + runtime: + description: Runtime defines the k3s agent runtime + configuration. + properties: + containerRuntimeEndpoint: + description: ContainerRuntimeEndpoint Disable + embedded containerd and use alternative CRI + implementation. + type: string + pauseImage: + description: PauseImage Customized pause image + for containerd or Docker sandbox. + type: string + privateRegistry: + description: PrivateRegistry Path to a private + registry configuration file. + type: string + type: object + type: object + cluster: + description: Cluster defines the k3s cluster Options. + properties: + server: + description: Server which server to connect to, used + to join a cluster. + type: string + token: + description: Token shared secret used to join a server + or agent to a cluster. + type: string + tokenFile: + description: TokenFile file containing the cluster-secret/token. + type: string + type: object + files: + description: Files specifies extra files to be passed + to user_data upon creation. + items: + description: File defines the input for generating write_files + in cloud-init. + properties: + append: + description: Append specifies whether to append + Content to existing file if Path exists. + type: boolean + content: + description: Content is the actual content of the + file. + type: string + contentFrom: + description: ContentFrom is a referenced source + of content to populate the file. + properties: + secret: + description: Secret represents a secret that + should populate this file. 
+ properties: + key: + description: Key is the key in the secret's + data map for this value. + type: string + name: + description: Name of the secret in the KubeadmBootstrapConfig's + namespace to use. + type: string + required: + - key + - name + type: object + required: + - secret + type: object + encoding: + description: Encoding specifies the encoding of + the file contents. + enum: + - base64 + - gzip + - gzip+base64 + type: string + owner: + description: Owner specifies the ownership of the + file, e.g. "root:root". + type: string + path: + description: Path specifies the full path on disk + where to store the file. + type: string + permissions: + description: Permissions specifies the permissions + to assign to the file, e.g. "0640". + type: string + required: + - path + type: object + type: array + postK3sCommands: + description: PostK3sCommands specifies extra commands + to run after k3s setup runs + items: + type: string + type: array + preK3sCommands: + description: PreK3sCommands specifies extra commands to + run before k3s setup runs + items: + type: string + type: array + serverConfiguration: + description: ServerConfiguration defines the k3s server + configuration. + properties: + agent: + description: Agent is the agent configuration. + properties: + networking: + description: Networking defines the k3s agent + networking configuration. + properties: + nodeExternalIP: + description: NodeExternalIP External IP address + to advertise for node. + type: string + nodeIP: + description: NodeIP IP address to advertise + for node. + type: string + resolvConf: + description: ResolvConf Path to Kubelet resolv.conf + file. + type: string + type: object + node: + description: Node defines the k3s agent node configuration. + properties: + dataDir: + description: DataDir Folder to hold state. + type: string + lbServerPort: + description: 'LBServerPort Local port for + supervisor client load-balancer. 
If the + supervisor and apiserver are not colocated + an additional port 1 less than this port + will also be used for the apiserver client + load-balancer. (default: 6444)' + type: integer + nodeLabels: + description: NodeLabels registering and starting + kubelet with set of labels. + items: + type: string + type: array + nodeName: + description: NodeName k3s node name. + type: string + nodeTaints: + description: NodeTaints registering and starting + kubelet with set of taints. + items: + type: string + type: array + seLinux: + description: SeLinux Enable SELinux in containerd + type: boolean + type: object + runtime: + description: Runtime defines the k3s agent runtime + configuration. + properties: + containerRuntimeEndpoint: + description: ContainerRuntimeEndpoint Disable + embedded containerd and use alternative + CRI implementation. + type: string + pauseImage: + description: PauseImage Customized pause image + for containerd or Docker sandbox. + type: string + privateRegistry: + description: PrivateRegistry Path to a private + registry configuration file. + type: string + type: object + type: object + database: + description: Database is the database configuration. + properties: + clusterInit: + description: ClusterInit initialize a new cluster + using embedded Etcd. + type: boolean + dataStoreCAFile: + description: DataStoreCAFile TLS Certificate Authority + file used to secure datastore backend communication. + type: string + dataStoreCertFile: + description: DataStoreCertFile TLS certification + file used to secure datastore backend communication. + type: string + dataStoreEndPoint: + description: DataStoreEndPoint specify etcd, Mysql, + Postgres, or Sqlite (default) data source name. + type: string + dataStoreKeyFile: + description: DataStoreKeyFile TLS key file used + to secure datastore backend communication. + type: string + type: object + listener: + description: Listener is the listener configuration. 
+ properties: + advertiseAddress: + description: AdvertiseAddress IP address that + apiserver uses to advertise to members of the + cluster. + type: string + advertisePort: + description: 'AdvertisePort Port that apiserver + uses to advertise to members of the cluster + (default: listen-port).' + type: integer + bindAddress: + description: BindAddress k3s bind address. + type: string + httpsListenPort: + description: HTTPSListenPort HTTPS listen port. + type: integer + tlsSan: + description: TLSSan Add additional hostname or + IP as a Subject Alternative Name in the TLS + cert. + type: string + type: object + networking: + description: Networking is the networking configuration. + properties: + clusterCIDR: + description: ClusterCIDR Network CIDR to use for + pod IPs. + type: string + clusterDNS: + description: ClusterDNS cluster IP for coredns + service. Should be in your service-cidr range. + type: string + clusterDomain: + description: ClusterDomain cluster Domain. + type: string + flannelBackend: + description: 'FlannelBackend One of ‘none’, ‘vxlan’, + ‘ipsec’, ‘host-gw’, or ‘wireguard’. (default: + vxlan)' + type: string + serviceCIDR: + description: ServiceCIDR Network CIDR to use for + services IPs. + type: string + serviceNodePortRange: + description: ServiceNodePortRange Port range to + reserve for services with NodePort visibility. + type: string + type: object + type: object + version: + description: Version specifies the k3s version + type: string + type: object + machineTemplate: + description: MachineTemplate contains information about how + machines should be shaped when creating or updating a control + plane. + properties: + nodeDeletionTimeout: + description: NodeDeletionTimeout defines how long the + machine controller will attempt to delete the Node that + the Machine hosts after the Machine is marked for deletion. + A duration of 0 will retry deletion indefinitely. 
+ If + no value is provided, the default value for this property + of the Machine resource will be used. + type: string + nodeDrainTimeout: + description: 'NodeDrainTimeout is the total amount of + time that the controller will spend on draining a controlplane + node The default value is 0, meaning that the node can + be drained without any time limitations. NOTE: NodeDrainTimeout + is different from `kubectl drain --timeout`' + type: string + type: object + rolloutAfter: + description: RolloutAfter is a field to indicate a rollout + should be performed after the specified time even if no + changes have been made to the K3sControlPlane. + format: date-time + type: string + rolloutStrategy: + default: + rollingUpdate: + maxSurge: 1 + type: RollingUpdate + description: The RolloutStrategy to use to replace control + plane machines with new ones. + properties: + rollingUpdate: + description: Rolling update config params. Present only + if RolloutStrategyType = RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: 'The maximum number of control planes + that can be scheduled above or under the desired + number of control planes. Value can be an absolute + number 1 or 0. Defaults to 1. Example: when this + is set to 1, the control plane can be scaled up + immediately when the rolling update starts.' + x-kubernetes-int-or-string: true + type: object + type: + description: Type of rollout. Currently the only supported + strategy is "RollingUpdate". Default is RollingUpdate.
+ type: string + type: object + required: + - k3sConfigSpec + type: object + required: + - spec + type: object + required: + - template + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/controlplane/k3s/config/crd/kustomization.yaml b/controlplane/k3s/config/crd/kustomization.yaml new file mode 100644 index 00000000..7ca630c2 --- /dev/null +++ b/controlplane/k3s/config/crd/kustomization.yaml @@ -0,0 +1,27 @@ +commonLabels: + cluster.x-k8s.io/v1beta1: v1beta1 + +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/controlplane.cluster.x-k8s.io_k3scontrolplanes.yaml +- bases/controlplane.cluster.x-k8s.io_k3scontrolplanetemplates.yaml +#+kubebuilder:scaffold:crdkustomizeresource + +patchesStrategicMerge: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +- patches/webhook_in_k3scontrolplanes.yaml +- patches/webhook_in_k3scontrolplanetemplates.yaml +#+kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +- patches/cainjection_in_k3scontrolplanes.yaml +- patches/cainjection_in_k3scontrolplanetemplates.yaml +#+kubebuilder:scaffold:crdkustomizecainjectionpatch + +# the following config is for teaching kustomize how to do kustomization for CRDs. 
+configurations: +- kustomizeconfig.yaml diff --git a/controlplane/k3s/config/crd/kustomizeconfig.yaml b/controlplane/k3s/config/crd/kustomizeconfig.yaml new file mode 100644 index 00000000..ec5c150a --- /dev/null +++ b/controlplane/k3s/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/controlplane/k3s/config/crd/patches/cainjection_in_k3scontrolplanes.yaml b/controlplane/k3s/config/crd/patches/cainjection_in_k3scontrolplanes.yaml new file mode 100644 index 00000000..8a4b3869 --- /dev/null +++ b/controlplane/k3s/config/crd/patches/cainjection_in_k3scontrolplanes.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: k3scontrolplanes.controlplane.cluster.x-k8s.io diff --git a/controlplane/k3s/config/crd/patches/cainjection_in_k3scontrolplanetemplates.yaml b/controlplane/k3s/config/crd/patches/cainjection_in_k3scontrolplanetemplates.yaml new file mode 100644 index 00000000..a5779327 --- /dev/null +++ b/controlplane/k3s/config/crd/patches/cainjection_in_k3scontrolplanetemplates.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: 
$(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: k3scontrolplanetemplates.controlplane.cluster.x-k8s.io diff --git a/controlplane/k3s/config/crd/patches/webhook_in_k3scontrolplanes.yaml b/controlplane/k3s/config/crd/patches/webhook_in_k3scontrolplanes.yaml new file mode 100644 index 00000000..a96c014f --- /dev/null +++ b/controlplane/k3s/config/crd/patches/webhook_in_k3scontrolplanes.yaml @@ -0,0 +1,18 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: k3scontrolplanes.controlplane.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/controlplane/k3s/config/crd/patches/webhook_in_k3scontrolplanetemplates.yaml b/controlplane/k3s/config/crd/patches/webhook_in_k3scontrolplanetemplates.yaml new file mode 100644 index 00000000..53e5e9c0 --- /dev/null +++ b/controlplane/k3s/config/crd/patches/webhook_in_k3scontrolplanetemplates.yaml @@ -0,0 +1,18 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: k3scontrolplanetemplates.controlplane.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git 
a/controlplane/k3s/config/default/kustomization.yaml b/controlplane/k3s/config/default/kustomization.yaml new file mode 100644 index 00000000..439bb3bd --- /dev/null +++ b/controlplane/k3s/config/default/kustomization.yaml @@ -0,0 +1,58 @@ +namespace: capkk-k3s-control-plane-system + +namePrefix: capkk-k3s-control-plane- + +commonLabels: + cluster.x-k8s.io/provider: "control-plane-k3s" + +resources: + - namespace.yaml + +bases: + - ../crd + - ../rbac + - ../manager + - ../webhook + - ../certmanager + +patchesStrategicMerge: + # Provide customizable hook for make targets. + - manager_image_patch.yaml + - manager_pull_policy.yaml + # Enable webhook. + - manager_webhook_patch.yaml + # Inject certificate in the webhook definition. + - webhookcainjection_patch.yaml + # Enable aggregated ClusterRole aggregation + - manager_role_aggregation_patch.yaml + +vars: + - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + fieldref: + fieldpath: metadata.namespace + - name: CERTIFICATE_NAME + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + - name: SERVICE_NAMESPACE # namespace of the service + objref: + kind: Service + version: v1 + name: webhook-service + fieldref: + fieldpath: metadata.namespace + - name: SERVICE_NAME + objref: + kind: Service + version: v1 + name: webhook-service + +configurations: + - kustomizeconfig.yaml diff --git a/controlplane/k3s/config/default/kustomizeconfig.yaml b/controlplane/k3s/config/default/kustomizeconfig.yaml new file mode 100644 index 00000000..eb191e64 --- /dev/null +++ b/controlplane/k3s/config/default/kustomizeconfig.yaml @@ -0,0 +1,4 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +varReference: +- kind: Deployment + path: 
spec/template/spec/volumes/secret/secretName diff --git a/controlplane/k3s/config/default/manager_image_patch.yaml b/controlplane/k3s/config/default/manager_image_patch.yaml new file mode 100644 index 00000000..ac1f04b8 --- /dev/null +++ b/controlplane/k3s/config/default/manager_image_patch.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - image: docker.io/kubespheredev/k3s-control-plane-controller:main + name: manager diff --git a/controlplane/k3s/config/default/manager_pull_policy.yaml b/controlplane/k3s/config/default/manager_pull_policy.yaml new file mode 100644 index 00000000..74a0879c --- /dev/null +++ b/controlplane/k3s/config/default/manager_pull_policy.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: Always diff --git a/controlplane/k3s/config/default/manager_role_aggregation_patch.yaml b/controlplane/k3s/config/default/manager_role_aggregation_patch.yaml new file mode 100644 index 00000000..25399dfe --- /dev/null +++ b/controlplane/k3s/config/default/manager_role_aggregation_patch.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role + labels: + kubeadm.controlplane.cluster.x-k8s.io/aggregate-to-manager: "true" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: aggregated-manager-role diff --git a/controlplane/k3s/config/default/manager_webhook_patch.yaml b/controlplane/k3s/config/default/manager_webhook_patch.yaml new file mode 100644 index 00000000..bccef6d7 --- /dev/null +++ b/controlplane/k3s/config/default/manager_webhook_patch.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: 
+ name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + secretName: $(SERVICE_NAME)-cert diff --git a/controlplane/k3s/config/default/namespace.yaml b/controlplane/k3s/config/default/namespace.yaml new file mode 100644 index 00000000..8b55c3cd --- /dev/null +++ b/controlplane/k3s/config/default/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: system diff --git a/controlplane/k3s/config/default/webhookcainjection_patch.yaml b/controlplane/k3s/config/default/webhookcainjection_patch.yaml new file mode 100644 index 00000000..02ab515d --- /dev/null +++ b/controlplane/k3s/config/default/webhookcainjection_patch.yaml @@ -0,0 +1,15 @@ +# This patch add annotation to admission webhook config and +# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. 
+apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: mutating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) diff --git a/controlplane/k3s/config/manager/kustomization.yaml b/controlplane/k3s/config/manager/kustomization.yaml new file mode 100644 index 00000000..7394a6d0 --- /dev/null +++ b/controlplane/k3s/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: + - manager.yaml diff --git a/controlplane/k3s/config/manager/manager.yaml b/controlplane/k3s/config/manager/manager.yaml new file mode 100644 index 00000000..f1190754 --- /dev/null +++ b/controlplane/k3s/config/manager/manager.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - command: + - /manager + args: + - "--leader-elect" + - "--metrics-bind-addr=localhost:8080" + image: controller:latest + name: manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + ports: + - containerPort: 9440 + name: healthz + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + terminationGracePeriodSeconds: 10 + serviceAccountName: manager + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: 
NoSchedule + key: node-role.kubernetes.io/control-plane diff --git a/controlplane/k3s/config/rbac/aggregated_role.yaml b/controlplane/k3s/config/rbac/aggregated_role.yaml new file mode 100644 index 00000000..7c07f5a5 --- /dev/null +++ b/controlplane/k3s/config/rbac/aggregated_role.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: aggregated-manager-role +aggregationRule: + clusterRoleSelectors: + - matchLabels: + kubeadm.controlplane.cluster.x-k8s.io/aggregate-to-manager: "true" +rules: [] diff --git a/controlplane/k3s/config/rbac/kustomization.yaml b/controlplane/k3s/config/rbac/kustomization.yaml new file mode 100644 index 00000000..03140358 --- /dev/null +++ b/controlplane/k3s/config/rbac/kustomization.yaml @@ -0,0 +1,7 @@ +resources: +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +- aggregated_role.yaml diff --git a/controlplane/k3s/config/rbac/leader_election_role.yaml b/controlplane/k3s/config/rbac/leader_election_role.yaml new file mode 100644 index 00000000..4190ec80 --- /dev/null +++ b/controlplane/k3s/config/rbac/leader_election_role.yaml @@ -0,0 +1,37 @@ +# permissions to do leader election. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/controlplane/k3s/config/rbac/leader_election_role_binding.yaml b/controlplane/k3s/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 00000000..a73dfa95 --- /dev/null +++ b/controlplane/k3s/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: + - kind: ServiceAccount + name: manager + namespace: system diff --git a/controlplane/k3s/config/rbac/role.yaml b/controlplane/k3s/config/rbac/role.yaml new file mode 100644 index 00000000..edf8324f --- /dev/null +++ b/controlplane/k3s/config/rbac/role.yaml @@ -0,0 +1,72 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch +- apiGroups: + - bootstrap.cluster.x-k8s.io + - controlplane.cluster.x-k8s.io + - infrastructure.cluster.x-k8s.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.x-k8s.io + resources: + - clusters + - clusters/status + verbs: + - get + - list + - watch +- apiGroups: + - cluster.x-k8s.io + resources: + - machines + - machines/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - 
events + verbs: + - create + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get + - list + - patch + - update + - watch diff --git a/controlplane/k3s/config/rbac/role_binding.yaml b/controlplane/k3s/config/rbac/role_binding.yaml new file mode 100644 index 00000000..3ffc9c2e --- /dev/null +++ b/controlplane/k3s/config/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: + - kind: ServiceAccount + name: manager + namespace: system diff --git a/controlplane/k3s/config/rbac/service_account.yaml b/controlplane/k3s/config/rbac/service_account.yaml new file mode 100644 index 00000000..77f747b5 --- /dev/null +++ b/controlplane/k3s/config/rbac/service_account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: manager + namespace: system diff --git a/controlplane/k3s/config/samples/controlplane_v1beta1_k3scontrolplane.yaml b/controlplane/k3s/config/samples/controlplane_v1beta1_k3scontrolplane.yaml new file mode 100644 index 00000000..b89a0562 --- /dev/null +++ b/controlplane/k3s/config/samples/controlplane_v1beta1_k3scontrolplane.yaml @@ -0,0 +1,6 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: K3sControlPlane +metadata: + name: k3scontrolplane-sample +spec: + # TODO(user): Add fields here diff --git a/controlplane/k3s/config/samples/controlplane_v1beta1_k3scontrolplanetemplate.yaml b/controlplane/k3s/config/samples/controlplane_v1beta1_k3scontrolplanetemplate.yaml new file mode 100644 index 00000000..3c8aa7d9 --- /dev/null +++ b/controlplane/k3s/config/samples/controlplane_v1beta1_k3scontrolplanetemplate.yaml @@ -0,0 +1,6 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: K3sControlPlaneTemplate +metadata: + name: k3scontrolplanetemplate-sample +spec: + # TODO(user): Add fields 
here diff --git a/controlplane/k3s/config/samples/kustomization.yaml b/controlplane/k3s/config/samples/kustomization.yaml new file mode 100644 index 00000000..bd527764 --- /dev/null +++ b/controlplane/k3s/config/samples/kustomization.yaml @@ -0,0 +1,5 @@ +## Append samples you want in your CSV to this file as resources ## +resources: +- controlplane_v1beta1_k3scontrolplane.yaml +- controlplane_v1beta1_k3scontrolplanetemplate.yaml +#+kubebuilder:scaffold:manifestskustomizesamples diff --git a/controlplane/k3s/config/scorecard/bases/config.yaml b/controlplane/k3s/config/scorecard/bases/config.yaml new file mode 100644 index 00000000..c7704784 --- /dev/null +++ b/controlplane/k3s/config/scorecard/bases/config.yaml @@ -0,0 +1,7 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: [] diff --git a/controlplane/k3s/config/scorecard/kustomization.yaml b/controlplane/k3s/config/scorecard/kustomization.yaml new file mode 100644 index 00000000..50cd2d08 --- /dev/null +++ b/controlplane/k3s/config/scorecard/kustomization.yaml @@ -0,0 +1,16 @@ +resources: +- bases/config.yaml +patchesJson6902: +- path: patches/basic.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +- path: patches/olm.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +#+kubebuilder:scaffold:patchesJson6902 diff --git a/controlplane/k3s/config/scorecard/patches/basic.config.yaml b/controlplane/k3s/config/scorecard/patches/basic.config.yaml new file mode 100644 index 00000000..90f7ef77 --- /dev/null +++ b/controlplane/k3s/config/scorecard/patches/basic.config.yaml @@ -0,0 +1,10 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.23.0 + labels: + suite: basic + test: basic-check-spec-test 
diff --git a/controlplane/k3s/config/scorecard/patches/olm.config.yaml b/controlplane/k3s/config/scorecard/patches/olm.config.yaml new file mode 100644 index 00000000..b55840e1 --- /dev/null +++ b/controlplane/k3s/config/scorecard/patches/olm.config.yaml @@ -0,0 +1,50 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.23.0 + labels: + suite: olm + test: olm-bundle-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.23.0 + labels: + suite: olm + test: olm-crds-have-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.23.0 + labels: + suite: olm + test: olm-crds-have-resources-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.23.0 + labels: + suite: olm + test: olm-spec-descriptors-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.23.0 + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/controlplane/k3s/config/webhook/kustomization.yaml b/controlplane/k3s/config/webhook/kustomization.yaml new file mode 100644 index 00000000..9cf26134 --- /dev/null +++ b/controlplane/k3s/config/webhook/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- manifests.yaml +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/controlplane/k3s/config/webhook/kustomizeconfig.yaml b/controlplane/k3s/config/webhook/kustomizeconfig.yaml new file mode 100644 index 00000000..25e21e3c --- /dev/null +++ b/controlplane/k3s/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,25 @@ +# the 
following config is for teaching kustomize where to look at when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true + +varReference: +- path: metadata/annotations diff --git a/controlplane/k3s/config/webhook/manifests.yaml b/controlplane/k3s/config/webhook/manifests.yaml new file mode 100644 index 00000000..a1d142c8 --- /dev/null +++ b/controlplane/k3s/config/webhook/manifests.yaml @@ -0,0 +1,100 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + creationTimestamp: null + name: mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-controlplane-cluster-x-k8s-io-v1beta1-k3scontrolplane + failurePolicy: Fail + matchPolicy: Equivalent + name: default.k3scontrolplane.controlplane.cluster.x-k8s.io + rules: + - apiGroups: + - controlplane.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - k3scontrolplanes + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-controlplane-cluster-x-k8s-io-v1beta1-k3scontrolplanetemplate + failurePolicy: Fail + name: default.k3scontrolplanetemplate.controlplane.cluster.x-k8s.io + rules: + - apiGroups: + - 
controlplane.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - k3scontrolplanetemplates + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-controlplane-cluster-x-k8s-io-v1beta1-k3scontrolplane + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.k3scontrolplane.controlplane.cluster.x-k8s.io + rules: + - apiGroups: + - controlplane.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - k3scontrolplanes + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-controlplane-cluster-x-k8s-io-v1beta1-k3scontrolplanetemplate + failurePolicy: Fail + name: validation.k3scontrolplanetemplate.controlplane.cluster.x-k8s.io + rules: + - apiGroups: + - controlplane.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - k3scontrolplanetemplates + sideEffects: None diff --git a/controlplane/k3s/config/webhook/service.yaml b/controlplane/k3s/config/webhook/service.yaml new file mode 100644 index 00000000..9bc95014 --- /dev/null +++ b/controlplane/k3s/config/webhook/service.yaml @@ -0,0 +1,10 @@ + +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + targetPort: webhook-server diff --git a/controlplane/k3s/controllers/consts.go b/controlplane/k3s/controllers/consts.go new file mode 100644 index 00000000..c5545ffc --- /dev/null +++ b/controlplane/k3s/controllers/consts.go @@ -0,0 +1,35 @@ +/* + Copyright 2022 The KubeSphere Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package controllers + +import ( + "time" +) + +const ( + // deleteRequeueAfter is how long to wait before checking again to see if + // all control plane machines have been deleted. + deleteRequeueAfter = 30 * time.Second + + // preflightFailedRequeueAfter is how long to wait before trying to scale + // up/down if some preflight check for those operations has failed. + preflightFailedRequeueAfter = 15 * time.Second + + // dependentCertRequeueAfter is how long to wait before checking again to see if + // dependent certificates have been created. + dependentCertRequeueAfter = 30 * time.Second +) diff --git a/controlplane/k3s/controllers/doc.go b/controlplane/k3s/controllers/doc.go new file mode 100644 index 00000000..e2710628 --- /dev/null +++ b/controlplane/k3s/controllers/doc.go @@ -0,0 +1,18 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +// Package controllers contains k3s control plane controller. +package controllers diff --git a/controlplane/k3s/controllers/helpers.go b/controlplane/k3s/controllers/helpers.go new file mode 100644 index 00000000..c0368de0 --- /dev/null +++ b/controlplane/k3s/controllers/helpers.go @@ -0,0 +1,301 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package controllers + +import ( + "context" + "encoding/json" + "strings" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/storage/names" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/external" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" + utilconversion "sigs.k8s.io/cluster-api/util/conversion" + "sigs.k8s.io/cluster-api/util/kubeconfig" + "sigs.k8s.io/cluster-api/util/patch" + ctrl "sigs.k8s.io/controller-runtime" + + infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1" + infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1" + k3sCluster 
"github.com/kubesphere/kubekey/controlplane/k3s/pkg/cluster" + "github.com/kubesphere/kubekey/util/secret" +) + +func (r *K3sControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, cluster *clusterv1.Cluster, kcp *infracontrolplanev1.K3sControlPlane) (ctrl.Result, error) { + endpoint := cluster.Spec.ControlPlaneEndpoint + if endpoint.IsZero() { + return ctrl.Result{}, nil + } + + controllerOwnerRef := *metav1.NewControllerRef(kcp, infracontrolplanev1.GroupVersion.WithKind("K3sControlPlane")) + clusterName := util.ObjectKey(cluster) + configSecret, err := secret.GetFromNamespacedName(ctx, r.Client, clusterName, secret.Kubeconfig) + switch { + case apierrors.IsNotFound(err): + createErr := kubeconfig.CreateSecretWithOwner( + ctx, + r.Client, + clusterName, + endpoint.String(), + controllerOwnerRef, + ) + if errors.Is(createErr, kubeconfig.ErrDependentCertificateNotFound) { + return ctrl.Result{RequeueAfter: dependentCertRequeueAfter}, nil + } + // always return if we have just created in order to skip rotation checks + return ctrl.Result{}, createErr + case err != nil: + return ctrl.Result{}, errors.Wrap(err, "failed to retrieve kubeconfig Secret") + } + + // check if the kubeconfig secret was created by controllers, and thus it has the Cluster as the owner instead of KCP; + // if yes, adopt it. 
+ if util.IsOwnedByObject(configSecret, cluster) && !util.IsControlledBy(configSecret, kcp) { + if err := r.adoptKubeconfigSecret(ctx, cluster, configSecret, controllerOwnerRef); err != nil { + return ctrl.Result{}, err + } + } + + // only do rotation on owned secrets + if !util.IsControlledBy(configSecret, kcp) { + return ctrl.Result{}, nil + } + + return ctrl.Result{}, nil +} + +func (r *K3sControlPlaneReconciler) adoptKubeconfigSecret(ctx context.Context, cluster *clusterv1.Cluster, configSecret *corev1.Secret, controllerOwnerRef metav1.OwnerReference) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Adopting KubeConfig secret created by controllers", "Name", configSecret.Name) + + patch, err := patch.NewHelper(configSecret, r.Client) + if err != nil { + return errors.Wrap(err, "failed to create patch helper for the kubeconfig secret") + } + configSecret.OwnerReferences = util.RemoveOwnerRef(configSecret.OwnerReferences, metav1.OwnerReference{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: cluster.Name, + UID: cluster.UID, + }) + configSecret.OwnerReferences = util.EnsureOwnerRef(configSecret.OwnerReferences, controllerOwnerRef) + if err := patch.Patch(ctx, configSecret); err != nil { + return errors.Wrap(err, "failed to patch the kubeconfig secret") + } + return nil +} + +func (r *K3sControlPlaneReconciler) reconcileExternalReference(ctx context.Context, cluster *clusterv1.Cluster, ref *corev1.ObjectReference) error { + if !strings.HasSuffix(ref.Kind, clusterv1.TemplateSuffix) { + return nil + } + + if err := utilconversion.UpdateReferenceAPIContract(ctx, r.Client, r.APIReader, ref); err != nil { + return err + } + + obj, err := external.Get(ctx, r.Client, ref, cluster.Namespace) + if err != nil { + return err + } + + // Note: We intentionally do not handle checking for the paused label on an external template reference + + patchHelper, err := patch.NewHelper(obj, r.Client) + if err != nil { + return err + } + + 
obj.SetOwnerReferences(util.EnsureOwnerRef(obj.GetOwnerReferences(), metav1.OwnerReference{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: cluster.Name, + UID: cluster.UID, + })) + + return patchHelper.Patch(ctx, obj) +} + +func (r *K3sControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, cluster *clusterv1.Cluster, kcp *infracontrolplanev1.K3sControlPlane, bootstrapSpec *infrabootstrapv1.K3sConfigSpec, failureDomain *string) error { + var errs []error + + // Since the cloned resource should eventually have a controller ref for the Machine, we create an + // OwnerReference here without the Controller field set + infraCloneOwner := &metav1.OwnerReference{ + APIVersion: infracontrolplanev1.GroupVersion.String(), + Kind: "K3sControlPlane", + Name: kcp.Name, + UID: kcp.UID, + } + + // Clone the infrastructure template + infraRef, err := external.CloneTemplate(ctx, &external.CloneTemplateInput{ + Client: r.Client, + TemplateRef: &kcp.Spec.MachineTemplate.InfrastructureRef, + Namespace: kcp.Namespace, + OwnerRef: infraCloneOwner, + ClusterName: cluster.Name, + Labels: k3sCluster.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Annotations: kcp.Spec.MachineTemplate.ObjectMeta.Annotations, + }) + if err != nil { + // Safe to return early here since no resources have been created yet. 
+ conditions.MarkFalse(kcp, infracontrolplanev1.MachinesCreatedCondition, infracontrolplanev1.InfrastructureTemplateCloningFailedReason, + clusterv1.ConditionSeverityError, err.Error()) + return errors.Wrap(err, "failed to clone infrastructure template") + } + + // Clone the bootstrap configuration + bootstrapRef, err := r.generateK3sConfig(ctx, kcp, cluster, bootstrapSpec) + if err != nil { + conditions.MarkFalse(kcp, infracontrolplanev1.MachinesCreatedCondition, infracontrolplanev1.BootstrapTemplateCloningFailedReason, + clusterv1.ConditionSeverityError, err.Error()) + errs = append(errs, errors.Wrap(err, "failed to generate bootstrap config")) + } + + // Only proceed to generating the Machine if we haven't encountered an error + if len(errs) == 0 { + if err := r.generateMachine(ctx, kcp, cluster, infraRef, bootstrapRef, failureDomain); err != nil { + conditions.MarkFalse(kcp, infracontrolplanev1.MachinesCreatedCondition, infracontrolplanev1.MachineGenerationFailedReason, + clusterv1.ConditionSeverityError, err.Error()) + errs = append(errs, errors.Wrap(err, "failed to create Machine")) + } + } + + // If we encountered any errors, attempt to clean up any dangling resources + if len(errs) > 0 { + if err := r.cleanupFromGeneration(ctx, infraRef, bootstrapRef); err != nil { + errs = append(errs, errors.Wrap(err, "failed to cleanup generated resources")) + } + + return kerrors.NewAggregate(errs) + } + + return nil +} + +func (r *K3sControlPlaneReconciler) cleanupFromGeneration(ctx context.Context, remoteRefs ...*corev1.ObjectReference) error { + var errs []error + + for _, ref := range remoteRefs { + if ref == nil { + continue + } + config := &unstructured.Unstructured{} + config.SetKind(ref.Kind) + config.SetAPIVersion(ref.APIVersion) + config.SetNamespace(ref.Namespace) + config.SetName(ref.Name) + + if err := r.Client.Delete(ctx, config); err != nil && !apierrors.IsNotFound(err) { + errs = append(errs, errors.Wrap(err, "failed to cleanup generated resources after 
error")) + } + } + + return kerrors.NewAggregate(errs) +} + +func (r *K3sControlPlaneReconciler) generateK3sConfig(ctx context.Context, kcp *infracontrolplanev1.K3sControlPlane, cluster *clusterv1.Cluster, spec *infrabootstrapv1.K3sConfigSpec) (*corev1.ObjectReference, error) { + // Create an owner reference without a controller reference because the owning controller is the machine controller + owner := metav1.OwnerReference{ + APIVersion: infracontrolplanev1.GroupVersion.String(), + Kind: "K3sControlPlane", + Name: kcp.Name, + UID: kcp.UID, + } + + bootstrapConfig := &infrabootstrapv1.K3sConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.SimpleNameGenerator.GenerateName(kcp.Name + "-"), + Namespace: kcp.Namespace, + Labels: k3sCluster.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Annotations: kcp.Spec.MachineTemplate.ObjectMeta.Annotations, + OwnerReferences: []metav1.OwnerReference{owner}, + }, + Spec: *spec, + } + + if err := r.Client.Create(ctx, bootstrapConfig); err != nil { + return nil, errors.Wrap(err, "Failed to create bootstrap configuration") + } + + bootstrapRef := &corev1.ObjectReference{ + APIVersion: bootstrapv1.GroupVersion.String(), + Kind: "K3sConfig", + Name: bootstrapConfig.GetName(), + Namespace: bootstrapConfig.GetNamespace(), + UID: bootstrapConfig.GetUID(), + } + + return bootstrapRef, nil +} + +func (r *K3sControlPlaneReconciler) generateMachine(ctx context.Context, kcp *infracontrolplanev1.K3sControlPlane, cluster *clusterv1.Cluster, infraRef, bootstrapRef *corev1.ObjectReference, failureDomain *string) error { + machine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.SimpleNameGenerator.GenerateName(kcp.Name + "-"), + Namespace: kcp.Namespace, + Labels: k3sCluster.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("K3sControlPlane")), + }, + }, + 
Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: &kcp.Spec.Version, + InfrastructureRef: *infraRef, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: bootstrapRef, + }, + FailureDomain: failureDomain, + NodeDrainTimeout: kcp.Spec.MachineTemplate.NodeDrainTimeout, + }, + } + if kcp.Spec.MachineTemplate.NodeDeletionTimeout != nil { + machine.Spec.NodeDeletionTimeout = kcp.Spec.MachineTemplate.NodeDeletionTimeout + } + + // Machine's bootstrap config may be missing ClusterConfiguration if it is not the first machine in the control plane. + // We store ClusterConfiguration as annotation here to detect any changes in KCP ClusterConfiguration and rollout the machine if any. + serverConfig, err := json.Marshal(kcp.Spec.K3sConfigSpec.ServerConfiguration) + if err != nil { + return errors.Wrap(err, "failed to marshal cluster configuration") + } + + // Add the annotations from the MachineTemplate. + // Note: we intentionally don't use the map directly to ensure we don't modify the map in KCP. + for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Annotations { + machine.Annotations[k] = v + } + machine.Annotations[infracontrolplanev1.K3sServerConfigurationAnnotation] = string(serverConfig) + + if err := r.Client.Create(ctx, machine); err != nil { + return errors.Wrap(err, "failed to create machine") + } + return nil +} diff --git a/controlplane/k3s/controllers/k3scontrolplane_controller.go b/controlplane/k3s/controllers/k3scontrolplane_controller.go new file mode 100644 index 00000000..a3ad63b3 --- /dev/null +++ b/controlplane/k3s/controllers/k3scontrolplane_controller.go @@ -0,0 +1,598 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "time" + + "github.com/blang/semver" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/remote" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" + + infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1" + infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1" + k3sCluster "github.com/kubesphere/kubekey/controlplane/k3s/pkg/cluster" + "github.com/kubesphere/kubekey/util/secret" +) + +// K3sControlPlaneReconciler reconciles a K3sControlPlane object +type K3sControlPlaneReconciler struct { + client.Client + Scheme *runtime.Scheme + APIReader client.Reader + controller 
controller.Controller + recorder record.EventRecorder + Tracker *remote.ClusterCacheTracker + + // WatchFilterValue is the label value used to filter events prior to reconciliation. + WatchFilterValue string + + managementCluster k3sCluster.ManagementCluster + managementClusterUncached k3sCluster.ManagementCluster +} + +// SetupWithManager sets up the controller with the Manager. +func (r *K3sControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + c, err := ctrl.NewControllerManagedBy(mgr). + For(&infracontrolplanev1.K3sControlPlane{}). + Owns(&clusterv1.Machine{}). + WithOptions(options). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). + Build(r) + if err != nil { + return errors.Wrap(err, "failed setting up with a controller manager") + } + + err = c.Watch( + &source.Kind{Type: &clusterv1.Cluster{}}, + handler.EnqueueRequestsFromMapFunc(r.ClusterToK3sControlPlane), + predicates.All(ctrl.LoggerFrom(ctx), + predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), + predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), + ), + ) + if err != nil { + return errors.Wrap(err, "failed adding Watch for Clusters to controller manager") + } + + r.controller = c + r.recorder = mgr.GetEventRecorderFor("k3s-control-plane-controller") + + if r.managementCluster == nil { + if r.Tracker == nil { + return errors.New("cluster cache tracker is nil, cannot create the internal management cluster resource") + } + r.managementCluster = &k3sCluster.Management{ + Client: r.Client, + Tracker: r.Tracker, + } + } + + if r.managementClusterUncached == nil { + r.managementClusterUncached = &k3sCluster.Management{Client: mgr.GetAPIReader()} + } + + return nil +} + +// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch 
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io;bootstrap.cluster.x-k8s.io;controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch + +// Reconcile handles K3sControlPlane events. +func (r *K3sControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, retErr error) { + log := ctrl.LoggerFrom(ctx) + + // Fetch the K3sControlPlane instance. + kcp := &infracontrolplanev1.K3sControlPlane{} + if err := r.Client.Get(ctx, req.NamespacedName, kcp); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{Requeue: true}, nil + } + + // Fetch the Cluster. + cluster, err := util.GetOwnerCluster(ctx, r.Client, kcp.ObjectMeta) + if err != nil { + log.Error(err, "Failed to retrieve owner Cluster from the API Server") + return ctrl.Result{}, err + } + if cluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return ctrl.Result{}, nil + } + log = log.WithValues("cluster", cluster.Name) + + if annotations.IsPaused(cluster, kcp) { + log.Info("Reconciliation is paused for this object") + return ctrl.Result{}, nil + } + + // Initialize the patch helper. 
+ patchHelper, err := patch.NewHelper(kcp, r.Client) + if err != nil { + log.Error(err, "Failed to configure the patch helper") + return ctrl.Result{Requeue: true}, nil + } + + // Add finalizer first if not exist to avoid the race condition between init and delete + if !controllerutil.ContainsFinalizer(kcp, infracontrolplanev1.K3sControlPlaneFinalizer) { + controllerutil.AddFinalizer(kcp, infracontrolplanev1.K3sControlPlaneFinalizer) + + // patch and return right away instead of reusing the main defer, + // because the main defer may take too much time to get cluster status + // Patch ObservedGeneration only if the reconciliation completed successfully + patchOpts := []patch.Option{patch.WithStatusObservedGeneration{}} + if err := patchHelper.Patch(ctx, kcp, patchOpts...); err != nil { + log.Error(err, "Failed to patch K3sControlPlane to add finalizer") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil + } + + defer func() { + // Always attempt to update status. + if err := r.updateStatus(ctx, kcp, cluster); err != nil { + var connFailure *k3sCluster.RemoteClusterConnectionError + if errors.As(err, &connFailure) { + log.Info("Could not connect to workload cluster to fetch status", "err", err.Error()) + } else { + log.Error(err, "Failed to update KubeadmControlPlane Status") + retErr = kerrors.NewAggregate([]error{retErr, err}) + } + } + + // Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation. + if err := patchK3sControlPlane(ctx, patchHelper, kcp); err != nil { + log.Error(err, "Failed to patch KubeadmControlPlane") + retErr = kerrors.NewAggregate([]error{retErr, err}) + } + + // TODO: remove this as soon as we have a proper remote cluster cache in place. + // Make KCP to requeue in case status is not ready, so we can check for node status without waiting for a full resync (by default 10 minutes). 
+ // Only requeue if we are not going in exponential backoff due to error, or if we are not already re-queueing, or if the object has a deletion timestamp. + if retErr == nil && !res.Requeue && res.RequeueAfter <= 0 && kcp.ObjectMeta.DeletionTimestamp.IsZero() { + if !kcp.Status.Ready { + res = ctrl.Result{RequeueAfter: 20 * time.Second} + } + } + }() + + if !kcp.ObjectMeta.DeletionTimestamp.IsZero() { + // Handle deletion reconciliation loop. + return r.reconcileDelete(ctx, cluster, kcp) + } + + // Handle normal reconciliation loop. + return r.reconcile(ctx, cluster, kcp) +} + +func patchK3sControlPlane(ctx context.Context, patchHelper *patch.Helper, kcp *infracontrolplanev1.K3sControlPlane) error { + // Always update the readyCondition by summarizing the state of other conditions. + conditions.SetSummary(kcp, + conditions.WithConditions( + infracontrolplanev1.MachinesCreatedCondition, + infracontrolplanev1.MachinesSpecUpToDateCondition, + infracontrolplanev1.ResizedCondition, + infracontrolplanev1.MachinesReadyCondition, + infracontrolplanev1.AvailableCondition, + infracontrolplanev1.CertificatesAvailableCondition, + ), + ) + + // Patch the object, ignoring conflicts on the conditions owned by this controller. + return patchHelper.Patch( + ctx, + kcp, + patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + infracontrolplanev1.MachinesCreatedCondition, + clusterv1.ReadyCondition, + infracontrolplanev1.MachinesSpecUpToDateCondition, + infracontrolplanev1.ResizedCondition, + infracontrolplanev1.MachinesReadyCondition, + infracontrolplanev1.AvailableCondition, + infracontrolplanev1.CertificatesAvailableCondition, + }}, + patch.WithStatusObservedGeneration{}, + ) +} + +// reconcile handles KubeadmControlPlane reconciliation. 
+func (r *K3sControlPlaneReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *infracontrolplanev1.K3sControlPlane) (res ctrl.Result, retErr error) { + log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log.Info("Reconcile KubeadmControlPlane") + + // Make sure to reconcile the external infrastructure reference. + if err := r.reconcileExternalReference(ctx, cluster, &kcp.Spec.MachineTemplate.InfrastructureRef); err != nil { + return ctrl.Result{}, err + } + + // Wait for the cluster infrastructure to be ready before creating machines + if !cluster.Status.InfrastructureReady { + log.Info("Cluster infrastructure is not ready yet") + return ctrl.Result{}, nil + } + + // Generate Cluster Certificates if needed + config := kcp.Spec.K3sConfigSpec.DeepCopy() + config.AgentConfiguration = nil + if config.ServerConfiguration == nil { + config.ServerConfiguration = &infrabootstrapv1.ServerConfiguration{} + } + certificates := secret.NewCertificatesForInitialControlPlane() + controllerRef := metav1.NewControllerRef(kcp, infracontrolplanev1.GroupVersion.WithKind("K3sControlPlane")) + if err := certificates.LookupOrGenerate(ctx, r.Client, util.ObjectKey(cluster), *controllerRef); err != nil { + log.Error(err, "unable to lookup or create cluster certificates") + conditions.MarkFalse(kcp, infracontrolplanev1.CertificatesAvailableCondition, infracontrolplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + return ctrl.Result{}, err + } + conditions.MarkTrue(kcp, infracontrolplanev1.CertificatesAvailableCondition) + + // If ControlPlaneEndpoint is not set, return early + if !cluster.Spec.ControlPlaneEndpoint.IsValid() { + log.Info("Cluster does not yet have a ControlPlaneEndpoint defined") + return ctrl.Result{}, nil + } + + // Generate Cluster Kubeconfig if needed + if result, err := r.reconcileKubeconfig(ctx, cluster, kcp); !result.IsZero() || err != nil { + if err != nil { + log.Error(err, "failed to reconcile 
Kubeconfig") + } + return result, err + } + + controlPlaneMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, cluster, collections.ControlPlaneMachines(cluster.Name)) + if err != nil { + log.Error(err, "failed to retrieve control plane machines for cluster") + return ctrl.Result{}, err + } + + adoptableMachines := controlPlaneMachines.Filter(collections.AdoptableControlPlaneMachines(cluster.Name)) + if len(adoptableMachines) > 0 { + // We adopt the Machines and then wait for the update event for the ownership reference to re-queue them so the cache is up-to-date + err = r.adoptMachines(ctx, kcp, adoptableMachines, cluster) + return ctrl.Result{}, err + } + + ownedMachines := controlPlaneMachines.Filter(collections.OwnedMachines(kcp)) + if len(ownedMachines) != len(controlPlaneMachines) { + log.Info("Not all control plane machines are owned by this K3sControlPlane, refusing to operate in mixed management mode") + return ctrl.Result{}, nil + } + + controlPlane, err := k3sCluster.NewControlPlane(ctx, r.Client, cluster, kcp, ownedMachines) + if err != nil { + log.Error(err, "failed to initialize control plane") + return ctrl.Result{}, err + } + + // Aggregate the operational state of all the machines; while aggregating we are adding the + // source ref (reason@machine/name) so the problem can be easily tracked down to its source machine. + conditions.SetAggregate(controlPlane.KCP, infracontrolplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef(), conditions.WithStepCounterIf(false)) + + // Updates conditions reporting the status of static pods and the status of the etcd cluster. + // NOTE: Conditions reporting KCP operation progress like e.g. Resized or SpecUpToDate are inlined with the rest of the execution. 
+ if result, err := r.reconcileControlPlaneConditions(ctx, controlPlane); err != nil || !result.IsZero() { + return result, err + } + + // Reconcile unhealthy machines by triggering deletion and requeue if it is considered safe to remediate, + // otherwise continue with the other KCP operations. + //if result, err := r.reconcileUnhealthyMachines(ctx, controlPlane); err != nil || !result.IsZero() { + // return result, err + //} + + // Control plane machines rollout due to configuration changes (e.g. upgrades) takes precedence over other operations. + needRollout := controlPlane.MachinesNeedingRollout() + switch { + case len(needRollout) > 0: + log.Info("Rolling out Control Plane machines", "needRollout", needRollout.Names()) + conditions.MarkFalse(controlPlane.KCP, infracontrolplanev1.MachinesSpecUpToDateCondition, infracontrolplanev1.RollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(needRollout), len(controlPlane.Machines)-len(needRollout)) + return r.upgradeControlPlane(ctx, cluster, kcp, controlPlane, needRollout) + default: + // make sure last upgrade operation is marked as completed. + // NOTE: we are checking the condition already exists in order to avoid to set this condition at the first + // reconciliation/before a rolling upgrade actually starts. 
+ if conditions.Has(controlPlane.KCP, infracontrolplanev1.MachinesSpecUpToDateCondition) { + conditions.MarkTrue(controlPlane.KCP, infracontrolplanev1.MachinesSpecUpToDateCondition) + } + } + + // If we've made it this far, we can assume that all ownedMachines are up to date + numMachines := len(ownedMachines) + desiredReplicas := int(*kcp.Spec.Replicas) + + switch { + // We are creating the first replica + case numMachines < desiredReplicas && numMachines == 0: + // Create new Machine w/ init + log.Info("Initializing control plane", "Desired", desiredReplicas, "Existing", numMachines) + conditions.MarkFalse(controlPlane.KCP, infracontrolplanev1.AvailableCondition, infracontrolplanev1.WaitingForKubeadmInitReason, clusterv1.ConditionSeverityInfo, "") + return r.initializeControlPlane(ctx, cluster, kcp, controlPlane) + // We are scaling up + case numMachines < desiredReplicas && numMachines > 0: + // Create a new Machine w/ join + log.Info("Scaling up control plane", "Desired", desiredReplicas, "Existing", numMachines) + return r.scaleUpControlPlane(ctx, cluster, kcp, controlPlane) + // We are scaling down + case numMachines > desiredReplicas: + log.Info("Scaling down control plane", "Desired", desiredReplicas, "Existing", numMachines) + // The last parameter (i.e. machines needing to be rolled out) should always be empty here. + return r.scaleDownControlPlane(ctx, cluster, kcp, controlPlane, collections.Machines{}) + } + + return ctrl.Result{}, nil +} + +// reconcileDelete handles K3sControlPlane deletion. +// The implementation does not take non-control plane workloads into consideration. This may or may not change in the future. +// Please see https://github.com/kubernetes-sigs/cluster-api/issues/2064. 
+func (r *K3sControlPlaneReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, kcp *infracontrolplanev1.K3sControlPlane) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+	log.Info("Reconcile K3sControlPlane deletion")
+
+	// Gets all machines, not just control plane machines.
+	allMachines, err := r.managementCluster.GetMachinesForCluster(ctx, cluster)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	ownedMachines := allMachines.Filter(collections.OwnedMachines(kcp))
+
+	// If no control plane machines remain, remove the finalizer
+	if len(ownedMachines) == 0 {
+		controllerutil.RemoveFinalizer(kcp, infracontrolplanev1.K3sControlPlaneFinalizer)
+		return ctrl.Result{}, nil
+	}
+
+	controlPlane, err := k3sCluster.NewControlPlane(ctx, r.Client, cluster, kcp, ownedMachines)
+	if err != nil {
+		log.Error(err, "failed to initialize control plane")
+		return ctrl.Result{}, err
+	}
+
+	// Updates conditions reporting the status of static pods and the status of the etcd cluster.
+	// NOTE: Ignoring failures given that we are deleting
+	if _, err := r.reconcileControlPlaneConditions(ctx, controlPlane); err != nil {
+		log.Info("failed to reconcile conditions", "error", err.Error())
+	}
+
+	// Aggregate the operational state of all the machines; while aggregating we are adding the
+	// source ref (reason@machine/name) so the problem can be easily tracked down to its source machine.
+	// However, during delete we are hiding the counter (1 of x) because it does not make sense given that
+	// all the machines are deleted in parallel.
+	conditions.SetAggregate(kcp, infracontrolplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef(), conditions.WithStepCounterIf(false))
+
+	allMachinePools := &expv1.MachinePoolList{}
+	// Get all machine pools.
+	if feature.Gates.Enabled(feature.MachinePool) {
+		allMachinePools, err = r.managementCluster.GetMachinePoolsForCluster(ctx, cluster)
+		if err != nil {
+			return ctrl.Result{}, err
+		}
+	}
+	// Verify that only control plane machines remain
+	if len(allMachines) != len(ownedMachines) || len(allMachinePools.Items) != 0 {
+		log.Info("Waiting for worker nodes to be deleted first")
+		conditions.MarkFalse(kcp, infracontrolplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "Waiting for worker nodes to be deleted first")
+		return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil
+	}
+
+	// Delete control plane machines in parallel
+	machinesToDelete := ownedMachines.Filter(collections.Not(collections.HasDeletionTimestamp))
+	var errs []error
+	for i := range machinesToDelete {
+		m := machinesToDelete[i]
+		logger := log.WithValues("machine", m.Name)
+		if err := r.Client.Delete(ctx, machinesToDelete[i]); err != nil && !apierrors.IsNotFound(err) {
+			logger.Error(err, "Failed to cleanup owned machine")
+			errs = append(errs, err)
+		}
+	}
+	if len(errs) > 0 {
+		err := kerrors.NewAggregate(errs)
+		r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedDelete",
+			"Failed to delete control plane Machines for cluster %s/%s control plane: %v", cluster.Namespace, cluster.Name, err)
+		return ctrl.Result{}, err
+	}
+	conditions.MarkFalse(kcp, infracontrolplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
+	return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil
+}
+
+func (r *K3sControlPlaneReconciler) adoptMachines(ctx context.Context, kcp *infracontrolplanev1.K3sControlPlane, machines collections.Machines, cluster *clusterv1.Cluster) error {
+	// We do an uncached full quorum read against the KCP to avoid re-adopting Machines the garbage collector just intentionally orphaned
+	// See https://github.com/kubernetes/kubernetes/issues/42639
+	uncached := infracontrolplanev1.K3sControlPlane{}
+	err := r.managementClusterUncached.Get(ctx, client.ObjectKey{Namespace: kcp.Namespace, Name: kcp.Name}, &uncached)
+	if err != nil {
+		return errors.Wrapf(err, "failed to check whether %v/%v was deleted before adoption", kcp.GetNamespace(), kcp.GetName())
+	}
+	if !uncached.DeletionTimestamp.IsZero() {
+		return errors.Errorf("%v/%v has just been deleted at %v", kcp.GetNamespace(), kcp.GetName(), uncached.GetDeletionTimestamp())
+	}
+
+	kcpVersion, err := semver.ParseTolerant(kcp.Spec.Version)
+	if err != nil {
+		return errors.Wrapf(err, "failed to parse kubernetes version %q", kcp.Spec.Version)
+	}
+
+	for _, m := range machines {
+		ref := m.Spec.Bootstrap.ConfigRef
+
+		// TODO instead of returning error here, we should instead Event and add a watch on potentially adoptable Machines
+		if ref == nil || ref.Kind != "K3sConfig" {
+			return errors.Errorf("unable to adopt Machine %v/%v: expected a ConfigRef of kind K3sConfig but instead found %v", m.Namespace, m.Name, ref)
+		}
+
+		// TODO instead of returning error here, we should instead Event and add a watch on potentially adoptable Machines
+		if ref.Namespace != "" && ref.Namespace != kcp.Namespace {
+			return errors.Errorf("could not adopt resources from K3sConfig %v/%v: cannot adopt across namespaces", ref.Namespace, ref.Name)
+		}
+
+		if m.Spec.Version == nil {
+			// if the machine's version is not immediately apparent, assume the operator knows what they're doing
+			continue
+		}
+
+		machineVersion, err := semver.ParseTolerant(*m.Spec.Version)
+		if err != nil {
+			return errors.Wrapf(err, "failed to parse kubernetes version %q", *m.Spec.Version)
+		}
+
+		if !util.IsSupportedVersionSkew(kcpVersion, machineVersion) {
+			r.recorder.Eventf(kcp, corev1.EventTypeWarning, "AdoptionFailed", "Could not adopt Machine %s/%s: its version (%q) is outside supported +/- one minor version skew from KCP's (%q)", m.Namespace, m.Name, *m.Spec.Version, kcp.Spec.Version)
+			// avoid returning an error here so we don't cause the KCP controller to spin until the operator clarifies their intent
+			return nil
+		}
+	}
+
+	for _, m := range machines {
+		ref := m.Spec.Bootstrap.ConfigRef
+		cfg := &infrabootstrapv1.K3sConfig{}
+
+		if err := r.Client.Get(ctx, client.ObjectKey{Name: ref.Name, Namespace: kcp.Namespace}, cfg); err != nil {
+			return err
+		}
+
+		if err := r.adoptOwnedSecrets(ctx, kcp, cfg, cluster.Name); err != nil {
+			return err
+		}
+
+		patchHelper, err := patch.NewHelper(m, r.Client)
+		if err != nil {
+			return err
+		}
+
+		if err := controllerutil.SetControllerReference(kcp, m, r.Client.Scheme()); err != nil {
+			return err
+		}
+
+		// Note that ValidateOwnerReferences() will reject this patch if another
+		// OwnerReference exists with controller=true.
+		if err := patchHelper.Patch(ctx, m); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *K3sControlPlaneReconciler) adoptOwnedSecrets(ctx context.Context, kcp *infracontrolplanev1.K3sControlPlane, currentOwner *infrabootstrapv1.K3sConfig, clusterName string) error {
+	secrets := corev1.SecretList{}
+	if err := r.Client.List(ctx, &secrets, client.InNamespace(kcp.Namespace), client.MatchingLabels{clusterv1.ClusterLabelName: clusterName}); err != nil {
+		return errors.Wrap(err, "error finding secrets for adoption")
+	}
+
+	for i := range secrets.Items {
+		s := secrets.Items[i]
+		if !util.IsOwnedByObject(&s, currentOwner) {
+			continue
+		}
+		// avoid taking ownership of the bootstrap data secret
+		if currentOwner.Status.DataSecretName != nil && s.Name == *currentOwner.Status.DataSecretName {
+			continue
+		}
+
+		ss := s.DeepCopy()
+
+		ss.SetOwnerReferences(util.ReplaceOwnerRef(ss.GetOwnerReferences(), currentOwner, metav1.OwnerReference{
+			APIVersion:         infracontrolplanev1.GroupVersion.String(),
+			Kind:               "K3sControlPlane",
+			Name:               kcp.Name,
+			UID:                kcp.UID,
+			Controller:         pointer.Bool(true),
+			BlockOwnerDeletion: pointer.Bool(true),
+		}))
+
+		if err := r.Client.Update(ctx, ss); err != nil {
+			return errors.Wrapf(err, "error changing secret %v ownership from K3sConfig/%v to K3sControlPlane/%v", s.Name, currentOwner.GetName(), kcp.Name)
+		}
+	}
+
+	return nil
+}
+
+// reconcileControlPlaneConditions is responsible for reconciling conditions reporting the status of static pods and
+// the status of the etcd cluster.
+func (r *K3sControlPlaneReconciler) reconcileControlPlaneConditions(ctx context.Context, controlPlane *k3sCluster.ControlPlane) (ctrl.Result, error) {
+	// If the cluster is not yet initialized, there is no way to connect to the workload cluster and fetch information
+	// for updating conditions. Return early.
+	if !controlPlane.KCP.Status.Initialized {
+		return ctrl.Result{}, nil
+	}
+
+	workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(controlPlane.Cluster))
+	if err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "cannot get remote client to workload cluster")
+	}
+
+	// Update conditions status
+	workloadCluster.UpdateAgentConditions(ctx, controlPlane)
+	workloadCluster.UpdateEtcdConditions(ctx, controlPlane)
+
+	// Patch machines with the updated conditions.
+	if err := controlPlane.PatchMachines(ctx); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// KCP will be patched at the end of Reconcile to reflect updated conditions, so we can return now.
+	return ctrl.Result{}, nil
+}
+
+// ClusterToK3sControlPlane is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
+// for K3sControlPlane based on updates to a Cluster.
+func (r *K3sControlPlaneReconciler) ClusterToK3sControlPlane(o client.Object) []ctrl.Request {
+	c, ok := o.(*clusterv1.Cluster)
+	if !ok {
+		panic(fmt.Sprintf("Expected a Cluster but got a %T", o))
+	}
+
+	controlPlaneRef := c.Spec.ControlPlaneRef
+	if controlPlaneRef != nil && controlPlaneRef.Kind == "K3sControlPlane" {
+		return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}}
+	}
+
+	return nil
+}
diff --git a/controlplane/k3s/controllers/scale.go b/controlplane/k3s/controllers/scale.go
new file mode 100644
index 00000000..54ca3c59
--- /dev/null
+++ b/controlplane/k3s/controllers/scale.go
@@ -0,0 +1,206 @@
+/*
+ Copyright 2022 The KubeSphere Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"strings"
+
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util/collections"
+	"sigs.k8s.io/cluster-api/util/conditions"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1"
+	k3sCluster "github.com/kubesphere/kubekey/controlplane/k3s/pkg/cluster"
+)
+
+func (r *K3sControlPlaneReconciler) initializeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *infracontrolplanev1.K3sControlPlane, controlPlane *k3sCluster.ControlPlane) (ctrl.Result, error) {
+	logger := controlPlane.Logger()
+
+	// Perform an uncached read of all the owned machines. This check is in place to make sure
+	// that the controller cache is not misbehaving and we end up initializing the cluster more than once.
+	ownedMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, cluster, collections.OwnedMachines(kcp))
+	if err != nil {
+		logger.Error(err, "failed to perform an uncached read of control plane machines for cluster")
+		return ctrl.Result{}, err
+	}
+	if len(ownedMachines) > 0 {
+		return ctrl.Result{}, errors.Errorf(
+			"control plane has already been initialized, found %d owned machine for cluster %s/%s: controller cache or management cluster is misbehaving",
+			len(ownedMachines), cluster.Namespace, cluster.Name,
+		)
+	}
+
+	bootstrapSpec := controlPlane.InitialControlPlaneConfig()
+	fd := controlPlane.NextFailureDomainForScaleUp()
+	if err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, fd); err != nil {
+		logger.Error(err, "Failed to create initial control plane Machine")
+		r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s/%s control plane: %v", cluster.Namespace, cluster.Name, err)
+		return ctrl.Result{}, err
+	}
+
+	// Requeue the control plane, in case there are additional operations to perform
+	return ctrl.Result{Requeue: true}, nil
+}
+
+func (r *K3sControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *infracontrolplanev1.K3sControlPlane, controlPlane *k3sCluster.ControlPlane) (ctrl.Result, error) {
+	logger := controlPlane.Logger()
+
+	// Run preflight checks to ensure that the control plane is stable before proceeding with a scale up/scale down operation; if not, wait.
+	if result, err := r.preflightChecks(ctx, controlPlane); err != nil || !result.IsZero() {
+		return result, err
+	}
+
+	// Create the bootstrap configuration
+	bootstrapSpec := controlPlane.JoinControlPlaneConfig()
+	fd := controlPlane.NextFailureDomainForScaleUp()
+	if err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, fd); err != nil {
+		logger.Error(err, "Failed to create additional control plane Machine")
+		r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster %s/%s control plane: %v", cluster.Namespace, cluster.Name, err)
+		return ctrl.Result{}, err
+	}
+
+	// Requeue the control plane, in case there are other operations to perform
+	return ctrl.Result{Requeue: true}, nil
+}
+
+func (r *K3sControlPlaneReconciler) scaleDownControlPlane(
+	ctx context.Context,
+	cluster *clusterv1.Cluster,
+	kcp *infracontrolplanev1.K3sControlPlane,
+	controlPlane *k3sCluster.ControlPlane,
+	outdatedMachines collections.Machines,
+) (ctrl.Result, error) {
+	logger := controlPlane.Logger()
+
+	// Pick the Machine that we should scale down.
+	machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines)
+	if err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to select machine for scale down")
+	}
+
+	// Check for a nil selection before handing machineToDelete to preflightChecks, which dereferences each exclude.
+	if machineToDelete == nil {
+		logger.Info("Failed to pick control plane Machine to delete")
+		return ctrl.Result{}, errors.New("failed to pick control plane Machine to delete")
+	}
+
+	// Run preflight checks ensuring the control plane is stable before proceeding with a scale up/scale down operation; if not, wait.
+	// Given that we're scaling down, we can exclude the machineToDelete from the preflight checks.
+	if result, err := r.preflightChecks(ctx, controlPlane, machineToDelete); err != nil || !result.IsZero() {
+		return result, err
+	}
+
+	logger = logger.WithValues("machine", machineToDelete.Name)
+	if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) {
+		logger.Error(err, "Failed to delete control plane machine")
+		r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleDown",
+			"Failed to delete control plane Machine %s for cluster %s/%s control plane: %v", machineToDelete.Name, cluster.Namespace, cluster.Name, err)
+		return ctrl.Result{}, err
+	}
+
+	// Requeue the control plane, in case there are additional operations to perform
+	return ctrl.Result{Requeue: true}, nil
+}
+
+// preflightChecks checks if the control plane is stable before proceeding with a scale up/scale down operation,
+// where stable means that:
+// - There are no machine deletion in progress
+// - All the health conditions on KCP are true.
+// - All the health conditions on the control plane machines are true.
+// If the control plane is not passing preflight checks, it requeue.
+//
+// NOTE: this func uses KCP conditions, it is required to call reconcileControlPlaneConditions before this.
+func (r *K3sControlPlaneReconciler) preflightChecks(_ context.Context, controlPlane *k3sCluster.ControlPlane, excludeFor ...*clusterv1.Machine) (ctrl.Result, error) { //nolint:unparam
+	logger := controlPlane.Logger()
+
+	// If there is no KCP-owned control-plane machines, then control-plane has not been initialized yet,
+	// so it is considered ok to proceed.
+	if controlPlane.Machines.Len() == 0 {
+		return ctrl.Result{}, nil
+	}
+
+	// If there are deleting machines, wait for the operation to complete.
+	if controlPlane.HasDeletingMachine() {
+		logger.Info("Waiting for machines to be deleted", "Machines", strings.Join(controlPlane.Machines.Filter(collections.HasDeletionTimestamp).Names(), ", "))
+		return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil
+	}
+
+	// Check machine health conditions; if there are conditions with False or Unknown, then wait.
+	allMachineHealthConditions := []clusterv1.ConditionType{
+		infracontrolplanev1.MachineAgentHealthyCondition,
+	}
+	var machineErrors []error
+
+loopmachines:
+	for _, machine := range controlPlane.Machines {
+		for _, excluded := range excludeFor {
+			// If this machine should be excluded from the individual
+			// health check, continue the outer loop.
+			if machine.Name == excluded.Name {
+				continue loopmachines
+			}
+		}
+
+		for _, condition := range allMachineHealthConditions {
+			if err := preflightCheckCondition("machine", machine, condition); err != nil {
+				machineErrors = append(machineErrors, err)
+			}
+		}
+	}
+	if len(machineErrors) > 0 {
+		aggregatedError := kerrors.NewAggregate(machineErrors)
+		r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "ControlPlaneUnhealthy",
+			"Waiting for control plane to pass preflight checks to continue reconciliation: %v", aggregatedError)
+		logger.Info("Waiting for control plane to pass preflight checks", "failures", aggregatedError.Error())
+
+		return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, nil
+	}
+
+	return ctrl.Result{}, nil
+}
+
+func preflightCheckCondition(kind string, obj conditions.Getter, condition clusterv1.ConditionType) error {
+	c := conditions.Get(obj, condition)
+	if c == nil {
+		return errors.Errorf("%s %s does not have %s condition", kind, obj.GetName(), condition)
+	}
+	if c.Status == corev1.ConditionFalse {
+		return errors.Errorf("%s %s reports %s condition is false (%s, %s)", kind, obj.GetName(), condition, c.Severity, c.Message)
+	}
+	if c.Status == corev1.ConditionUnknown {
+		return errors.Errorf("%s %s reports %s condition is unknown (%s)", kind, obj.GetName(), condition, c.Message)
+	}
+	return nil
+}
+
+func selectMachineForScaleDown(controlPlane *k3sCluster.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) {
+	machines := controlPlane.Machines
+	switch {
+	case controlPlane.MachineWithDeleteAnnotation(outdatedMachines).Len() > 0:
+		machines = controlPlane.MachineWithDeleteAnnotation(outdatedMachines)
+	case controlPlane.MachineWithDeleteAnnotation(machines).Len() > 0:
+		machines = controlPlane.MachineWithDeleteAnnotation(machines)
+	case outdatedMachines.Len() > 0:
+		machines = outdatedMachines
+	}
+	return controlPlane.MachineInFailureDomainWithMostMachines(machines)
+}
diff --git a/controlplane/k3s/controllers/status.go b/controlplane/k3s/controllers/status.go
new file mode 100644
index 00000000..3af4be3f
--- /dev/null
+++ b/controlplane/k3s/controllers/status.go
@@ -0,0 +1,120 @@
+/*
+ Copyright 2022 The KubeSphere Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+
+	"github.com/pkg/errors"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/collections"
+	"sigs.k8s.io/cluster-api/util/conditions"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1"
+	k3sCluster "github.com/kubesphere/kubekey/controlplane/k3s/pkg/cluster"
+)
+
+// updateStatus is called after every reconciliation loop in a defer statement to always make sure we have the
+// resource status subresources up-to-date.
+func (r *K3sControlPlaneReconciler) updateStatus(ctx context.Context, kcp *infracontrolplanev1.K3sControlPlane, cluster *clusterv1.Cluster) error {
+	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
+
+	selector := collections.ControlPlaneSelectorForCluster(cluster.Name)
+	// Copy label selector to its status counterpart in string format.
+	// This is necessary for CRDs including scale subresources.
+	kcp.Status.Selector = selector.String()
+
+	ownedMachines, err := r.managementCluster.GetMachinesForCluster(ctx, cluster, collections.OwnedMachines(kcp))
+	if err != nil {
+		return errors.Wrap(err, "failed to get list of owned machines")
+	}
+
+	controlPlane, err := k3sCluster.NewControlPlane(ctx, r.Client, cluster, kcp, ownedMachines)
+	if err != nil {
+		log.Error(err, "failed to initialize control plane")
+		return err
+	}
+	kcp.Status.UpdatedReplicas = int32(len(controlPlane.UpToDateMachines()))
+
+	replicas := int32(len(ownedMachines))
+	desiredReplicas := *kcp.Spec.Replicas
+
+	// set basic data that does not require interacting with the workload cluster
+	kcp.Status.Replicas = replicas
+	kcp.Status.ReadyReplicas = 0
+	kcp.Status.UnavailableReplicas = replicas
+
+	// Return early if the deletion timestamp is set, because we don't want to try to connect to the workload cluster
+	// and we don't want to report resize condition (because it is set to deleting into reconcile delete).
+	if !kcp.DeletionTimestamp.IsZero() {
+		return nil
+	}
+
+	machinesWithHealthAPIServer := ownedMachines.Filter(collections.HealthyAPIServer())
+	lowestVersion := machinesWithHealthAPIServer.LowestVersion()
+	if lowestVersion != nil {
+		kcp.Status.Version = lowestVersion
+	}
+
+	switch {
+	// We are scaling up
+	case replicas < desiredReplicas:
+		conditions.MarkFalse(kcp, infracontrolplanev1.ResizedCondition, infracontrolplanev1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up control plane to %d replicas (actual %d)", desiredReplicas, replicas)
+	// We are scaling down
+	case replicas > desiredReplicas:
+		conditions.MarkFalse(kcp, infracontrolplanev1.ResizedCondition, infracontrolplanev1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down control plane to %d replicas (actual %d)", desiredReplicas, replicas)
+
+		// This means that there was no error in generating the desired number of machine objects
+		conditions.MarkTrue(kcp, infracontrolplanev1.MachinesCreatedCondition)
+	default:
+		// make sure last resize operation is marked as completed.
+		// NOTE: we are checking the number of machines ready so we report resize completed only when the machines
+		// are actually provisioned (vs reporting completed immediately after the last machine object is created).
+		readyMachines := ownedMachines.Filter(collections.IsReady())
+		if int32(len(readyMachines)) == replicas {
+			conditions.MarkTrue(kcp, infracontrolplanev1.ResizedCondition)
+		}
+
+		// This means that there was no error in generating the desired number of machine objects
+		conditions.MarkTrue(kcp, infracontrolplanev1.MachinesCreatedCondition)
+	}
+
+	workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster))
+	if err != nil {
+		return errors.Wrap(err, "failed to create remote cluster client")
+	}
+	status, err := workloadCluster.ClusterStatus(ctx)
+	if err != nil {
+		return err
+	}
+	kcp.Status.ReadyReplicas = status.ReadyNodes
+	kcp.Status.UnavailableReplicas = replicas - status.ReadyNodes
+
+	// This only gets initialized once and does not change if the k3s config map goes away.
+	if status.HasK3sConfig {
+		kcp.Status.Initialized = true
+		conditions.MarkTrue(kcp, infracontrolplanev1.AvailableCondition)
+	}
+
+	if kcp.Status.ReadyReplicas > 0 {
+		kcp.Status.Ready = true
+	}
+
+	return nil
+}
diff --git a/controlplane/k3s/controllers/suite_test.go b/controlplane/k3s/controllers/suite_test.go
new file mode 100644
index 00000000..436d11e4
--- /dev/null
+++ b/controlplane/k3s/controllers/suite_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2022.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +//// These tests use Ginkgo (BDD-style Go testing framework). Refer to +//// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. +// +//var cfg *rest.Config +//var k8sClient client.Client +//var testEnv *envtest.Environment +// +//func TestAPIs(t *testing.T) { +// RegisterFailHandler(Fail) +// +// RunSpecsWithDefaultAndCustomReporters(t, +// "Controller Suite", +// []Reporter{printer.NewlineReporter{}}) +//} +// +//var _ = BeforeSuite(func() { +// logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +// +// By("bootstrapping test environment") +// testEnv = &envtest.Environment{ +// CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, +// ErrorIfCRDPathMissing: true, +// } +// +// var err error +// // cfg is defined in this file globally. 
+// cfg, err = testEnv.Start() +// Expect(err).NotTo(HaveOccurred()) +// Expect(cfg).NotTo(BeNil()) +// +// err = controlplanev1beta1.AddToScheme(scheme.Scheme) +// Expect(err).NotTo(HaveOccurred()) +// +// //+kubebuilder:scaffold:scheme +// +// k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) +// Expect(err).NotTo(HaveOccurred()) +// Expect(k8sClient).NotTo(BeNil()) +// +//}, 60) +// +//var _ = AfterSuite(func() { +// By("tearing down the test environment") +// err := testEnv.Stop() +// Expect(err).NotTo(HaveOccurred()) +//}) diff --git a/controlplane/k3s/controllers/upgrade.go b/controlplane/k3s/controllers/upgrade.go new file mode 100644 index 00000000..a0ee9507 --- /dev/null +++ b/controlplane/k3s/controllers/upgrade.go @@ -0,0 +1,58 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package controllers + +import ( + "context" + + "github.com/pkg/errors" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/collections" + ctrl "sigs.k8s.io/controller-runtime" + + infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1" + k3sCluster "github.com/kubesphere/kubekey/controlplane/k3s/pkg/cluster" +) + +func (r *K3sControlPlaneReconciler) upgradeControlPlane( + ctx context.Context, + cluster *clusterv1.Cluster, + kcp *infracontrolplanev1.K3sControlPlane, + controlPlane *k3sCluster.ControlPlane, + machinesRequireUpgrade collections.Machines, +) (ctrl.Result, error) { + logger := controlPlane.Logger() + + if kcp.Spec.RolloutStrategy == nil || kcp.Spec.RolloutStrategy.RollingUpdate == nil { + return ctrl.Result{}, errors.New("rolloutStrategy is not set") + } + + switch kcp.Spec.RolloutStrategy.Type { + case infracontrolplanev1.RollingUpdateStrategyType: + // RolloutStrategy is currently defaulted and validated to be RollingUpdate + // We can ignore MaxUnavailable because we are enforcing health checks before we get here. + maxNodes := *kcp.Spec.Replicas + int32(kcp.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntValue()) + if int32(controlPlane.Machines.Len()) < maxNodes { + // scaleUp ensures that we don't continue scaling up while waiting for Machines to have NodeRefs + return r.scaleUpControlPlane(ctx, cluster, kcp, controlPlane) + } + return r.scaleDownControlPlane(ctx, cluster, kcp, controlPlane, machinesRequireUpgrade) + default: + logger.Info("RolloutStrategy type is not set to RollingUpdateStrategyType, unable to determine the strategy for rolling out machines") + return ctrl.Result{}, nil + } +} diff --git a/controlplane/k3s/hack/boilerplate.go.txt b/controlplane/k3s/hack/boilerplate.go.txt new file mode 100644 index 00000000..62802d18 --- /dev/null +++ b/controlplane/k3s/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2022 The KubeSphere Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/controlplane/k3s/main.go b/controlplane/k3s/main.go new file mode 100644 index 00000000..3aa7364b --- /dev/null +++ b/controlplane/k3s/main.go @@ -0,0 +1,218 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package main +package main + +import ( + "flag" + "fmt" + "math/rand" + "os" + "time" + + "github.com/spf13/pflag" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth" + cliflag "k8s.io/component-base/cli/flag" + "k8s.io/klog/v2/klogr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/remote" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/healthz" + + infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1" + infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1" + "github.com/kubesphere/kubekey/controlplane/k3s/controllers" + //+kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(clusterv1.AddToScheme(scheme)) + utilruntime.Must(infrabootstrapv1.AddToScheme(scheme)) + utilruntime.Must(infracontrolplanev1.AddToScheme(scheme)) + utilruntime.Must(apiextensionsv1.AddToScheme(scheme)) + //+kubebuilder:scaffold:scheme +} + +var ( + metricsBindAddr string + enableLeaderElection bool + leaderElectionLeaseDuration time.Duration + leaderElectionRenewDeadline time.Duration + leaderElectionRetryPeriod time.Duration + watchFilterValue string + watchNamespace string + k3sControlPlaneConcurrency int + syncPeriod time.Duration + webhookPort int + webhookCertDir string + healthAddr string +) + +// InitFlags initializes the flags. 
+func InitFlags(fs *pflag.FlagSet) { + fs.StringVar(&metricsBindAddr, "metrics-bind-addr", "localhost:8080", + "The address the metric endpoint binds to.") + + fs.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + + fs.DurationVar(&leaderElectionLeaseDuration, "leader-elect-lease-duration", 1*time.Minute, + "Interval at which non-leader candidates will wait to force acquire leadership (duration string)") + + fs.DurationVar(&leaderElectionRenewDeadline, "leader-elect-renew-deadline", 40*time.Second, + "Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)") + + fs.DurationVar(&leaderElectionRetryPeriod, "leader-elect-retry-period", 5*time.Second, + "Duration the LeaderElector clients should wait between tries of actions (duration string)") + + fs.StringVar(&watchNamespace, "namespace", "", + "Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.") + + fs.IntVar(&k3sControlPlaneConcurrency, "k3scontrolplane-concurrency", 10, + "Number of kubeadm control planes to process simultaneously") + + fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, + "The minimum interval at which watched resources are reconciled (e.g. 15m)") + + fs.StringVar(&watchFilterValue, "watch-filter", "", + fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. 
If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel))
+
+	fs.IntVar(&webhookPort, "webhook-port", 9443,
+		"Webhook Server port")
+
+	fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/",
+		"Webhook cert dir, only used when webhook-port is specified.")
+
+	fs.StringVar(&healthAddr, "health-addr", ":9440",
+		"The address the health endpoint binds to.")
+}
+
+func main() {
+	rand.Seed(time.Now().UnixNano())
+
+	InitFlags(pflag.CommandLine)
+	pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
+	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+	pflag.Parse()
+
+	ctrl.SetLogger(klogr.New())
+
+	ctx := ctrl.SetupSignalHandler()
+
+	restConfig := ctrl.GetConfigOrDie()
+	restConfig.UserAgent = remote.DefaultClusterAPIUserAgent("cluster-api-k3s-control-plane-manager")
+	mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
+		Scheme:                 scheme,
+		MetricsBindAddress:     metricsBindAddr,
+		LeaderElection:         enableLeaderElection,
+		LeaderElectionID:       "k3s-control-plane-manager-leader-election-capkk",
+		LeaseDuration:          &leaderElectionLeaseDuration,
+		RenewDeadline:          &leaderElectionRenewDeadline,
+		RetryPeriod:            &leaderElectionRetryPeriod,
+		Namespace:              watchNamespace,
+		SyncPeriod:             &syncPeriod,
+		ClientDisableCacheFor: []client.Object{
+			&corev1.ConfigMap{},
+			&corev1.Secret{},
+		},
+		Port:                   webhookPort,
+		HealthProbeBindAddress: healthAddr,
+		CertDir:                webhookCertDir,
+	})
+	if err != nil {
+		setupLog.Error(err, "unable to start manager")
+		os.Exit(1)
+	}
+
+	log := ctrl.Log.WithName("remote").WithName("ClusterCacheTracker")
+	tracker, err := remote.NewClusterCacheTracker(mgr, remote.ClusterCacheTrackerOptions{
+		Log:     &log,
+		Indexes: remote.DefaultIndexes,
+		ClientUncachedObjects: []client.Object{
+			&corev1.ConfigMap{},
+			&corev1.Secret{},
+			&corev1.Pod{},
+			&appsv1.Deployment{},
+			&appsv1.DaemonSet{},
+		},
+	})
+	if err != nil {
+		setupLog.Error(err, "unable to create cluster cache 
tracker") + os.Exit(1) + } + if err := (&remote.ClusterCacheReconciler{ + Client: mgr.GetClient(), + Tracker: tracker, + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, concurrency(k3sControlPlaneConcurrency)); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ClusterCacheReconciler") + os.Exit(1) + } + + if err = (&controllers.K3sControlPlaneReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + APIReader: mgr.GetAPIReader(), + Tracker: tracker, + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, concurrency(k3sControlPlaneConcurrency)); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "K3sControlPlane") + os.Exit(1) + } + if err = (&infracontrolplanev1.K3sControlPlane{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "K3sControlPlane") + os.Exit(1) + } + if err = (&infracontrolplanev1.K3sControlPlaneTemplate{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "K3sControlPlaneTemplate") + os.Exit(1) + } + //+kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctx); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} + +func concurrency(c int) controller.Options { + return controller.Options{MaxConcurrentReconciles: c} +} diff --git a/controlplane/k3s/pkg/cluster/cluster.go b/controlplane/k3s/pkg/cluster/cluster.go new file mode 100644 index 00000000..d9844081 --- /dev/null +++ b/controlplane/k3s/pkg/cluster/cluster.go @@ -0,0 +1,113 @@ +/* + Copyright 2022 The KubeSphere Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cluster + +import ( + "context" + "time" + + "github.com/pkg/errors" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/remote" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // K3ssControlPlaneControllerName defines the controller used when creating clients. + K3ssControlPlaneControllerName = "k3s-controlplane-controller" +) + +// ManagementCluster defines all behaviors necessary for something to function as a management cluster. +type ManagementCluster interface { + client.Reader + + GetMachinesForCluster(ctx context.Context, cluster *clusterv1.Cluster, filters ...collections.Func) (collections.Machines, error) + GetMachinePoolsForCluster(ctx context.Context, cluster *clusterv1.Cluster) (*expv1.MachinePoolList, error) + GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error) +} + +// Management holds operations on the management cluster. +type Management struct { + Client client.Reader + Tracker *remote.ClusterCacheTracker +} + +// RemoteClusterConnectionError represents a failure to connect to a remote cluster. +type RemoteClusterConnectionError struct { + Name string + Err error +} + +// Error satisfies the error interface. 
+func (e *RemoteClusterConnectionError) Error() string { return e.Name + ": " + e.Err.Error() }
+
+// Unwrap satisfies the unwrap error interface.
+func (e *RemoteClusterConnectionError) Unwrap() error { return e.Err }
+
+// Get implements ctrlclient.Reader.
+func (m *Management) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error {
+	return m.Client.Get(ctx, key, obj)
+}
+
+// List implements ctrlclient.Reader.
+func (m *Management) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+	return m.Client.List(ctx, list, opts...)
+}
+
+// GetMachinesForCluster returns a list of machines that can be filtered or not.
+// If no filter is supplied then all machines associated with the target cluster are returned.
+func (m *Management) GetMachinesForCluster(ctx context.Context, cluster *clusterv1.Cluster, filters ...collections.Func) (collections.Machines, error) {
+	return collections.GetFilteredMachinesForCluster(ctx, m.Client, cluster, filters...)
+}
+
+// GetMachinePoolsForCluster returns a list of machine pools owned by the cluster.
+func (m *Management) GetMachinePoolsForCluster(ctx context.Context, cluster *clusterv1.Cluster) (*expv1.MachinePoolList, error) {
+	selectors := []client.ListOption{
+		client.InNamespace(cluster.GetNamespace()),
+		client.MatchingLabels{
+			clusterv1.ClusterLabelName: cluster.GetName(),
+		},
+	}
+	machinePoolList := &expv1.MachinePoolList{}
+	err := m.Client.List(ctx, machinePoolList, selectors...)
+	return machinePoolList, err
+}
+
+// GetWorkloadCluster builds a cluster object.
+// The cluster comes with an etcd client generator to connect to any etcd pod living on a managed machine. 
+func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error) { + restConfig, err := remote.RESTConfig(ctx, K3ssControlPlaneControllerName, m.Client, clusterKey) + if err != nil { + return nil, err + } + restConfig.Timeout = 30 * time.Second + + if m.Tracker == nil { + return nil, errors.New("Cannot get WorkloadCluster: No remote Cluster Cache") + } + + c, err := m.Tracker.GetClient(ctx, clusterKey) + if err != nil { + return nil, err + } + + return &Workload{ + Client: c, + }, nil +} diff --git a/controlplane/k3s/pkg/cluster/cluster_labels.go b/controlplane/k3s/pkg/cluster/cluster_labels.go new file mode 100644 index 00000000..b92b9a09 --- /dev/null +++ b/controlplane/k3s/pkg/cluster/cluster_labels.go @@ -0,0 +1,39 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cluster + +import ( + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + + infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1" +) + +// ControlPlaneMachineLabelsForCluster returns a set of labels to add to a control plane machine for this specific cluster. +func ControlPlaneMachineLabelsForCluster(kcp *infracontrolplanev1.K3sControlPlane, clusterName string) map[string]string { + labels := map[string]string{} + + // Add the labels from the MachineTemplate. + // Note: we intentionally don't use the map directly to ensure we don't modify the map in KCP. 
+ for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Labels { + labels[k] = v + } + + // Always force these labels over the ones coming from the spec. + labels[clusterv1.ClusterLabelName] = clusterName + labels[clusterv1.MachineControlPlaneLabelName] = "" + return labels +} diff --git a/controlplane/k3s/pkg/cluster/control_plane.go b/controlplane/k3s/pkg/cluster/control_plane.go new file mode 100644 index 00000000..0f34eced --- /dev/null +++ b/controlplane/k3s/pkg/cluster/control_plane.go @@ -0,0 +1,290 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cluster + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/klog/v2/klogr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/external" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/failuredomains" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1" + infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1" +) + +// Log is the global logger for the internal package. +var Log = klogr.New() + +// ControlPlane holds business logic around control planes. +// It should never need to connect to a service, that responsibility lies outside of this struct. +// Going forward we should be trying to add more logic to here and reduce the amount of logic in the reconciler. +type ControlPlane struct { + KCP *infracontrolplanev1.K3sControlPlane + Cluster *clusterv1.Cluster + Machines collections.Machines + machinesPatchHelpers map[string]*patch.Helper + + // reconciliationTime is the time of the current reconciliation, and should be used for all "now" calculations + reconciliationTime metav1.Time + + // TODO: we should see if we can combine these with the Machine objects so we don't have all these separate lookups + // See discussion on https://github.com/kubernetes-sigs/cluster-api/pull/3405 + k3sConfigs map[string]*infrabootstrapv1.K3sConfig + infraResources map[string]*unstructured.Unstructured +} + +// NewControlPlane returns an instantiated ControlPlane. 
+func NewControlPlane(ctx context.Context, client client.Client, cluster *clusterv1.Cluster, kcp *infracontrolplanev1.K3sControlPlane, ownedMachines collections.Machines) (*ControlPlane, error) {
+	infraObjects, err := getInfraResources(ctx, client, ownedMachines)
+	if err != nil {
+		return nil, err
+	}
+	k3sConfigs, err := getK3sConfigs(ctx, client, ownedMachines)
+	if err != nil {
+		return nil, err
+	}
+	patchHelpers := map[string]*patch.Helper{}
+	for _, machine := range ownedMachines {
+		patchHelper, err := patch.NewHelper(machine, client)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to create patch helper for machine %s", machine.Name)
+		}
+		patchHelpers[machine.Name] = patchHelper
+	}
+
+	return &ControlPlane{
+		KCP:                  kcp,
+		Cluster:              cluster,
+		Machines:             ownedMachines,
+		machinesPatchHelpers: patchHelpers,
+		k3sConfigs:           k3sConfigs,
+		infraResources:       infraObjects,
+		reconciliationTime:   metav1.Now(),
+	}, nil
+}
+
+// Logger returns a logger with useful context.
+func (c *ControlPlane) Logger() logr.Logger {
+	return Log.WithValues("namespace", c.KCP.Namespace, "name", c.KCP.Name, "cluster-name", c.Cluster.Name)
+}
+
+// FailureDomains returns a slice of failure domain objects synced from the infrastructure provider into Cluster.Status.
+func (c *ControlPlane) FailureDomains() clusterv1.FailureDomains {
+	if c.Cluster.Status.FailureDomains == nil {
+		return clusterv1.FailureDomains{}
+	}
+	return c.Cluster.Status.FailureDomains
+}
+
+// Version returns the K3sControlPlane's version.
+func (c *ControlPlane) Version() *string {
+	return &c.KCP.Spec.Version
+}
+
+// MachineInfrastructureTemplateRef returns the K3sControlPlane's infrastructure template for Machines.
+func (c *ControlPlane) MachineInfrastructureTemplateRef() *corev1.ObjectReference {
+	return &c.KCP.Spec.MachineTemplate.InfrastructureRef
+}
+
+// AsOwnerReference returns an owner reference to the K3sControlPlane. 
+func (c *ControlPlane) AsOwnerReference() *metav1.OwnerReference { + return &metav1.OwnerReference{ + APIVersion: infracontrolplanev1.GroupVersion.String(), + Kind: "K3sControlPlane", + Name: c.KCP.Name, + UID: c.KCP.UID, + } +} + +// MachineInFailureDomainWithMostMachines returns the first matching failure domain with machines that has the most control-plane machines on it. +func (c *ControlPlane) MachineInFailureDomainWithMostMachines(machines collections.Machines) (*clusterv1.Machine, error) { + fd := c.FailureDomainWithMostMachines(machines) + machinesInFailureDomain := machines.Filter(collections.InFailureDomains(fd)) + machineToMark := machinesInFailureDomain.Oldest() + if machineToMark == nil { + return nil, errors.New("failed to pick control plane Machine to mark for deletion") + } + return machineToMark, nil +} + +// MachineWithDeleteAnnotation returns a machine that has been annotated with DeleteMachineAnnotation key. +func (c *ControlPlane) MachineWithDeleteAnnotation(machines collections.Machines) collections.Machines { + // See if there are any machines with DeleteMachineAnnotation key. + annotatedMachines := machines.Filter(collections.HasAnnotationKey(clusterv1.DeleteMachineAnnotation)) + // If there are, return list of annotated machines. + return annotatedMachines +} + +// FailureDomainWithMostMachines returns a fd which exists both in machines and control-plane machines and has the most +// control-plane machines on it. +func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machines) *string { + // See if there are any Machines that are not in currently defined failure domains first. 
+ notInFailureDomains := machines.Filter( + collections.Not(collections.InFailureDomains(c.FailureDomains().FilterControlPlane().GetIDs()...)), + ) + if len(notInFailureDomains) > 0 { + // return the failure domain for the oldest Machine not in the current list of failure domains + // this could be either nil (no failure domain defined) or a failure domain that is no longer defined + // in the cluster status. + return notInFailureDomains.Oldest().Spec.FailureDomain + } + return failuredomains.PickMost(c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines) +} + +// NextFailureDomainForScaleUp returns the failure domain with the fewest number of up-to-date machines. +func (c *ControlPlane) NextFailureDomainForScaleUp() *string { + if len(c.Cluster.Status.FailureDomains.FilterControlPlane()) == 0 { + return nil + } + return failuredomains.PickFewest(c.FailureDomains().FilterControlPlane(), c.UpToDateMachines()) +} + +// InitialControlPlaneConfig returns a new K3sConfigSpec that is to be used for an initializing control plane. +func (c *ControlPlane) InitialControlPlaneConfig() *infrabootstrapv1.K3sConfigSpec { + bootstrapSpec := c.KCP.Spec.K3sConfigSpec.DeepCopy() + bootstrapSpec.AgentConfiguration = nil + return bootstrapSpec +} + +// JoinControlPlaneConfig returns a new K3sConfigSpec that is to be used for joining control planes. +func (c *ControlPlane) JoinControlPlaneConfig() *infrabootstrapv1.K3sConfigSpec { + bootstrapSpec := c.KCP.Spec.K3sConfigSpec.DeepCopy() + bootstrapSpec.AgentConfiguration = nil + return bootstrapSpec +} + +// GenerateK3sConfig generates a new k3s config for creating new control plane nodes. 
+func (c *ControlPlane) GenerateK3sConfig(spec *infrabootstrapv1.K3sConfigSpec) *infrabootstrapv1.K3sConfig {
+	// Create an owner reference without a controller reference because the owning controller is the machine controller. NOTE(review): APIVersion comes from the upstream kubeadm controlplanev1 group while Kind is "K3sControlPlane"; AsOwnerReference uses infracontrolplanev1.GroupVersion — confirm which group is intended here.
+	owner := metav1.OwnerReference{
+		APIVersion: controlplanev1.GroupVersion.String(),
+		Kind:       "K3sControlPlane",
+		Name:       c.KCP.Name,
+		UID:        c.KCP.UID,
+	}
+
+	bootstrapConfig := &infrabootstrapv1.K3sConfig{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            names.SimpleNameGenerator.GenerateName(c.KCP.Name + "-"),
+			Namespace:       c.KCP.Namespace,
+			Labels:          ControlPlaneMachineLabelsForCluster(c.KCP, c.Cluster.Name),
+			Annotations:     c.KCP.Spec.MachineTemplate.ObjectMeta.Annotations,
+			OwnerReferences: []metav1.OwnerReference{owner},
+		},
+		Spec: *spec,
+	}
+	return bootstrapConfig
+}
+
+// getInfraResources fetches the external infrastructure resource for each machine in the collection and returns a map of machine.Name -> infraResource.
+func getInfraResources(ctx context.Context, cl client.Client, machines collections.Machines) (map[string]*unstructured.Unstructured, error) {
+	result := map[string]*unstructured.Unstructured{}
+	for _, m := range machines {
+		infraObj, err := external.Get(ctx, cl, &m.Spec.InfrastructureRef, m.Namespace)
+		if err != nil {
+			if apierrors.IsNotFound(errors.Cause(err)) {
+				continue
+			}
+			return nil, errors.Wrapf(err, "failed to retrieve infra obj for machine %q", m.Name)
+		}
+		result[m.Name] = infraObj
+	}
+	return result, nil
+}
+
+// getK3sConfigs fetches the k3s config for each machine in the collection and returns a map of machine.Name -> K3sConfig. 
+func getK3sConfigs(ctx context.Context, cl client.Client, machines collections.Machines) (map[string]*infrabootstrapv1.K3sConfig, error) { + result := map[string]*infrabootstrapv1.K3sConfig{} + for _, m := range machines { + bootstrapRef := m.Spec.Bootstrap.ConfigRef + if bootstrapRef == nil { + continue + } + machineConfig := &infrabootstrapv1.K3sConfig{} + if err := cl.Get(ctx, client.ObjectKey{Name: bootstrapRef.Name, Namespace: m.Namespace}, machineConfig); err != nil { + if apierrors.IsNotFound(errors.Cause(err)) { + continue + } + return nil, errors.Wrapf(err, "failed to retrieve bootstrap config for machine %q", m.Name) + } + result[m.Name] = machineConfig + } + return result, nil +} + +// HasDeletingMachine returns true if any machine in the control plane is in the process of being deleted. +func (c *ControlPlane) HasDeletingMachine() bool { + return len(c.Machines.Filter(collections.HasDeletionTimestamp)) > 0 +} + +// MachinesNeedingRollout return a list of machines that need to be rolled out. +func (c *ControlPlane) MachinesNeedingRollout() collections.Machines { + // Ignore machines to be deleted. + machines := c.Machines.Filter(collections.Not(collections.HasDeletionTimestamp)) + + // Return machines if they are scheduled for rollout or if with an outdated configuration. + return machines.AnyFilter( + // Machines that are scheduled for rollout (KCP.Spec.RolloutAfter set, the RolloutAfter deadline is expired, and the machine was created before the deadline). + collections.ShouldRolloutAfter(&c.reconciliationTime, c.KCP.Spec.RolloutAfter), + // Machines that do not match with KCP config. + collections.Not(MatchesMachineSpec(c.infraResources, c.k3sConfigs, c.KCP)), + ) +} + +// UpToDateMachines returns the machines that are up to date with the control +// plane's configuration and therefore do not require rollout. 
+func (c *ControlPlane) UpToDateMachines() collections.Machines { + return c.Machines.Filter( + // Machines that shouldn't be rolled out after the deadline has expired. + collections.Not(collections.ShouldRolloutAfter(&c.reconciliationTime, c.KCP.Spec.RolloutAfter)), + // Machines that match with KCP config. + MatchesMachineSpec(c.infraResources, c.k3sConfigs, c.KCP), + ) +} + +// PatchMachines patches all the machines conditions. +func (c *ControlPlane) PatchMachines(ctx context.Context) error { + errList := make([]error, 0) + for i := range c.Machines { + machine := c.Machines[i] + if helper, ok := c.machinesPatchHelpers[machine.Name]; ok { + if err := helper.Patch(ctx, machine, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + infracontrolplanev1.MachineAgentHealthyCondition, + infracontrolplanev1.MachineEtcdMemberHealthyCondition, + }}); err != nil { + errList = append(errList, errors.Wrapf(err, "failed to patch machine %s", machine.Name)) + } + continue + } + errList = append(errList, errors.Errorf("failed to get patch helper for machine %s", machine.Name)) + } + return kerrors.NewAggregate(errList) +} diff --git a/controlplane/k3s/pkg/cluster/doc.go b/controlplane/k3s/pkg/cluster/doc.go new file mode 100644 index 00000000..86c2fc56 --- /dev/null +++ b/controlplane/k3s/pkg/cluster/doc.go @@ -0,0 +1,18 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +// Package cluster contains internal implementation details for the k3s Control Plane. +package cluster diff --git a/controlplane/k3s/pkg/cluster/filters.go b/controlplane/k3s/pkg/cluster/filters.go new file mode 100644 index 00000000..ab76c95e --- /dev/null +++ b/controlplane/k3s/pkg/cluster/filters.go @@ -0,0 +1,215 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cluster + +import ( + "reflect" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1" + infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1" +) + +// MatchesMachineSpec returns a filter to find all machines that matches with KCP config and do not require any rollout. +// Kubernetes version, infrastructure template, and K3sConfig field need to be equivalent. 
+func MatchesMachineSpec(infraConfigs map[string]*unstructured.Unstructured, machineConfigs map[string]*infrabootstrapv1.K3sConfig, kcp *infracontrolplanev1.K3sControlPlane) func(machine *clusterv1.Machine) bool { + return collections.And( + func(machine *clusterv1.Machine) bool { + return matchMachineTemplateMetadata(kcp, machine) + }, + collections.MatchesKubernetesVersion(kcp.Spec.Version), + MatchesK3sBootstrapConfig(machineConfigs, kcp), + MatchesTemplateClonedFrom(infraConfigs, kcp), + ) +} + +// MatchesTemplateClonedFrom returns a filter to find all machines that match a given KCP infra template. +func MatchesTemplateClonedFrom(infraConfigs map[string]*unstructured.Unstructured, kcp *infracontrolplanev1.K3sControlPlane) collections.Func { + return func(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + infraObj, found := infraConfigs[machine.Name] + if !found { + // Return true here because failing to get infrastructure machine should not be considered as unmatching. + return true + } + + clonedFromName, ok1 := infraObj.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation] + clonedFromGroupKind, ok2 := infraObj.GetAnnotations()[clusterv1.TemplateClonedFromGroupKindAnnotation] + if !ok1 || !ok2 { + // All kcp cloned infra machines should have this annotation. + // Missing the annotation may be due to older version machines or adopted machines. + // Should not be considered as mismatch. + return true + } + + // Check if the machine's infrastructure reference has been created from the current KCP infrastructure template. + if clonedFromName != kcp.Spec.MachineTemplate.InfrastructureRef.Name || + clonedFromGroupKind != kcp.Spec.MachineTemplate.InfrastructureRef.GroupVersionKind().GroupKind().String() { + return false + } + + // Check if the machine template metadata matches with the infrastructure object. 
+ if !matchMachineTemplateMetadata(kcp, infraObj) { + return false + } + return true + } +} + +// MatchesK3sBootstrapConfig checks if machine's K3sConfigSpec is equivalent with KCP's K3sConfigSpec. +func MatchesK3sBootstrapConfig(machineConfigs map[string]*infrabootstrapv1.K3sConfig, kcp *infracontrolplanev1.K3sControlPlane) collections.Func { + return func(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + + bootstrapRef := machine.Spec.Bootstrap.ConfigRef + if bootstrapRef == nil { + // Missing bootstrap reference should not be considered as unmatching. + // This is a safety precaution to avoid selecting machines that are broken, which in the future should be remediated separately. + return true + } + + machineConfig, found := machineConfigs[machine.Name] + if !found { + // Return true here because failing to get K3sConfig should not be considered as unmatching. + // This is a safety precaution to avoid rolling out machines if the client or the api-server is misbehaving. + return true + } + + // Check if the machine template metadata matches with the infrastructure object. + if !matchMachineTemplateMetadata(kcp, machineConfig) { + return false + } + + // Check if KCP and machine InitConfiguration or JoinConfiguration matches + // NOTE: only one between init configuration and join configuration is set on a machine, depending + // on the fact that the machine was the initial control plane node or a joining control plane node. + return matchInitOrJoinConfiguration(machineConfig, kcp) + } +} + +// matchInitOrJoinConfiguration verifies if KCP and machine ServerConfiguration or AgentConfiguration matches. +// NOTE: By extension this method takes care of detecting changes in other fields of the K3sConfig configuration (e.g. Files, Mounts etc.) 
+func matchInitOrJoinConfiguration(machineConfig *infrabootstrapv1.K3sConfig, kcp *infracontrolplanev1.K3sControlPlane) bool { + if machineConfig == nil { + // Return true here because failing to get K3sConfig should not be considered as unmatching. + // This is a safety precaution to avoid rolling out machines if the client or the api-server is misbehaving. + return true + } + + // takes the K3sConfigSpec from KCP and applies the transformations required + // to allow a comparison with the K3sConfig referenced from the machine. + kcpConfig := getAdjustedKcpConfig(kcp, machineConfig) + + // Default both K3sConfigSpecs before comparison. + // *Note* This assumes that newly added default values never + // introduce a semantic difference to the unset value. + // But that is something that is ensured by our API guarantees. + infrabootstrapv1.DefaultK3sConfigSpec(kcpConfig) + infrabootstrapv1.DefaultK3sConfigSpec(&machineConfig.Spec) + + // cleanups all the fields that are not relevant for the comparison. + cleanupConfigFields(kcpConfig, machineConfig) + + return reflect.DeepEqual(&machineConfig.Spec, kcpConfig) +} + +// getAdjustedKcpConfig takes the K3sConfigSpec from KCP and applies the transformations required +// to allow a comparison with the K3sConfig referenced from the machine. +// NOTE: The KCP controller applies a set of transformations when creating a K3sConfig referenced from the machine, +// mostly depending on the fact that the machine was the initial control plane node or a joining control plane node. +// In this function we don't have such information, so we are making the K3sConfigSpec similar to the KubeadmConfig. +func getAdjustedKcpConfig(kcp *infracontrolplanev1.K3sControlPlane, machineConfig *infrabootstrapv1.K3sConfig) *infrabootstrapv1.K3sConfigSpec { + kcpConfig := kcp.Spec.K3sConfigSpec.DeepCopy() + + // Machine's join configuration is nil when it is the first machine in the control plane. 
+ if machineConfig.Spec.AgentConfiguration == nil { + kcpConfig.AgentConfiguration = nil + } + + // Machine's init configuration is nil when the control plane is already initialized. + if machineConfig.Spec.ServerConfiguration == nil { + kcpConfig.ServerConfiguration = nil + } + + return kcpConfig +} + +// cleanupConfigFields cleanups all the fields that are not relevant for the comparison. +func cleanupConfigFields(kcpConfig *infrabootstrapv1.K3sConfigSpec, machineConfig *infrabootstrapv1.K3sConfig) { + // KCP ClusterConfiguration will only be compared with a machine's ClusterConfiguration annotation, so + // we are cleaning up from the reflect.DeepEqual comparison. + kcpConfig.Cluster = nil + machineConfig.Spec.Cluster = nil + + // If KCP JoinConfiguration is not present, set machine JoinConfiguration to nil (nothing can trigger rollout here). + // NOTE: this is required because CABPK applies an empty joinConfiguration in case no one is provided. + if kcpConfig.AgentConfiguration == nil { + machineConfig.Spec.AgentConfiguration = nil + } + + // Cleanup JoinConfiguration.Discovery from kcpConfig and machineConfig, because those info are relevant only for + // the join process and not for comparing the configuration of the machine. + emptyDiscovery := &infrabootstrapv1.Cluster{} + if kcpConfig.Cluster != nil { + kcpConfig.Cluster = emptyDiscovery + } + if machineConfig.Spec.Cluster != nil { + machineConfig.Spec.Cluster = emptyDiscovery + } + + // If KCP JoinConfiguration.ControlPlane is not present, set machine join configuration to nil (nothing can trigger rollout here). + // NOTE: this is required because CABPK applies an empty joinConfiguration.ControlPlane in case no one is provided. 
+ if kcpConfig.Cluster != nil && kcpConfig.Cluster.Server == "" && + machineConfig.Spec.Cluster != nil { + machineConfig.Spec.Cluster.Server = "" + } +} + +// matchMachineTemplateMetadata matches the machine template object meta information, +// specifically annotations and labels, against an object. +func matchMachineTemplateMetadata(kcp *infracontrolplanev1.K3sControlPlane, obj client.Object) bool { + // Check if annotations and labels match. + if !isSubsetMapOf(kcp.Spec.MachineTemplate.ObjectMeta.Annotations, obj.GetAnnotations()) { + return false + } + if !isSubsetMapOf(kcp.Spec.MachineTemplate.ObjectMeta.Labels, obj.GetLabels()) { + return false + } + return true +} + +func isSubsetMapOf(base map[string]string, existing map[string]string) bool { +loopBase: + for key, value := range base { + for existingKey, existingValue := range existing { + if existingKey == key && existingValue == value { + continue loopBase + } + } + // Return false right away if a key value pair wasn't found. + return false + } + return true +} diff --git a/controlplane/k3s/pkg/cluster/workload_cluster.go b/controlplane/k3s/pkg/cluster/workload_cluster.go new file mode 100644 index 00000000..9c8c95af --- /dev/null +++ b/controlplane/k3s/pkg/cluster/workload_cluster.go @@ -0,0 +1,105 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cluster + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/cluster-api/util" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + labelNodeRoleOldControlPlane = "node-role.kubernetes.io/master" // Deprecated: https://github.com/kubernetes/kubeadm/issues/2200 + labelNodeRoleControlPlane = "node-role.kubernetes.io/control-plane" +) + +// WorkloadCluster defines all behaviors necessary to upgrade kubernetes on a workload cluster +// +// TODO: Add a detailed description to each of these method definitions. +type WorkloadCluster interface { + // Basic health and status checks. + ClusterStatus(ctx context.Context) (Status, error) + UpdateAgentConditions(ctx context.Context, controlPlane *ControlPlane) + UpdateEtcdConditions(ctx context.Context, controlPlane *ControlPlane) +} + +// Workload defines operations on workload clusters. +type Workload struct { + Client ctrlclient.Client +} + +// Status holds stats information about the cluster. +type Status struct { + // Nodes are a total count of nodes + Nodes int32 + // ReadyNodes are the count of nodes that are reporting ready + ReadyNodes int32 + // HasK3sConfig will be true if the kubeadm config map has been uploaded, false otherwise. + HasK3sConfig bool +} + +func (w *Workload) getControlPlaneNodes(ctx context.Context) (*corev1.NodeList, error) { + controlPlaneNodes := &corev1.NodeList{} + controlPlaneNodeNames := sets.NewString() + + for _, label := range []string{labelNodeRoleOldControlPlane, labelNodeRoleControlPlane} { + nodes := &corev1.NodeList{} + if err := w.Client.List(ctx, nodes, ctrlclient.MatchingLabels(map[string]string{ + label: "", + })); err != nil { + return nil, err + } + + for i := range nodes.Items { + node := nodes.Items[i] + + // Continue if we already added that node. 
+ if controlPlaneNodeNames.Has(node.Name) { + continue + } + + controlPlaneNodeNames.Insert(node.Name) + controlPlaneNodes.Items = append(controlPlaneNodes.Items, node) + } + } + + return controlPlaneNodes, nil +} + +// ClusterStatus returns the status of the cluster. +func (w *Workload) ClusterStatus(ctx context.Context) (Status, error) { + status := Status{} + + // count the control plane nodes + nodes, err := w.getControlPlaneNodes(ctx) + if err != nil { + return status, err + } + + for _, node := range nodes.Items { + nodeCopy := node + status.Nodes++ + if util.IsNodeReady(&nodeCopy) { + status.ReadyNodes++ + } + } + + return status, nil +} diff --git a/controlplane/k3s/pkg/cluster/workload_cluster_conditions.go b/controlplane/k3s/pkg/cluster/workload_cluster_conditions.go new file mode 100644 index 00000000..e5e4a90f --- /dev/null +++ b/controlplane/k3s/pkg/cluster/workload_cluster_conditions.go @@ -0,0 +1,301 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cluster + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1" +) + +// UpdateEtcdConditions is responsible for updating machine conditions reflecting the status of all the etcd members. +// This operation is best effort, in the sense that in case of problems in retrieving member status, it sets +// the condition to Unknown state without returning any error. +func (w *Workload) UpdateEtcdConditions(ctx context.Context, controlPlane *ControlPlane) { + w.updateManagedEtcdConditions(ctx, controlPlane) +} + +func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane *ControlPlane) { + // NOTE: This methods uses control plane nodes only to get in contact with etcd but then it relies on etcd + // as ultimate source of truth for the list of members and for their health. 
+	controlPlaneNodes, err := w.getControlPlaneNodes(ctx)
+	if err != nil {
+		conditions.MarkUnknown(controlPlane.KCP, infracontrolplanev1.EtcdClusterHealthyCondition, infracontrolplanev1.EtcdClusterInspectionFailedReason, "Failed to list nodes which are hosting the etcd members")
+		for _, m := range controlPlane.Machines {
+			conditions.MarkUnknown(m, infracontrolplanev1.MachineEtcdMemberHealthyCondition, infracontrolplanev1.EtcdMemberInspectionFailedReason, "Failed to get the node which is hosting the etcd member")
+		}
+		return
+	}
+
+	for _, node := range controlPlaneNodes.Items {
+		var machine *clusterv1.Machine
+		for _, m := range controlPlane.Machines {
+			if m.Status.NodeRef != nil && m.Status.NodeRef.Name == node.Name {
+				machine = m
+			}
+		}
+
+		if machine == nil {
+			// If there are machines still provisioning there is the chance that a node might be linked to a machine soon,
+			// otherwise report the error at KCP level given that there is no machine to report on.
+			if hasProvisioningMachine(controlPlane.Machines) {
+				continue
+			}
+			continue
+		}
+
+		// If the machine is deleting, report all the conditions as deleting
+		if !machine.ObjectMeta.DeletionTimestamp.IsZero() {
+			conditions.MarkFalse(machine, infracontrolplanev1.MachineEtcdMemberHealthyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
+			continue
+		}
+
+		conditions.MarkTrue(machine, infracontrolplanev1.MachineEtcdMemberHealthyCondition)
+	}
+}
+
+// UpdateAgentConditions is responsible for updating machine conditions reflecting the status of all the control plane
+// components running in a static pod generated by kubeadm. This operation is best effort, in the sense that in case
+// of problems in retrieving the pod status, it sets the condition to Unknown state without returning any error.
+func (w *Workload) UpdateAgentConditions(ctx context.Context, controlPlane *ControlPlane) {
+	allMachinePodConditions := []clusterv1.ConditionType{
+		infracontrolplanev1.MachineAgentHealthyCondition,
+	}
+
+	// NOTE: this function uses control plane nodes from the workload cluster as a source of truth for the current state.
+	controlPlaneNodes, err := w.getControlPlaneNodes(ctx)
+	if err != nil {
+		for i := range controlPlane.Machines {
+			machine := controlPlane.Machines[i]
+			for _, condition := range allMachinePodConditions {
+				conditions.MarkUnknown(machine, condition, infracontrolplanev1.PodInspectionFailedReason, "Failed to get the node which is hosting this component")
+			}
+		}
+		conditions.MarkUnknown(controlPlane.KCP, infracontrolplanev1.ControlPlaneComponentsHealthyCondition, infracontrolplanev1.ControlPlaneComponentsInspectionFailedReason, "Failed to list nodes which are hosting control plane components")
+		return
+	}
+
+	// Update conditions for control plane components hosted as static pods on the nodes.
+	var kcpErrors []string
+
+	for _, node := range controlPlaneNodes.Items {
+		// Search for the machine corresponding to the node.
+		var machine *clusterv1.Machine
+		for _, m := range controlPlane.Machines {
+			if m.Status.NodeRef != nil && m.Status.NodeRef.Name == node.Name {
+				machine = m
+				break
+			}
+		}
+
+		// If there is no machine corresponding to a node, determine if this is an error or not.
+		if machine == nil {
+			// If there are machines still provisioning there is the chance that a node might be linked to a machine soon,
+			// otherwise report the error at KCP level given that there is no machine to report on.
+ if hasProvisioningMachine(controlPlane.Machines) { + continue + } + kcpErrors = append(kcpErrors, fmt.Sprintf("Control plane node %s does not have a corresponding machine", node.Name)) + continue + } + + // If the machine is deleting, report all the conditions as deleting + if !machine.ObjectMeta.DeletionTimestamp.IsZero() { + for _, condition := range allMachinePodConditions { + conditions.MarkFalse(machine, condition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + } + continue + } + + // If the node is Unreachable, information about static pods could be stale so set all conditions to unknown. + if nodeHasUnreachableTaint(node) { + // NOTE: We are assuming unreachable as a temporary condition, leaving to MHC + // the responsibility to determine if the node is unhealthy or not. + for _, condition := range allMachinePodConditions { + conditions.MarkUnknown(machine, condition, infracontrolplanev1.PodInspectionFailedReason, "Node is unreachable") + } + continue + } + + targetnode := corev1.Node{} + nodeKey := ctrlclient.ObjectKey{ + Namespace: metav1.NamespaceSystem, + Name: node.Name, + } + + if err := w.Client.Get(ctx, nodeKey, &targetnode); err != nil { + // If there is an error getting the Pod, do not set any conditions. 
+			if apierrors.IsNotFound(err) {
+				conditions.MarkFalse(machine, infracontrolplanev1.MachineAgentHealthyCondition, infracontrolplanev1.PodMissingReason, clusterv1.ConditionSeverityError, "Node %s is missing", nodeKey.Name)
+				continue
+			}
+			conditions.MarkUnknown(machine, infracontrolplanev1.MachineAgentHealthyCondition, infracontrolplanev1.PodInspectionFailedReason, "Failed to get node status")
+			continue
+		}
+
+		for _, condition := range targetnode.Status.Conditions {
+			if condition.Type == corev1.NodeReady && condition.Status == corev1.ConditionTrue {
+				conditions.MarkTrue(machine, infracontrolplanev1.MachineAgentHealthyCondition)
+			}
+		}
+	}
+
+	// If there are provisioned machines without corresponding nodes, report this as a failing conditions with SeverityError.
+	for i := range controlPlane.Machines {
+		machine := controlPlane.Machines[i]
+		if machine.Status.NodeRef == nil {
+			continue
+		}
+		found := false
+		for _, node := range controlPlaneNodes.Items {
+			if machine.Status.NodeRef.Name == node.Name {
+				found = true
+				break
+			}
+		}
+		if !found {
+			for _, condition := range allMachinePodConditions {
+				conditions.MarkFalse(machine, condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing node")
+			}
+		}
+	}
+
+	// Aggregate components error from machines at KCP level.
+	aggregateFromMachinesToKCP(aggregateFromMachinesToKCPInput{
+		controlPlane:      controlPlane,
+		machineConditions: allMachinePodConditions,
+		kcpErrors:         kcpErrors,
+		condition:         infracontrolplanev1.ControlPlaneComponentsHealthyCondition,
+		unhealthyReason:   controlplanev1.ControlPlaneComponentsUnhealthyReason,
+		unknownReason:     controlplanev1.ControlPlaneComponentsUnknownReason,
+		note:              "control plane",
+	})
+}
+
+func hasProvisioningMachine(machines collections.Machines) bool {
+	for _, machine := range machines {
+		if machine.Status.NodeRef == nil {
+			return true
+		}
+	}
+	return false
+}
+
+// nodeHasUnreachableTaint returns true if the node is unreachable from the node controller.
+func nodeHasUnreachableTaint(node corev1.Node) bool {
+	for _, taint := range node.Spec.Taints {
+		if taint.Key == corev1.TaintNodeUnreachable && taint.Effect == corev1.TaintEffectNoExecute {
+			return true
+		}
+	}
+	return false
+}
+
+type aggregateFromMachinesToKCPInput struct {
+	controlPlane      *ControlPlane
+	machineConditions []clusterv1.ConditionType
+	kcpErrors         []string
+	condition         clusterv1.ConditionType
+	unhealthyReason   string
+	unknownReason     string
+	note              string
+}
+
+// aggregateFromMachinesToKCP aggregates a group of conditions from machines to KCP.
+// NOTE: this func follows the same aggregation rules used by conditions.Merge thus giving priority to
+// errors, then warning, info down to unknown.
+func aggregateFromMachinesToKCP(input aggregateFromMachinesToKCPInput) {
+	// Aggregates machines for condition status.
+	// NB. A machine could be assigned to many groups, but only the group with the highest severity will be reported.
+ kcpMachinesWithErrors := sets.NewString() + kcpMachinesWithWarnings := sets.NewString() + kcpMachinesWithInfo := sets.NewString() + kcpMachinesWithTrue := sets.NewString() + kcpMachinesWithUnknown := sets.NewString() + + for i := range input.controlPlane.Machines { + machine := input.controlPlane.Machines[i] + for _, condition := range input.machineConditions { + if machineCondition := conditions.Get(machine, condition); machineCondition != nil { + switch machineCondition.Status { + case corev1.ConditionTrue: + kcpMachinesWithTrue.Insert(machine.Name) + case corev1.ConditionFalse: + switch machineCondition.Severity { + case clusterv1.ConditionSeverityInfo: + kcpMachinesWithInfo.Insert(machine.Name) + case clusterv1.ConditionSeverityWarning: + kcpMachinesWithWarnings.Insert(machine.Name) + case clusterv1.ConditionSeverityError: + kcpMachinesWithErrors.Insert(machine.Name) + } + case corev1.ConditionUnknown: + kcpMachinesWithUnknown.Insert(machine.Name) + } + } + } + } + + // In case of at least one machine with errors or KCP level errors (nodes without machines), report false, error. + if len(kcpMachinesWithErrors) > 0 { + input.kcpErrors = append(input.kcpErrors, fmt.Sprintf("Following machines are reporting %s errors: %s", input.note, strings.Join(kcpMachinesWithErrors.List(), ", "))) + } + if len(input.kcpErrors) > 0 { + conditions.MarkFalse(input.controlPlane.KCP, input.condition, input.unhealthyReason, clusterv1.ConditionSeverityError, strings.Join(input.kcpErrors, "; ")) + return + } + + // In case of no errors and at least one machine with warnings, report false, warnings. + if len(kcpMachinesWithWarnings) > 0 { + conditions.MarkFalse(input.controlPlane.KCP, input.condition, input.unhealthyReason, clusterv1.ConditionSeverityWarning, "Following machines are reporting %s warnings: %s", input.note, strings.Join(kcpMachinesWithWarnings.List(), ", ")) + return + } + + // In case of no errors, no warning, and at least one machine with info, report false, info. 
+	if len(kcpMachinesWithInfo) > 0 {
+		conditions.MarkFalse(input.controlPlane.KCP, input.condition, input.unhealthyReason, clusterv1.ConditionSeverityInfo, "Following machines are reporting %s info: %s", input.note, strings.Join(kcpMachinesWithInfo.List(), ", "))
+		return
+	}
+
+	// In case of no errors, no warning, no Info, and at least one machine with true conditions, report true.
+	if len(kcpMachinesWithTrue) > 0 {
+		conditions.MarkTrue(input.controlPlane.KCP, input.condition)
+		return
+	}
+
+	// Otherwise, if there is at least one machine with unknown, report unknown.
+	if len(kcpMachinesWithUnknown) > 0 {
+		conditions.MarkUnknown(input.controlPlane.KCP, input.condition, input.unknownReason, "Following machines are reporting unknown %s status: %s", input.note, strings.Join(kcpMachinesWithUnknown.List(), ", "))
+		return
+	}
+
+	// This last case should happen only if there are no provisioned machines, and thus without conditions.
+	// So there will be no condition at KCP level too.
+} diff --git a/go.mod b/go.mod index cb753d6a..c711336c 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/kubesphere/kubekey -go 1.18 +go 1.19 replace ( github.com/docker/distribution => github.com/docker/distribution v2.8.1+incompatible @@ -8,11 +8,13 @@ replace ( ) require ( + github.com/blang/semver v3.5.1+incompatible github.com/containerd/containerd v1.6.6 github.com/containers/image/v5 v5.23.0 github.com/deckarep/golang-set v1.8.0 github.com/dominodatalab/os-release v0.0.0-20190522011736-bcdb4a3e3c2f github.com/estesp/manifest-tool/v2 v2.0.3 + github.com/evanphx/json-patch v4.12.0+incompatible github.com/go-logr/logr v1.2.3 github.com/google/go-cmp v0.5.9 github.com/hashicorp/go-getter v1.6.2 @@ -35,16 +37,19 @@ require ( golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 gopkg.in/yaml.v2 v2.4.0 helm.sh/helm/v3 v3.9.4 - k8s.io/api v0.24.2 - k8s.io/apimachinery v0.24.2 - k8s.io/apiserver v0.24.2 - k8s.io/cli-runtime v0.24.2 - k8s.io/client-go v0.24.2 - k8s.io/klog/v2 v2.60.1 - k8s.io/kubectl v0.24.2 - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 - sigs.k8s.io/cluster-api v1.2.0 - sigs.k8s.io/cluster-api/test v1.2.0 + k8s.io/api v0.25.0 + k8s.io/apiextensions-apiserver v0.25.0 + k8s.io/apimachinery v0.25.0 + k8s.io/apiserver v0.25.0 + k8s.io/cli-runtime v0.25.0 + k8s.io/client-go v0.25.0 + k8s.io/cluster-bootstrap v0.25.0 + k8s.io/component-base v0.25.0 + k8s.io/klog/v2 v2.70.1 + k8s.io/kubectl v0.25.0 + k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed + sigs.k8s.io/cluster-api v1.2.4 + sigs.k8s.io/cluster-api/test v1.2.4 sigs.k8s.io/controller-runtime v0.12.3 sigs.k8s.io/yaml v1.3.0 ) @@ -74,14 +79,14 @@ require ( github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/alessio/shellescape v1.4.1 // indirect - github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e // indirect + github.com/antlr/antlr4/runtime/Go/antlr 
v0.0.0-20220418222510-f25a4f6275ed // indirect github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect github.com/aws/aws-sdk-go v1.44.102 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect - github.com/blang/semver v3.5.1+incompatible // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect github.com/containerd/cgroups v1.0.4 // indirect github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect @@ -101,7 +106,6 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect github.com/emicklei/go-restful/v3 v3.8.0 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/camelcase v1.0.0 // indirect @@ -111,7 +115,6 @@ require ( github.com/ghodss/yaml v1.0.0 // indirect github.com/go-errors/errors v1.0.1 // indirect github.com/go-gorp/gorp/v3 v3.0.2 // indirect - github.com/go-logr/zapr v1.2.0 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.19.5 // indirect github.com/go-openapi/swag v0.19.14 // indirect @@ -122,7 +125,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/btree v1.0.1 // indirect - github.com/google/cel-go v0.10.1 // indirect + github.com/google/cel-go v0.12.4 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-containerregistry v0.11.0 // indirect 
github.com/google/go-github/v45 v45.2.0 // indirect @@ -191,7 +194,6 @@ require ( github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.1 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect github.com/proglottis/gpgme v0.1.3 // indirect github.com/prometheus/client_golang v1.12.2 // indirect github.com/prometheus/client_model v0.2.0 // indirect @@ -208,7 +210,6 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect - github.com/stretchr/testify v1.8.0 // indirect github.com/subosito/gotenv v1.3.0 // indirect github.com/sylabs/sif/v2 v2.8.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect @@ -222,14 +223,11 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect + github.com/xlab/treeprint v1.1.0 // indirect go.etcd.io/bbolt v1.3.6 // indirect go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect go.opencensus.io v0.23.0 // indirect go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect - go.uber.org/atomic v1.9.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - go.uber.org/zap v1.19.1 // indirect golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect @@ -249,14 +247,11 @@ require ( gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.24.2 // 
indirect - k8s.io/cluster-bootstrap v0.24.0 // indirect - k8s.io/component-base v0.24.2 // indirect - k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 // indirect + k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect oras.land/oras-go v1.2.0 // indirect - sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/kind v0.14.0 // indirect - sigs.k8s.io/kustomize/api v0.11.4 // indirect - sigs.k8s.io/kustomize/kyaml v0.13.6 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + sigs.k8s.io/kustomize/api v0.12.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index 27abc1ae..418d4617 100644 --- a/go.sum +++ b/go.sum @@ -74,12 +74,10 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= 
github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= @@ -100,7 +98,6 @@ github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0 github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= @@ -137,7 +134,6 @@ github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfy github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= @@ -159,23 +155,19 @@ github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVK github.com/alessio/shellescape v1.4.1/go.mod 
h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e h1:GCzyKMDDjSGnlpl3clrdAK7I1AaVoaiKDOYkUzChZzg= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed h1:ue9pVfIcP+QMEjfgo/Ez4ZjNZfonGgR6NgjMaJMu1Cg= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.44.102 
h1:6tUCTGL2UDbFZae1TLGk8vTgeXuzkb8KbAe2FiAeKHc= github.com/aws/aws-sdk-go v1.44.102/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -189,6 +181,7 @@ github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqO github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bshuster-repo/logrus-logstash-hook v1.0.2 h1:JYRWo+QGnQdedgshosug9hxpPYTB9oJ1ZZD3fY31alU= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= @@ -196,14 +189,12 @@ github.com/bugsnag/bugsnag-go v2.1.2+incompatible h1:E7dor84qzwUO8KdCM68CZwq9QOS github.com/bugsnag/panicwrap v1.3.4 h1:A6sXFtDGsgU/4BLf5JT0o5uYg3EeKgGx3Sfs+/uk3pU= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= 
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= @@ -228,9 +219,6 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= 
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= @@ -368,7 +356,6 @@ github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjI github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/deislabs/oras v0.9.0 h1:R6PRN3bTruUjHcGKgdteurzbpsCxwf3XJCLsxLFyBuU= @@ -426,7 +413,6 @@ github.com/estesp/manifest-tool/v2 v2.0.3 h1:F9HMOqcXvtW+8drQB+BjNRU/+bLXOwCfj3m github.com/estesp/manifest-tool/v2 v2.0.3/go.mod h1:Suh+tbKQvKHcs4Vltzy8gwZk1y9eSRI635gT4gFw5Ss= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= @@ -442,10 +428,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv 
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -456,8 +440,6 @@ github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXt github.com/fvbommel/sortorder v1.0.1 h1:dSnXLt4mJYH25uDDGa3biZNQsozaUWDSWeKJ0qqFfzE= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/garyburd/redigo v1.6.3 h1:HCeeRluvAgMusMomi1+6Y5dmFOdYV/JzoRrrbFlkGIc= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -481,8 +463,7 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 
h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= +github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -540,7 +521,6 @@ github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQA github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -576,15 +556,13 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod 
h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.10.1 h1:MQBGSZGnDwh7T/un+mzGKOMz3x+4E/GDPprWjDL+1Jg= -github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= +github.com/google/cel-go v0.12.4 h1:YINKfuHZ8n72tPOqSPZBwGiDpew2CJS48mdM5W8LZQU= +github.com/google/cel-go v0.12.4/go.mod h1:Av7CU6r6X3YmcHR9GXqVDaEJYfEtSxl6wvIjUQTriCw= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -659,7 +637,6 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK github.com/googleapis/gax-go/v2 v2.5.1 h1:kBRZU0PSuI7PspsSb/ChWoVResUcwNVIdpB049pKTiw= github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= @@ 
-678,7 +655,6 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWet github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -849,7 +825,6 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= @@ -935,7 +910,6 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod 
h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -946,7 +920,6 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= @@ -992,7 +965,6 @@ github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuh github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4= github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= 
@@ -1074,7 +1046,6 @@ github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUA github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rubenv/sql-migrate v1.1.1 h1:haR5Hn8hbW9/SpAICrXoZqXnywS7Q5WijwkQENPeNWY= github.com/rubenv/sql-migrate v1.1.1/go.mod h1:/7TZymwxN8VWumcIxw1jjHEcR1djpdkMHQPT4FWdnbQ= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1087,7 +1058,6 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -1111,7 +1081,6 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 
v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -1183,7 +1152,6 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= @@ -1218,8 +1186,8 @@ github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= +github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod 
h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1227,7 +1195,6 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940 h1:p7OofyZ509h8DmPLh8Hn+EIIZm/xYhdZHJ9GnXHdr6U= github.com/yvasiyarov/gorelic v0.0.7 h1:4DTF1WOM2ZZS/xMOkTFBOcb6XiHu/PKn3rVo6dbewQE= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9 h1:AsFN8kXcCVkUFHyuzp1FtYbzp1nCO/H6+1uPSGEyPzM= @@ -1240,15 +1207,8 @@ go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod 
h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= @@ -1260,17 +1220,6 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod 
h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= @@ -1278,18 +1227,13 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go4.org v0.0.0-20201209231011-d4a079459e60 h1:iqAGo78tVOJXELHQFRjR6TMwItrvXH4hrGJ32I/NFF8= go4.org/intern v0.0.0-20211027215823-ae77deb06f29 h1:UXLjNohABv4S58tHmeuIZDO6e3mHpW2Dx33gaNt03LE= go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 h1:FyBZqvoA/jbNzuAWLQE2kG820zMAkcilx6BMjGbL/E4= @@ -1316,7 +1260,6 
@@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 h1:a5Yg6ylndHHYJqIPrdq0AhvR6KTvDTAvgBtaidhEevY= golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -1355,7 +1298,6 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1398,7 +1340,6 @@ golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -1409,7 +1350,6 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1511,7 +1451,6 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1555,11 +1494,9 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1603,8 +1540,6 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ= golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1629,7 +1564,6 @@ golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDq golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1675,7 +1609,6 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1760,7 +1693,6 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1773,7 +1705,6 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1807,7 +1738,6 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= @@ -1945,39 +1875,35 @@ inet.af/netaddr v0.0.0-20220617031823-097006376321 h1:B4dC8ySKTQXasnjDTMsoCMf1sQ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= -k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI= -k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= -k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= +k8s.io/api v0.25.0 h1:H+Q4ma2U/ww0iGB78ijZx6DRByPz6/733jIuFpX70e0= +k8s.io/api v0.25.0/go.mod h1:ttceV1GyV1i1rnmvzT3BST08N6nGt+dudGrquzVQWPk= +k8s.io/apiextensions-apiserver v0.25.0 h1:CJ9zlyXAbq0FIW8CD7HHyozCMBpDSiH7EdrSTCZcZFY= +k8s.io/apiextensions-apiserver v0.25.0/go.mod 
h1:3pAjZiN4zw7R8aZC5gR0y3/vCkGlAjCazcg1me8iB/E= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM= -k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/apimachinery v0.25.0 h1:MlP0r6+3XbkUG2itd6vp3oxbtdQLQI94fD5gCS+gnoU= +k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/apiserver v0.24.2 h1:orxipm5elPJSkkFNlwH9ClqaKEDJJA3yR2cAAlCnyj4= -k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= -k8s.io/cli-runtime v0.24.2 h1:KxY6tSgPGsahA6c1/dmR3uF5jOxXPx2QQY6C5ZrLmtE= -k8s.io/cli-runtime v0.24.2/go.mod h1:1LIhKL2RblkhfG4v5lZEt7FtgFG5mVb8wqv5lE9m5qY= +k8s.io/apiserver v0.25.0 h1:8kl2ifbNffD440MyvHtPaIz1mw4mGKVgWqM0nL+oyu4= +k8s.io/apiserver v0.25.0/go.mod h1:BKwsE+PTC+aZK+6OJQDPr0v6uS91/HWxX7evElAH6xo= +k8s.io/cli-runtime v0.25.0 h1:XBnTc2Fi+w818jcJGzhiJKQuXl8479sZ4FhtV5hVJ1Q= +k8s.io/cli-runtime v0.25.0/go.mod h1:bHOI5ZZInRHhbq12OdUiYZQN8ml8aKZLwQgt9QlLINw= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= -k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= -k8s.io/cluster-bootstrap v0.24.0 
h1:MTs2x3Vfcl/PWvB5bfX7gzTFRyi4ZSbNSQgGJTCb6Sw= -k8s.io/cluster-bootstrap v0.24.0/go.mod h1:xw+IfoaUweMCAoi+VYhmqkcjii2G7gNg59dmGn7hi0g= +k8s.io/client-go v0.25.0 h1:CVWIaCETLMBNiTUta3d5nzRbXvY5Hy9Dpl+VvREpu5E= +k8s.io/client-go v0.25.0/go.mod h1:lxykvypVfKilxhTklov0wz1FoaUZ8X4EwbhS6rpRfN8= +k8s.io/cluster-bootstrap v0.25.0 h1:KJ2/r0dV+bLfTK5EBobAVKvjGel3N4Qqh3bvnzh9qPk= +k8s.io/cluster-bootstrap v0.25.0/go.mod h1:x/TCtY3EiuR/rODkA3SvVQT3uSssQLf9cXcmSjdDTe0= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= -k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU= -k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= -k8s.io/component-helpers v0.24.2/go.mod h1:TRQPBQKfmqkmV6c0HAmUs8cXVNYYYLsXy4zu8eODi9g= +k8s.io/component-base v0.25.0 h1:haVKlLkPCFZhkcqB6WCvpVxftrg6+FK5x1ZuaIDaQ5Y= +k8s.io/component-base v0.25.0/go.mod h1:F2Sumv9CnbBlqrpdf7rKZTmmd2meJq0HizeyY/yAFxk= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= @@ -1985,27 +1911,21 @@ k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo 
v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= +k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 h1:yEQKdMCjzAOvGeiTwG4hO/hNVNtDOuUFvMUZ0OlaIzs= -k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8/go.mod h1:mbJ+NSUoAhuR14N0S63bPkh8MGVSo3VYSGZtH/mfMe0= -k8s.io/kubectl v0.24.2 h1:+RfQVhth8akUmIc2Ge8krMl/pt66V7210ka3RE/p0J4= -k8s.io/kubectl v0.24.2/go.mod h1:+HIFJc0bA6Tzu5O/YcuUt45APAxnNL8LeMuXwoiGsPg= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= +k8s.io/kubectl v0.25.0 h1:/Wn1cFqo8ik3iee1EvpxYre3bkWsGLXzLQI6uCCAkQc= +k8s.io/kubectl v0.25.0/go.mod h1:n16ULWsOl2jmQpzt2o7Dud1t4o0+Y186ICb4O+GwKAU= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/metrics 
v0.24.2/go.mod h1:5NWURxZ6Lz5gj8TFU83+vdWIVASx7W8lwPpHYCqopMo= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= oras.land/oras-go v1.2.0 h1:yoKosVIbsPoFMqAIFHTnrmOuafHal+J/r+I5bdbVWu4= oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -2014,28 +1934,25 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= -sigs.k8s.io/cluster-api v1.2.0 h1:jAKG78SQ6nUHfqbRD64FrYZR79P+J7/aiH17zbpsWcQ= -sigs.k8s.io/cluster-api v1.2.0/go.mod h1:oiuV+mlCV1QxDnuI+PfElFlAfuXHo9ZGVBojoihVtHY= -sigs.k8s.io/cluster-api/test v1.2.0 h1:l5rQajf3yg9kYbMTVpKX4y2j0SQixEdCqLx9ANrNvF8= -sigs.k8s.io/cluster-api/test v1.2.0/go.mod h1:JdMqpv9rEOFWQVQ8danpBduqxoQkZMiOvpIGJ7v8qjw= +sigs.k8s.io/cluster-api v1.2.4 h1:wxfm/p8y+Q3qWVkkIPAIVqabA5lJVvqoRA02Nhup3uk= +sigs.k8s.io/cluster-api v1.2.4/go.mod h1:YaLJOC9mSsIOpdbh7BpthGmC8uxIJADzrMMIGpgahfM= +sigs.k8s.io/cluster-api/test v1.2.4 
h1:hdYZ8HcQU2kYguE90EreRtzh7Zg6/tG9vW6JafdKX6M= +sigs.k8s.io/cluster-api/test v1.2.4/go.mod h1:C+UT2CXWNu3eAoeI0HHI19ex90pAdzAqR6YjhxRNHyM= sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.14.0 h1:cNmI3jGBvp7UegEGbC5we8plDtCUmaNRL+bod7JoSCE= sigs.k8s.io/kind v0.14.0/go.mod h1:UrFRPHG+2a5j0Q7qiR4gtJ4rEyn8TuMQwuOPf+m4oHg= -sigs.k8s.io/kustomize/api v0.11.4 h1:/0Mr3kfBBNcNPOW5Qwk/3eb8zkswCwnqQxxKtmrTkRo= -sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI= -sigs.k8s.io/kustomize/cmd/config v0.10.6/go.mod h1:/S4A4nUANUa4bZJ/Edt7ZQTyKOY9WCER0uBS1SW2Rco= -sigs.k8s.io/kustomize/kustomize/v4 v4.5.4/go.mod h1:Zo/Xc5FKD6sHl0lilbrieeGeZHVYCA4BzxeAaLI05Bg= -sigs.k8s.io/kustomize/kyaml v0.13.6 h1:eF+wsn4J7GOAXlvajv6OknSunxpcOBQQqsnPxObtkGs= -sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg= +sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= +sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= +sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= +sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= 
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index c3681aba..62802d18 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,5 +1,5 @@ /* -Copyright 2020 The KubeSphere Authors. +Copyright 2022 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/main.go b/main.go index b815292e..dd03ee07 100644 --- a/main.go +++ b/main.go @@ -59,7 +59,7 @@ var ( metricsAddr string enableLeaderElection bool leaderElectionNamespace string - probeAddr string + healthAddr string watchFilterValue string kkClusterConcurrency int kkInstanceConcurrency int @@ -93,7 +93,7 @@ func main() { SyncPeriod: &syncPeriod, Namespace: watchNamespace, Port: 9443, - HealthProbeBindAddress: probeAddr, + HealthProbeBindAddress: healthAddr, }) if err != nil { setupLog.Error(err, "unable to start manager") @@ -235,12 +235,10 @@ func initFlags(fs *pflag.FlagSet) { "Number of KKMachines to process simultaneously.", ) - fs.StringVar( - &probeAddr, - "health-probe-bind-address", - ":8081", - "The address the probe endpoint binds to.", - ) + fs.StringVar(&healthAddr, + "health-addr", + ":9440", + "The address the health endpoint binds to.") fs.StringVar( &watchFilterValue, diff --git a/pkg/interface.go b/pkg/interface.go index 165c1e42..2ab99d1d 100644 --- a/pkg/interface.go +++ b/pkg/interface.go @@ -85,6 +85,8 @@ type ClusterScoper interface { InfraClusterName() string // KubernetesClusterName is the name of the Kubernetes cluster. KubernetesClusterName() string + // Distribution returns Kubernetes distribution of the cluster. + Distribution() string // RootFs is the cluster scope rootfs RootFs() rootfs.Interface // PatchObject persists the cluster configuration and status. diff --git a/pkg/scope/cluster.go b/pkg/scope/cluster.go index 7cdcb945..0daa5062 100644 --- a/pkg/scope/cluster.go +++ b/pkg/scope/cluster.go @@ -224,6 +224,11 @@ func (s *ClusterScope) ControllerName() string { return s.controllerName } +// Distribution returns Kubernetes distribution of the cluster. +func (s *ClusterScope) Distribution() string { + return s.KKCluster.Spec.Distribution +} + // RootFs returns the CAPKK rootfs interface. 
func (s *ClusterScope) RootFs() rootfs.Interface { return s.rootFs diff --git a/pkg/service/binary/interface.go b/pkg/service/binary/interface.go new file mode 100644 index 00000000..7973616d --- /dev/null +++ b/pkg/service/binary/interface.go @@ -0,0 +1,44 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package binary define the binaries operations on the remote instance. +package binary + +import ( + "time" + + infrav1 "github.com/kubesphere/kubekey/api/v1beta1" + "github.com/kubesphere/kubekey/pkg/clients/ssh" + "github.com/kubesphere/kubekey/pkg/scope" + "github.com/kubesphere/kubekey/pkg/service/binary/k3s" + "github.com/kubesphere/kubekey/pkg/service/binary/kubernetes" +) + +// Binary defines the interface for the binaries operations. +type Binary interface { + Download(timeout time.Duration) error +} + +// NewService returns a new service for the binaries operations. 
+func NewService(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope, distribution string) Binary { + switch distribution { + case infrav1.KUBERNETES: + return kubernetes.NewService(sshClient, scope, instanceScope) + case infrav1.K3S: + return k3s.NewService(sshClient, scope, instanceScope) + } + return nil +} diff --git a/pkg/service/binary/k3s/binary.go b/pkg/service/binary/k3s/binary.go new file mode 100644 index 00000000..51cdd048 --- /dev/null +++ b/pkg/service/binary/k3s/binary.go @@ -0,0 +1,78 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package k3s + +import ( + "path/filepath" + "time" + + infrav1 "github.com/kubesphere/kubekey/api/v1beta1" + "github.com/kubesphere/kubekey/pkg/service/operation" + "github.com/kubesphere/kubekey/pkg/service/operation/file" + "github.com/kubesphere/kubekey/pkg/service/util" +) + +// Download downloads binaries. +func (s *Service) Download(timeout time.Duration) error { + if err := s.DownloadAll(timeout); err != nil { + return err + } + return nil +} + +// DownloadAll downloads all binaries. 
+func (s *Service) DownloadAll(timeout time.Duration) error { + k3s, err := s.getK3sService(s.instanceScope.KubernetesVersion(), s.instanceScope.Arch()) + if err != nil { + return err + } + kubecni, err := s.getKubecniService(file.KubecniDefaultVersion, s.instanceScope.Arch()) + if err != nil { + return err + } + + binaries := []operation.Binary{ + k3s, + kubecni, + } + + zone := s.scope.ComponentZone() + host := s.scope.ComponentHost() + overrideMap := make(map[string]infrav1.Override) + for _, o := range s.scope.ComponentOverrides() { + overrideMap[o.ID+o.Version+o.Arch] = o + } + + for _, b := range binaries { + s.instanceScope.V(4).Info("download binary", "binary", b.Name(), "version", b.Version(), + "url", b.URL().String()) + + override := overrideMap[b.ID()+b.Version()+b.Arch()] + if err := util.DownloadAndCopy(b, zone, host, override.Path, override.URL, override.Checksum.Value, timeout); err != nil { + return err + } + if err := b.Chmod("+x"); err != nil { + return err + } + } + + if _, err := s.sshClient.SudoCmdf("tar Cxzvf %s %s", filepath.Dir(kubecni.RemotePath()), kubecni.RemotePath()); err != nil { + return err + } + + return nil +} diff --git a/pkg/service/binary/k3s/doc.go b/pkg/service/binary/k3s/doc.go new file mode 100644 index 00000000..bbf12ea5 --- /dev/null +++ b/pkg/service/binary/k3s/doc.go @@ -0,0 +1,18 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +// Package k3s define the binaries operations on the remote instance. +package k3s diff --git a/pkg/service/binary/k3s/service.go b/pkg/service/binary/k3s/service.go new file mode 100644 index 00000000..435ae5cf --- /dev/null +++ b/pkg/service/binary/k3s/service.go @@ -0,0 +1,58 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package k3s + +import ( + "github.com/kubesphere/kubekey/pkg/clients/ssh" + "github.com/kubesphere/kubekey/pkg/scope" + "github.com/kubesphere/kubekey/pkg/service/operation" + "github.com/kubesphere/kubekey/pkg/service/operation/file" +) + +// Service holds a collection of interfaces. +// The interfaces are broken down like this to group functions together. +type Service struct { + sshClient ssh.Interface + scope scope.KKInstanceScope + instanceScope *scope.InstanceScope + + k3sFactory func(sshClient ssh.Interface, version, arch string) (operation.Binary, error) + kubecniFactory func(sshClient ssh.Interface, version, arch string) (operation.Binary, error) +} + +// NewService returns a new service given the remote instance. 
+func NewService(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope) *Service { + return &Service{ + sshClient: sshClient, + scope: scope, + instanceScope: instanceScope, + } +} + +func (s *Service) getK3sService(version, arch string) (operation.Binary, error) { + if s.k3sFactory != nil { + return s.k3sFactory(s.sshClient, version, arch) + } + return file.NewK3s(s.sshClient, s.scope.RootFs(), version, arch) +} + +func (s *Service) getKubecniService(version, arch string) (operation.Binary, error) { + if s.kubecniFactory != nil { + return s.kubecniFactory(s.sshClient, version, arch) + } + return file.NewKubecni(s.sshClient, s.scope.RootFs(), version, arch) +} diff --git a/pkg/service/binary/binary.go b/pkg/service/binary/kubernetes/binary.go similarity index 92% rename from pkg/service/binary/binary.go rename to pkg/service/binary/kubernetes/binary.go index ef5eb85a..e476bba0 100644 --- a/pkg/service/binary/binary.go +++ b/pkg/service/binary/kubernetes/binary.go @@ -14,7 +14,7 @@ limitations under the License. */ -package binary +package kubernetes import ( "embed" @@ -31,6 +31,23 @@ import ( //go:embed templates var f embed.FS +// Download downloads binaries. +func (s *Service) Download(timeout time.Duration) error { + if err := s.DownloadAll(timeout); err != nil { + return err + } + + if err := s.ConfigureKubelet(); err != nil { + return err + } + + if err := s.ConfigureKubeadm(); err != nil { + return err + } + + return nil +} + // DownloadAll downloads all binaries. func (s *Service) DownloadAll(timeout time.Duration) error { kubeadm, err := s.getKubeadmService(s.instanceScope.KubernetesVersion(), s.instanceScope.Arch()) diff --git a/pkg/service/binary/kubernetes/doc.go b/pkg/service/binary/kubernetes/doc.go new file mode 100644 index 00000000..eae16622 --- /dev/null +++ b/pkg/service/binary/kubernetes/doc.go @@ -0,0 +1,18 @@ +/* + Copyright 2022 The KubeSphere Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package kubernetes define the binaries operations on the remote instance. +package kubernetes diff --git a/pkg/service/binary/service.go b/pkg/service/binary/kubernetes/service.go similarity index 99% rename from pkg/service/binary/service.go rename to pkg/service/binary/kubernetes/service.go index 2ed68121..7a28d2b3 100644 --- a/pkg/service/binary/service.go +++ b/pkg/service/binary/kubernetes/service.go @@ -14,7 +14,7 @@ limitations under the License. */ -package binary +package kubernetes import ( "text/template" diff --git a/pkg/service/binary/templates/kubelet.conf b/pkg/service/binary/kubernetes/templates/kubelet.conf similarity index 100% rename from pkg/service/binary/templates/kubelet.conf rename to pkg/service/binary/kubernetes/templates/kubelet.conf diff --git a/pkg/service/binary/templates/kubelet.service b/pkg/service/binary/kubernetes/templates/kubelet.service similarity index 100% rename from pkg/service/binary/templates/kubelet.service rename to pkg/service/binary/kubernetes/templates/kubelet.service diff --git a/pkg/service/interface.go b/pkg/service/interface.go index 98c7f606..0c6cdce0 100644 --- a/pkg/service/interface.go +++ b/pkg/service/interface.go @@ -46,9 +46,7 @@ type Repository interface { // BinaryService is the interface for binary provision. 
type BinaryService interface { - DownloadAll(timeout time.Duration) error - ConfigureKubelet() error - ConfigureKubeadm() error + Download(timeout time.Duration) error } // ContainerManager is the interface for container manager provision. diff --git a/pkg/service/operation/file/k3s.go b/pkg/service/operation/file/k3s.go new file mode 100644 index 00000000..af1d2949 --- /dev/null +++ b/pkg/service/operation/file/k3s.go @@ -0,0 +1,80 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package file + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/kubesphere/kubekey/pkg/clients/ssh" + "github.com/kubesphere/kubekey/pkg/rootfs" +) + +// K3s info +const ( + K3sName = "k3s" + K3sID = "k3s" + K3sURLPathTmpl = "/k3s-io/k3s/releases/download/%s+k3s1/k3s%s" + K3sURLPathTmplCN = "/k3s/releases/download/%s+k3s1/linux/%s/k3s" +) + +// K3s is a Binary for k3s. +type K3s struct { + *Binary +} + +// NewK3s returns a new K3s. 
+func NewK3s(sshClient ssh.Interface, rootFs rootfs.Interface, version, arch string) (*K3s, error) { + fileName := K3sName + file, err := NewFile(Params{ + SSHClient: sshClient, + RootFs: rootFs, + Type: FileBinary, + Name: fileName, + LocalFullPath: filepath.Join(rootFs.ClusterRootFsDir(), fileName), + RemoteFullPath: filepath.Join(BinDir, fileName), + }) + if err != nil { + return nil, err + } + + if arch == "amd64" { + arch = "" + } else { + arch = "-" + arch + } + + u := parseURL(DefaultDownloadHostGoogle, fmt.Sprintf(K3sURLPathTmpl, version, arch)) + binary := NewBinary(BinaryParams{ + File: file, + ID: K3sID, + Version: version, + Arch: arch, + URL: u, + }) + + return &K3s{binary}, nil +} + +// SetZone override Binary's SetZone method. +func (k *K3s) SetZone(zone string) { + if strings.EqualFold(zone, ZONE) { + k.SetHost(DefaultDownloadHostQingStor) + k.SetPath(fmt.Sprintf(K3sURLPathTmplCN, k.version, k.arch)) + } +} diff --git a/pkg/util/filesystem/fs.go b/pkg/util/filesystem/fs.go index b2611b46..b24893c0 100644 --- a/pkg/util/filesystem/fs.go +++ b/pkg/util/filesystem/fs.go @@ -18,7 +18,6 @@ package filesystem import ( "fmt" - "io/ioutil" "os" "github.com/kubesphere/kubekey/pkg/util/hash" @@ -63,7 +62,7 @@ func (f FileSystem) SHA256Sum(localPath string) string { // MkLocalTmpDir creates a temporary directory and returns the path func (f FileSystem) MkLocalTmpDir() (string, error) { - tempDir, err := ioutil.TempDir(DefaultLocalTmpDir, ".Tmp-") + tempDir, err := os.MkdirTemp(DefaultLocalTmpDir, ".Tmp-") if err != nil { return "", err } @@ -72,7 +71,7 @@ func (f FileSystem) MkLocalTmpDir() (string, error) { // MkLocalTmpFile creates a temporary file and returns the path. 
func (f FileSystem) MkLocalTmpFile(dir, pattern string) (string, error) { - file, err := ioutil.TempFile(dir, pattern) + file, err := os.CreateTemp(dir, pattern) if err != nil { return "", err } diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml index cdc9c379..73354a23 100644 --- a/templates/cluster-template.yaml +++ b/templates/cluster-template.yaml @@ -6,10 +6,10 @@ metadata: spec: clusterNetwork: services: - cidrBlocks: ['${SERVICE_CIDRS}'] + cidrBlocks: ["10.233.0.0/18"] pods: - cidrBlocks: ['${POD_CIDRS}'] - serviceDomain: '${SERVICE_DOMAIN}' + cidrBlocks: ["10.233.64.0/18"] + serviceDomain: "cluster.local" infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: KKCluster @@ -38,8 +38,6 @@ kind: KubeadmControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta1 metadata: name: "${CLUSTER_NAME}-control-plane" - labels: - kcp-adoption.step2: "" spec: replicas: ${CONTROL_PLANE_MACHINE_COUNT} machineTemplate: diff --git a/test/e2e/Makefile b/test/e2e/Makefile index cb3b575d..41787ad2 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -81,11 +81,13 @@ cluster-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1beta ## Testing ## -------------------------------------- -GINKGO_FOCUS ?= +GINKGO_FOCUS ?= GINKGO_SKIP ?= -GINKGO_NODES ?= 1 -E2E_CONF_FILE ?= ${REPO_ROOT}/test/e2e/config/e2e_conf.yaml +GINKGO_NODES ?= 1 +E2E_CONF_FILE ?= ${REPO_ROOT}/test/e2e/config/e2e_conf.yaml +E2E_K3S_CONF_FILE ?= ${REPO_ROOT}/test/e2e/config/e2e_k3s_conf.yaml ARTIFACTS ?= ${REPO_ROOT}/_artifacts +ARTIFACTS_K3S ?= ${REPO_ROOT}/_artifacts_k3s SKIP_RESOURCE_CLEANUP ?= false USE_EXISTING_CLUSTER ?= false GINKGO_NOCOLOR ?= false @@ -100,4 +102,11 @@ run: $(GINKGO) $(KIND) cluster-templates ## Run the end-to-end tests $(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) $(GINKGO_ARGS) . 
-- \ -e2e.artifacts-folder="$(ARTIFACTS)" \ -e2e.config="$(E2E_CONF_FILE)" \ + -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) + +.PHONY: run-k3s +run-k3s: $(GINKGO) $(KIND) cluster-templates ## Run the end-to-end tests + $(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) $(GINKGO_ARGS) . -- \ + -e2e.artifacts-folder="$(ARTIFACTS_K3S)" \ + -e2e.config="$(E2E_K3S_CONF_FILE)" \ -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) \ No newline at end of file diff --git a/test/e2e/common.go b/test/e2e/common.go index 7b4beb12..3187648d 100644 --- a/test/e2e/common.go +++ b/test/e2e/common.go @@ -24,19 +24,13 @@ import ( // Test suite constants for e2e config variables. const ( - KubernetesVersionManagement = "KUBERNETES_VERSION_MANAGEMENT" - KubernetesVersion = "KUBERNETES_VERSION" - CNIPath = "CNI" - CNIResources = "CNI_RESOURCES" - KubernetesVersionUpgradeFrom = "KUBERNETES_VERSION_UPGRADE_FROM" - KubernetesVersionUpgradeTo = "KUBERNETES_VERSION_UPGRADE_TO" - CPMachineTemplateUpgradeTo = "CONTROL_PLANE_MACHINE_TEMPLATE_UPGRADE_TO" - WorkersMachineTemplateUpgradeTo = "WORKERS_MACHINE_TEMPLATE_UPGRADE_TO" - EtcdVersionUpgradeTo = "ETCD_VERSION_UPGRADE_TO" - CoreDNSVersionUpgradeTo = "COREDNS_VERSION_UPGRADE_TO" - IPFamily = "IP_FAMILY" + KubernetesVersionManagement = "KUBERNETES_VERSION_MANAGEMENT" + CNIPath = "CNI" + CNIResources = "CNI_RESOURCES" + IPFamily = "IP_FAMILY" ) +// Byf is a wrapper around By that formats its arguments. 
func Byf(format string, a ...interface{}) { By(fmt.Sprintf(format, a...)) } diff --git a/test/e2e/config/e2e_conf.yaml b/test/e2e/config/e2e_conf.yaml index 4150929a..0cb56123 100644 --- a/test/e2e/config/e2e_conf.yaml +++ b/test/e2e/config/e2e_conf.yaml @@ -2,7 +2,7 @@ images: # Use local dev images built source tree; - - name: kubespheredev/capkk-manager:e2e + - name: kubespheredev/capkk-controller-{ARCH}:e2e loadBehavior: mustLoad # ## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS @@ -73,16 +73,15 @@ providers: - name: kubekey type: InfrastructureProvider versions: - - name: v2.3.0 + - name: v3.0.0 value: ../../../config/default contract: v1beta1 files: - sourcePath: "../data/infrastructure-kubekey/v1beta1/cluster-template.yaml" - sourcePath: "../data/shared/v1beta1_provider/metadata.yaml" replacements: - # To allow bugs to be catched. -# - old: controller:latest -# new: docker.io/kubesphere/capkk-controller:e2e + - old: docker.io/kubespheredev/capkk-controller:main + new: docker.io/kubespheredev/capkk-controller-amd64:e2e - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" diff --git a/test/e2e/config/e2e_k3s_conf.yaml b/test/e2e/config/e2e_k3s_conf.yaml new file mode 100644 index 00000000..cf1e1a70 --- /dev/null +++ b/test/e2e/config/e2e_k3s_conf.yaml @@ -0,0 +1,115 @@ +--- + +images: + # Use local dev images built source tree; + - name: kubespheredev/capkk-controller-{ARCH}:e2e + loadBehavior: mustLoad + - name: kubespheredev/k3s-bootstrap-controller-{ARCH}:e2e + loadBehavior: mustLoad + - name: kubespheredev/k3s-control-plane-controller-{ARCH}:e2e + loadBehavior: mustLoad + # + ## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS + + # Cluster API v1beta1 Preloads + - name: quay.io/jetstack/cert-manager-cainjector:v1.8.2 + loadBehavior: tryLoad + - name: quay.io/jetstack/cert-manager-webhook:v1.8.2 + loadBehavior: tryLoad + - name: quay.io/jetstack/cert-manager-controller:v1.8.2 + loadBehavior: tryLoad + - name: 
k8s.gcr.io/cluster-api/cluster-api-controller:v1.2.1 + loadBehavior: tryLoad + - name: ghcr.io/kube-vip/kube-vip:v0.5.0 + loadBehavior: tryLoad + +providers: + - name: cluster-api + type: CoreProvider + versions: + - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/core-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - old: --metrics-bind-addr=127.0.0.1:8080 + new: --metrics-bind-addr=:8080 + - name: kubeadm + type: BootstrapProvider + versions: + - name: v3.0.0 + value: ../../../bootstrap/k3s/config/default + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1beta1_provider/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - old: --metrics-bind-addr=127.0.0.1:8080 + new: --metrics-bind-addr=:8080 + - old: docker.io/kubespheredev/k3s-bootstrap-controller:main + new: docker.io/kubespheredev/k3s-bootstrap-controller-amd64:e2e + - name: kubeadm + type: ControlPlaneProvider + versions: + - name: v3.0.0 + value: ../../../controlplane/k3s/config/default + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1beta1_provider/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - old: --metrics-bind-addr=127.0.0.1:8080 + new: --metrics-bind-addr=:8080 + - old: docker.io/kubespheredev/k3s-control-plane-controller:main + new: docker.io/kubespheredev/k3s-control-plane-controller-amd64:e2e + - name: kubekey + type: InfrastructureProvider + versions: + - name: v3.0.0 + value: ../../../config/default + contract: v1beta1 + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - old: 
docker.io/kubespheredev/capkk-controller:main + new: docker.io/kubespheredev/capkk-controller-amd64:e2e + files: + - sourcePath: "../data/infrastructure-kubekey/v1beta1/cluster-template.yaml" + - sourcePath: "../data/shared/v1beta1_provider/metadata.yaml" + +variables: + # Default variables for the e2e test; those values could be overridden via env variables, thus + # allowing the same e2e config file to be re-used in different Prow jobs e.g. each one with a K8s version permutation. + # The following Kubernetes versions should be the latest versions with already published kindest/node images. + # This avoids building node images in the default case which improves the test duration significantly. + KUBERNETES_VERSION_MANAGEMENT: "v1.24.0" + KUBERNETES_VERSION: "v1.24.0" + CNI: "./data/cni/calico.yaml" + EVENT_BRIDGE_INSTANCE_STATE: "true" + EXP_CLUSTER_RESOURCE_SET: "true" + IP_FAMILY: "IPv4" + SERVICE_CIDRS: "10.233.0.0/18" + POD_CIDRS: "10.233.64.0/18" + SERVICE_DOMAIN: "cluster.local" + KKZONE: "cn" + USER_NAME: "ubuntu" + PASSWORD: "Qcloud@123" + INSTANCES: "[{address: 192.168.100.3}, {address: 192.168.100.4}]" + CONTROL_PLANE_ENDPOINT_IP: "192.168.100.100" + +intervals: + default/wait-controllers: [ "5m", "10s" ] + default/wait-cluster: [ "5m", "10s" ] + default/wait-control-plane: [ "30m", "10s" ] + default/wait-worker-nodes: [ "30m", "10s" ] + default/wait-delete-cluster: [ "5m", "10s" ] + default/wait-machine-upgrade: [ "15m", "1m" ] + default/wait-machine-remediation: [ "5m", "10s" ] + node-drain/wait-deployment-available: [ "3m", "10s" ] + node-drain/wait-machine-deleted: [ "2m", "10s" ] diff --git a/test/e2e/data/infrastructure-kubekey/v1beta1/bases/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-kubekey/v1beta1/bases/cluster-with-kcp.yaml index 4493a1e4..58c45178 100644 --- a/test/e2e/data/infrastructure-kubekey/v1beta1/bases/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-kubekey/v1beta1/bases/cluster-with-kcp.yaml @@ -5,6 +5,7 @@ kind: 
KKCluster metadata: name: '${CLUSTER_NAME}' spec: + distribution: kubernetes component: zone: '${KKZONE}' nodes: @@ -50,8 +51,7 @@ spec: roles: - control-plane repository: - iso: "auto" - update: true + iso: "none" --- # KubeadmControlPlane referenced by the Cluster object with # - the label kcp-adoption.step2, because it should be created in the second step of the kcp-adoption test. diff --git a/test/e2e/data/infrastructure-kubekey/v1beta1/bases/md.yaml b/test/e2e/data/infrastructure-kubekey/v1beta1/bases/md.yaml index 16206bd5..ae0f4522 100644 --- a/test/e2e/data/infrastructure-kubekey/v1beta1/bases/md.yaml +++ b/test/e2e/data/infrastructure-kubekey/v1beta1/bases/md.yaml @@ -10,8 +10,7 @@ spec: - control-plane - worker repository: - iso: "auto" - update: true + iso: "none" --- # KubeadmConfigTemplate referenced by the MachineDeployment apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 diff --git a/test/e2e/data/k3s/v1beta1/bases/cluster-with-kcp.yaml b/test/e2e/data/k3s/v1beta1/bases/cluster-with-kcp.yaml new file mode 100644 index 00000000..22d29fcd --- /dev/null +++ b/test/e2e/data/k3s/v1beta1/bases/cluster-with-kcp.yaml @@ -0,0 +1,134 @@ +--- +# KKCluster object referenced by the Cluster object +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: KKCluster +metadata: + name: '${CLUSTER_NAME}' +spec: + distribution: k3s + component: + zone: '${KKZONE}' + nodes: + auth: + user: '${USER_NAME}' + password: '${PASSWORD}' + instances: '${INSTANCES}' + controlPlaneLoadBalancer: + host: '${CONTROL_PLANE_ENDPOINT_IP}' +--- +# Cluster object with +# - Reference to the KubeadmControlPlane object +# - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet. 
+apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + labels: + cni: "${CLUSTER_NAME}-crs-0" +spec: + clusterNetwork: + services: + cidrBlocks: ['${SERVICE_CIDRS}'] + pods: + cidrBlocks: ['${POD_CIDRS}'] + serviceDomain: '${SERVICE_DOMAIN}' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: KKCluster + name: '${CLUSTER_NAME}' + controlPlaneRef: + kind: K3sControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: KKMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + roles: + - control-plane +--- +kind: K3sControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + machineTemplate: + infrastructureRef: + kind: KKMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" + k3sConfigSpec: + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE=""} + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: lb_enable + value: "true" + - name: lb_port + value: "6443" + image: ghcr.io/kube-vip/kube-vip:v0.5.0 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + 
capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + version: "${KUBERNETES_VERSION}" diff --git a/test/e2e/data/k3s/v1beta1/bases/crs.yaml b/test/e2e/data/k3s/v1beta1/bases/crs.yaml new file mode 100644 index 00000000..b62f66d3 --- /dev/null +++ b/test/e2e/data/k3s/v1beta1/bases/crs.yaml @@ -0,0 +1,23 @@ +--- +# ConfigMap object referenced by the ClusterResourceSet object and with +# the CNI resource defined in the test config file +apiVersion: v1 +kind: ConfigMap +metadata: + name: "cni-${CLUSTER_NAME}-crs-0" +data: ${CNI_RESOURCES} +--- +# ClusterResourceSet object with +# a selector that targets all the Cluster with label cni=${CLUSTER_NAME}-crs-0 +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-crs-0" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: "${CLUSTER_NAME}-crs-0" + resources: + - name: "cni-${CLUSTER_NAME}-crs-0" + kind: ConfigMap diff --git a/test/e2e/data/k3s/v1beta1/bases/md.yaml b/test/e2e/data/k3s/v1beta1/bases/md.yaml new file mode 100644 index 00000000..cb7fc2c6 --- /dev/null +++ b/test/e2e/data/k3s/v1beta1/bases/md.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: KKMachineTemplate +metadata: + name: '${CLUSTER_NAME}-md-0' +spec: + template: + spec: + roles: + - control-plane + - worker +--- +# KubeadmConfigTemplate referenced by the MachineDeployment +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: K3sConfigTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +--- +# MachineDeployment object +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-md-0" 
+spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-md-0" + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: K3sConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-md-0" + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: KKMachineTemplate + failureDomain: fd4 diff --git a/test/e2e/data/k3s/v1beta1/cluster-template/kustomization.yaml b/test/e2e/data/k3s/v1beta1/cluster-template/kustomization.yaml new file mode 100644 index 00000000..adb5919c --- /dev/null +++ b/test/e2e/data/k3s/v1beta1/cluster-template/kustomization.yaml @@ -0,0 +1,5 @@ +bases: +- ../bases/cluster-with-kcp.yaml +- ../bases/md.yaml +- ../bases/crs.yaml + diff --git a/test/e2e/data/shared/v1beta1_provider/metadata.yaml b/test/e2e/data/shared/v1beta1_provider/metadata.yaml index d18ab22c..e0ec3ba5 100644 --- a/test/e2e/data/shared/v1beta1_provider/metadata.yaml +++ b/test/e2e/data/shared/v1beta1_provider/metadata.yaml @@ -5,27 +5,6 @@ # update this file only when a new major or minor version is released apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 releaseSeries: - - major: 0 - minor: 4 - contract: v1alpha2 - - major: 0 - minor: 5 - contract: v1alpha3 - - major: 0 - minor: 6 - contract: v1alpha3 - - major: 0 - minor: 7 - contract: v1alpha4 - - major: 1 + - major: 3 minor: 0 contract: v1beta1 - - major: 1 - minor: 1 - contract: v1beta1 - - major: 1 - minor: 2 - contract: v1beta1 - - major: 2 - minor: 3 - contract: v1beta1 diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index c2d1b4a1..5fe6bf6b 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -36,6 +36,7 @@ import ( "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" + 
"sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" ctrl "sigs.k8s.io/controller-runtime" ) @@ -54,6 +55,10 @@ var ( // If it is not set, a local clusterctl repository (including a clusterctl config) will be created automatically. clusterctlConfig string + // alsoLogToFile enables additional logging to the 'ginkgo-log.txt' file in the artifact folder. + // These logs also contain timestamps. + alsoLogToFile bool + // skipCleanup prevents cleanup of test resources e.g. for debug purposes. skipCleanup bool ) @@ -80,6 +85,7 @@ var ( func init() { flag.StringVar(&configPath, "e2e.config", "", "path to the e2e config file") flag.StringVar(&artifactFolder, "e2e.artifacts-folder", "", "folder where e2e test artifact should be stored") + flag.BoolVar(&alsoLogToFile, "e2e.also-log-to-file", true, "if true, ginkgo logs are additionally written to the `ginkgo-log.txt` file in the artifacts folder (including timestamps)") flag.BoolVar(&skipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped") flag.StringVar(&clusterctlConfig, "e2e.clusterctl-config", "", "file which tests will use as a clusterctl config. 
If it is not set, a local clusterctl repository (including a clusterctl config) will be created automatically.") flag.BoolVar(&useExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)") @@ -101,8 +107,13 @@ func TestE2E(t *testing.T) { RegisterFailHandler(Fail) - junitReporter := framework.CreateJUnitReporterForProw(artifactFolder) - RunSpecsWithDefaultAndCustomReporters(t, "capkk-e2e", []Reporter{junitReporter}) + if alsoLogToFile { + w, err := ginkgoextensions.EnableFileLogging(filepath.Join(artifactFolder, "ginkgo-log.txt")) + g.Expect(err).ToNot(HaveOccurred()) + defer w.Close() + } + + RunSpecs(t, "capi-e2e") } // Using a SynchronizedBeforeSuite for controlling how to create resources shared across ParallelNodes (~ginkgo threads). diff --git a/util/log/doc.go b/util/log/doc.go new file mode 100644 index 00000000..af33e743 --- /dev/null +++ b/util/log/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package log provides log utils. +package log diff --git a/util/log/log.go b/util/log/log.go new file mode 100644 index 00000000..01464cc1 --- /dev/null +++ b/util/log/log.go @@ -0,0 +1,105 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package log provides log utils. +package log + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// AddOwners adds the owners of an Object based on OwnerReferences as k/v pairs to the logger in ctx. +// Note: If an owner is a MachineSet we also add the owners from the MachineSet OwnerReferences. +func AddOwners(ctx context.Context, c client.Client, obj metav1.Object) (context.Context, logr.Logger, error) { + log := ctrl.LoggerFrom(ctx) + + owners, err := getOwners(ctx, c, obj) + if err != nil { + return nil, logr.Logger{}, errors.Wrapf(err, "failed to add object hierarchy to logger") + } + + // Add owners as k/v pairs. + keysAndValues := []interface{}{} + addedKinds := sets.NewString() + for _, owner := range owners { + // Don't add duplicate kinds. + if addedKinds.Has(owner.Kind) { + continue + } + + keysAndValues = append(keysAndValues, owner.Kind, klog.KRef(owner.Namespace, owner.Name)) + addedKinds.Insert(owner.Kind) + } + log = log.WithValues(keysAndValues...) + + ctx = ctrl.LoggerInto(ctx, log) + return ctx, log, nil +} + +// owner represents an owner of an object. +type owner struct { + Kind string + Name string + Namespace string +} + +// getOwners returns owners of an Object based on OwnerReferences. 
+// Note: If an owner is a MachineSet we also return the owners from the MachineSet OwnerReferences. +func getOwners(ctx context.Context, c client.Client, obj metav1.Object) ([]owner, error) { + owners := []owner{} + for _, ownerRef := range obj.GetOwnerReferences() { + owners = append(owners, owner{ + Kind: ownerRef.Kind, + Namespace: obj.GetNamespace(), + Name: ownerRef.Name, + }) + + // continue if the ownerRef does not point to a MachineSet. + if ownerRef.Kind != "MachineSet" { + continue + } + + // get owners of the MachineSet. + var ms clusterv1.MachineSet + if err := c.Get(ctx, client.ObjectKey{Namespace: obj.GetNamespace(), Name: ownerRef.Name}, &ms); err != nil { + // continue if the MachineSet doesn't exist. + if apierrors.IsNotFound(err) { + continue + } + return nil, errors.Wrapf(err, "failed to get owners: failed to get MachineSet %s", klog.KRef(obj.GetNamespace(), ownerRef.Name)) + } + + for _, ref := range ms.GetOwnerReferences() { + owners = append(owners, owner{ + Kind: ref.Kind, + Namespace: obj.GetNamespace(), + Name: ref.Name, + }) + } + } + + return owners, nil +} diff --git a/util/log/log_test.go b/util/log/log_test.go new file mode 100644 index 00000000..4b96962e --- /dev/null +++ b/util/log/log_test.go @@ -0,0 +1,215 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +func Test_AddObjectHierarchy(t *testing.T) { + g := NewWithT(t) + + scheme := runtime.NewScheme() + g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) + + md := &clusterv1.MachineSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "MachineDeployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "development-3961-md-0-l4zn6", + }, + } + mdOwnerRef := metav1.OwnerReference{ + APIVersion: md.APIVersion, + Kind: md.Kind, + Name: md.Name, + } + + ms := &clusterv1.MachineSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "MachineSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "development-3961-md-0-l4zn6-758c9b7677", + OwnerReferences: []metav1.OwnerReference{mdOwnerRef}, + }, + } + msOwnerRef := metav1.OwnerReference{ + APIVersion: ms.APIVersion, + Kind: ms.Kind, + Name: ms.Name, + } + + tests := []struct { + name string + obj metav1.Object + objects []client.Object + expectedKeysAndValues []interface{} + }{ + { + name: "MachineSet owning Machine is added", + // MachineSet does not exist in Kubernetes so only MachineSet is added + // and MachineDeployment is not. 
+ obj: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{msOwnerRef}, + Namespace: metav1.NamespaceDefault, + }, + }, + expectedKeysAndValues: []interface{}{ + "MachineSet", + klog.ObjectRef{Namespace: ms.Namespace, Name: ms.Name}, + }, + }, + { + name: "MachineDeployment and MachineSet owning Machine is added", + obj: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{msOwnerRef}, + Namespace: metav1.NamespaceDefault, + }, + }, + objects: []client.Object{ms}, + expectedKeysAndValues: []interface{}{ + "MachineSet", + klog.ObjectRef{Namespace: ms.Namespace, Name: ms.Name}, + "MachineDeployment", + klog.ObjectRef{Namespace: md.Namespace, Name: md.Name}, + }, + }, + { + name: "MachineDeployment owning MachineSet is added", + obj: &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{mdOwnerRef}, + Namespace: metav1.NamespaceDefault, + }, + }, + expectedKeysAndValues: []interface{}{ + "MachineDeployment", + klog.ObjectRef{Namespace: md.Namespace, Name: md.Name}, + }, + }, + { + name: "KubeadmControlPlane and Machine owning DockerMachine are added", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "kind": "DockerMachine", + "metadata": map[string]interface{}{ + "ownerReferences": []interface{}{ + map[string]interface{}{ + "apiVersion": clusterv1.GroupVersion.String(), + "kind": "Machine", + "name": "development-3961-4flkb-gzxnb", + }, + map[string]interface{}{ + "apiVersion": clusterv1.GroupVersion.String(), + "kind": "KubeadmControlPlane", + "name": "development-3961-4flkb", + }, + }, + "namespace": metav1.NamespaceDefault, + }, + }, + }, + expectedKeysAndValues: []interface{}{ + "Machine", + klog.ObjectRef{Namespace: metav1.NamespaceDefault, Name: "development-3961-4flkb-gzxnb"}, + "KubeadmControlPlane", + klog.ObjectRef{Namespace: 
metav1.NamespaceDefault, Name: "development-3961-4flkb"}, + }, + }, + { + name: "Duplicate Cluster ownerRef should be deduplicated", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "kind": "DockerCluster", + "metadata": map[string]interface{}{ + "ownerReferences": []interface{}{ + map[string]interface{}{ + "apiVersion": clusterv1.GroupVersion.String(), + "kind": "Cluster", + "name": "development-3961", + }, + map[string]interface{}{ + "apiVersion": clusterv1.GroupVersion.String(), + "kind": "Cluster", + "name": "development-3961", + }, + }, + "namespace": metav1.NamespaceDefault, + }, + }, + }, + expectedKeysAndValues: []interface{}{ + "Cluster", + klog.ObjectRef{Namespace: metav1.NamespaceDefault, Name: "development-3961"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + c := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(tt.objects...). + Build() + + // Create fake log sink so we can later verify the added k/v pairs. + ctx := ctrl.LoggerInto(context.Background(), logr.New(&fakeLogSink{})) + + _, logger, err := AddOwners(ctx, c, tt.obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(logger.GetSink().(fakeLogSink).keysAndValues).To(Equal(tt.expectedKeysAndValues)) + }) + } +} + +type fakeLogSink struct { + // Embedding NullLogSink so we don't have to implement all funcs + // of the LogSink interface. + log.NullLogSink + keysAndValues []interface{} +} + +// WithValues stores keysAndValues so we can later check if the +// right keysAndValues have been added. +func (f fakeLogSink) WithValues(keysAndValues ...interface{}) logr.LogSink { + f.keysAndValues = keysAndValues + return f +} diff --git a/util/secret/certificates.go b/util/secret/certificates.go new file mode 100644 index 00000000..3c0338b7 --- /dev/null +++ b/util/secret/certificates.go @@ -0,0 +1,400 @@ +/* + Copyright 2022 The KubeSphere Authors. 
const (
	// rootOwnerValue is the file owner written into bootstrap files for certificates.
	rootOwnerValue = "root:root"

	// DefaultCertificatesDir is the default directory where k3s certificates are stored.
	DefaultCertificatesDir = "/var/lib/rancher/k3s/server/tls"
)

var (
	// ErrMissingCertificate is an error indicating a certificate is entirely missing.
	ErrMissingCertificate = errors.New("missing certificate")

	// ErrMissingCrt is an error indicating the crt file is missing from the certificate.
	ErrMissingCrt = errors.New("missing crt data")

	// ErrMissingKey is an error indicating the key file is missing from the certificate.
	ErrMissingKey = errors.New("missing key data")
)
// Certificates are the certificates necessary to bootstrap a cluster.
type Certificates []*Certificate

// NewCertificatesForInitialControlPlane returns a list of certificates configured for a control plane node.
func NewCertificatesForInitialControlPlane() Certificates {
	certificatesDir := DefaultCertificatesDir

	// k3s uses a separate CA for serving (server-ca) and for client auth (client-ca).
	certificates := Certificates{
		&Certificate{
			Purpose:  ClusterCA,
			CertFile: filepath.Join(certificatesDir, "server-ca.crt"),
			KeyFile:  filepath.Join(certificatesDir, "server-ca.key"),
		},
		&Certificate{
			Purpose:  ClientClusterCA,
			CertFile: filepath.Join(certificatesDir, "client-ca.crt"),
			KeyFile:  filepath.Join(certificatesDir, "client-ca.key"),
		},
	}

	return certificates
}

// GetByPurpose returns a certificate by the given name, or nil when no
// certificate with that purpose is present.
// This could be removed if we use a map instead of a slice to hold certificates, however other code becomes more complex.
func (c Certificates) GetByPurpose(purpose Purpose) *Certificate {
	for _, certificate := range c {
		if certificate.Purpose == purpose {
			return certificate
		}
	}
	return nil
}

// Lookup looks up each certificate from secrets and populates the certificate with the secret data.
// A certificate whose secret does not exist yet is left untouched (it can be generated later),
// unless the certificate is marked External, in which case the missing secret is an error.
func (c Certificates) Lookup(ctx context.Context, ctrlclient client.Client, clusterName client.ObjectKey) error {
	// Look up each certificate as a secret and populate the certificate/key.
	for _, certificate := range c {
		s := &corev1.Secret{}
		key := client.ObjectKey{
			Name:      Name(clusterName.Name, certificate.Purpose),
			Namespace: clusterName.Namespace,
		}
		if err := ctrlclient.Get(ctx, key, s); err != nil {
			if apierrors.IsNotFound(err) {
				if certificate.External {
					return errors.WithMessage(err, "external certificate not found")
				}
				continue
			}
			return errors.WithStack(err)
		}
		// If a user has a badly formatted secret it will prevent the cluster from working.
		kp, err := secretToKeyPair(s)
		if err != nil {
			return err
		}
		certificate.KeyPair = kp
	}
	return nil
}

// EnsureAllExist ensures that there is some data present for every certificate.
// External certificates are allowed to omit the private key.
func (c Certificates) EnsureAllExist() error {
	for _, certificate := range c {
		if certificate.KeyPair == nil {
			return ErrMissingCertificate
		}
		if len(certificate.KeyPair.Cert) == 0 {
			return errors.Wrapf(ErrMissingCrt, "for certificate: %s", certificate.Purpose)
		}
		if !certificate.External {
			if len(certificate.KeyPair.Key) == 0 {
				return errors.Wrapf(ErrMissingKey, "for certificate: %s", certificate.Purpose)
			}
		}
	}
	return nil
}

// Generate will generate any certificates that do not have KeyPair data.
func (c Certificates) Generate() error {
	for _, certificate := range c {
		if certificate.KeyPair == nil {
			err := certificate.Generate()
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// SaveGenerated will save any certificates that have been generated as Kubernetes secrets.
// Certificates that were looked up rather than generated are intentionally skipped,
// so pre-existing user-supplied secrets are never overwritten.
func (c Certificates) SaveGenerated(ctx context.Context, ctrlclient client.Client, clusterName client.ObjectKey, owner metav1.OwnerReference) error {
	for _, certificate := range c {
		if !certificate.Generated {
			continue
		}
		s := certificate.AsSecret(clusterName, owner)
		if err := ctrlclient.Create(ctx, s); err != nil {
			return errors.WithStack(err)
		}
	}
	return nil
}
+func (c Certificates) LookupOrGenerate(ctx context.Context, ctrlclient client.Client, clusterName client.ObjectKey, owner metav1.OwnerReference) error { + // Find the certificates that exist + if err := c.Lookup(ctx, ctrlclient, clusterName); err != nil { + return err + } + + // Generate the certificates that don't exist + if err := c.Generate(); err != nil { + return err + } + + // Save any certificates that have been generated + if err := c.SaveGenerated(ctx, ctrlclient, clusterName, owner); err != nil { + return err + } + + return nil +} + +// Certificate represents a single certificate CA. +type Certificate struct { + Generated bool + External bool + Purpose Purpose + KeyPair *certs.KeyPair + CertFile, KeyFile string +} + +// Hashes hashes all the certificates stored in a CA certificate. +func (c *Certificate) Hashes() ([]string, error) { + certificates, err := cert.ParseCertsPEM(c.KeyPair.Cert) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse %s certificate", c.Purpose) + } + out := make([]string, 0) + for _, c := range certificates { + out = append(out, hashCert(c)) + } + return out, nil +} + +// hashCert calculates the sha256 of certificate. +func hashCert(certificate *x509.Certificate) string { + spkiHash := sha256.Sum256(certificate.RawSubjectPublicKeyInfo) + return "sha256:" + strings.ToLower(hex.EncodeToString(spkiHash[:])) +} + +// AsSecret converts a single certificate into a Kubernetes secret. 
// AsSecret converts a single certificate into a Kubernetes secret.
func (c *Certificate) AsSecret(clusterName client.ObjectKey, owner metav1.OwnerReference) *corev1.Secret {
	s := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: clusterName.Namespace,
			Name:      Name(clusterName.Name, c.Purpose),
			Labels: map[string]string{
				clusterv1.ClusterLabelName: clusterName.Name,
			},
		},
		Data: map[string][]byte{
			TLSKeyDataName: c.KeyPair.Key,
			TLSCrtDataName: c.KeyPair.Cert,
		},
		Type: clusterv1.ClusterSecretType,
	}

	// Only generated secrets get an owner reference (and are garbage collected
	// with the owner); user-supplied secrets keep their own lifecycle.
	if c.Generated {
		s.OwnerReferences = []metav1.OwnerReference{owner}
	}
	return s
}

// AsFiles converts the certificate to a slice of Files that may have 0, 1 or 2 Files.
func (c *Certificate) AsFiles() []bootstrapv1.File {
	out := make([]bootstrapv1.File, 0)
	if len(c.KeyPair.Cert) > 0 {
		out = append(out, bootstrapv1.File{
			Path:        c.CertFile,
			Owner:       rootOwnerValue,
			Permissions: "0640",
			Content:     string(c.KeyPair.Cert),
		})
	}
	if len(c.KeyPair.Key) > 0 {
		out = append(out, bootstrapv1.File{
			Path:  c.KeyFile,
			Owner: rootOwnerValue,
			// The private key is written with tighter permissions than the cert.
			Permissions: "0600",
			Content:     string(c.KeyPair.Key),
		})
	}
	return out
}

// Generate will generate a new certificate.
func (c *Certificate) Generate() error {
	// Do not generate the APIServerEtcdClient key pair. It is user supplied
	if c.Purpose == APIServerEtcdClient {
		return nil
	}

	// The service account "certificate" is really a plain RSA key pair;
	// every other purpose gets a self-signed CA certificate.
	generator := generateCACert
	if c.Purpose == ServiceAccount {
		generator = generateServiceAccountKeys
	}

	kp, err := generator()
	if err != nil {
		return err
	}
	c.KeyPair = kp
	c.Generated = true

	return nil
}

// AsFiles converts a slice of certificates into bootstrap files.
func (c Certificates) AsFiles() []bootstrapv1.File {
	clusterCA := c.GetByPurpose(ClusterCA)
	clientClusterCA := c.GetByPurpose(ClientClusterCA)

	etcdCA := c.GetByPurpose(EtcdCA)

	certFiles := make([]bootstrapv1.File, 0)
	if clusterCA != nil {
		certFiles = append(certFiles, clusterCA.AsFiles()...)
	}
	if clientClusterCA != nil {
		certFiles = append(certFiles, clientClusterCA.AsFiles()...)
	}
	if etcdCA != nil {
		certFiles = append(certFiles, etcdCA.AsFiles()...)
	}

	// these will only exist if external etcd was defined and supplied by the user
	apiserverEtcdClientCert := c.GetByPurpose(APIServerEtcdClient)
	if apiserverEtcdClientCert != nil {
		certFiles = append(certFiles, apiserverEtcdClientCert.AsFiles()...)
	}

	return certFiles
}

// secretToKeyPair extracts the cert/key data from a secret, validating that at
// least the certificate part is present.
func secretToKeyPair(s *corev1.Secret) (*certs.KeyPair, error) {
	c, exists := s.Data[TLSCrtDataName]
	if !exists {
		return nil, errors.Errorf("missing data for key %s", TLSCrtDataName)
	}

	// In some cases (external etcd) it's ok if the etcd.key does not exist.
	// TODO: some other function should ensure that the certificates we need exist.
	key, exists := s.Data[TLSKeyDataName]
	if !exists {
		key = []byte("")
	}

	return &certs.KeyPair{
		Cert: c,
		Key:  key,
	}, nil
}

// generateCACert creates a fresh self-signed CA certificate/key pair,
// PEM-encoded.
func generateCACert() (*certs.KeyPair, error) {
	x509Cert, privKey, err := newCertificateAuthority()
	if err != nil {
		return nil, err
	}
	return &certs.KeyPair{
		Cert: certs.EncodeCertPEM(x509Cert),
		Key:  certs.EncodePrivateKeyPEM(privKey),
	}, nil
}

// generateServiceAccountKeys creates an RSA key pair for signing service
// account tokens; the public key is stored in the Cert slot.
func generateServiceAccountKeys() (*certs.KeyPair, error) {
	saCreds, err := certs.NewPrivateKey()
	if err != nil {
		return nil, err
	}
	saPub, err := certs.EncodePublicKeyPEM(&saCreds.PublicKey)
	if err != nil {
		return nil, err
	}
	return &certs.KeyPair{
		Cert: saPub,
		Key:  certs.EncodePrivateKeyPEM(saCreds),
	}, nil
}

// newCertificateAuthority creates new certificate and private key for the certificate authority.
func newCertificateAuthority() (*x509.Certificate, *rsa.PrivateKey, error) {
	key, err := certs.NewPrivateKey()
	if err != nil {
		return nil, nil, err
	}

	c, err := newSelfSignedCACert(key)
	if err != nil {
		return nil, nil, err
	}

	return c, key, nil
}
+func newSelfSignedCACert(key *rsa.PrivateKey) (*x509.Certificate, error) { + cfg := certs.Config{ + CommonName: "kubernetes", + } + + now := time.Now().UTC() + + tmpl := x509.Certificate{ + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{ + CommonName: cfg.CommonName, + Organization: cfg.Organization, + }, + NotBefore: now.Add(time.Minute * -5), + NotAfter: now.Add(time.Hour * 24 * 365 * 10), // 10 years + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + MaxPathLenZero: true, + BasicConstraintsValid: true, + MaxPathLen: 0, + IsCA: true, + } + + b, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, key.Public(), key) + if err != nil { + return nil, errors.Wrapf(err, "failed to create self signed CA certificate: %+v", tmpl) + } + + c, err := x509.ParseCertificate(b) + return c, errors.WithStack(err) +} diff --git a/util/secret/consts.go b/util/secret/consts.go new file mode 100644 index 00000000..f207f47e --- /dev/null +++ b/util/secret/consts.go @@ -0,0 +1,57 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package secret + +// Purpose is the name to append to the secret generated for a cluster. +type Purpose string + +const ( + // KubeconfigDataName is the key used to store a Kubeconfig in the secret's data field. + KubeconfigDataName = "value" + + // TLSKeyDataName is the key used to store a TLS private key in the secret's data field. 
+ TLSKeyDataName = "tls.key" + + // TLSCrtDataName is the key used to store a TLS certificate in the secret's data field. + TLSCrtDataName = "tls.crt" + + // Kubeconfig is the secret name suffix storing the Cluster Kubeconfig. + Kubeconfig = Purpose("kubeconfig") + + // ClusterCA is the secret name suffix for APIServer CA. + ClusterCA = Purpose("ca") + + // ClientClusterCA is the secret name suffix for APIServer CA. + ClientClusterCA = Purpose("cca") + + // EtcdCA is the secret name suffix for the Etcd CA + EtcdCA Purpose = "etcd" + + // ServiceAccount is the secret name suffix for the Service Account keys + ServiceAccount Purpose = "sa" + + // FrontProxyCA is the secret name suffix for Front Proxy CA + FrontProxyCA Purpose = "proxy" + + // APIServerEtcdClient is the secret name of user-supplied secret containing the apiserver-etcd-client key/cert + APIServerEtcdClient Purpose = "apiserver-etcd-client" +) + +var ( + // allSecretPurposes defines a lists with all the secret suffix used by Cluster API + allSecretPurposes = []Purpose{Kubeconfig, ClusterCA, EtcdCA, ServiceAccount, FrontProxyCA, APIServerEtcdClient} +) diff --git a/util/secret/doc.go b/util/secret/doc.go new file mode 100644 index 00000000..bfe0749e --- /dev/null +++ b/util/secret/doc.go @@ -0,0 +1,18 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package secret provides a secret store for storing secrets. 
+package secret diff --git a/util/secret/secret.go b/util/secret/secret.go new file mode 100644 index 00000000..de88b144 --- /dev/null +++ b/util/secret/secret.go @@ -0,0 +1,71 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package secret + +import ( + "context" + "fmt" + "strings" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Get retrieves the specified Secret (if any) from the given +// cluster name and namespace. +func Get(ctx context.Context, c client.Reader, cluster client.ObjectKey, purpose Purpose) (*corev1.Secret, error) { + return GetFromNamespacedName(ctx, c, cluster, purpose) +} + +// GetFromNamespacedName retrieves the specified Secret (if any) from the given +// cluster name and namespace. +func GetFromNamespacedName(ctx context.Context, c client.Reader, clusterName client.ObjectKey, purpose Purpose) (*corev1.Secret, error) { + secret := &corev1.Secret{} + secretKey := client.ObjectKey{ + Namespace: clusterName.Namespace, + Name: Name(clusterName.Name, purpose), + } + + if err := c.Get(ctx, secretKey, secret); err != nil { + return nil, err + } + + return secret, nil +} + +// Name returns the name of the secret for a cluster. 
+func Name(cluster string, suffix Purpose) string { + return fmt.Sprintf("%s-%s", cluster, suffix) +} + +// ParseSecretName return the cluster name and the suffix Purpose in name is a valid cluster secrets, +// otherwise it return error. +func ParseSecretName(name string) (string, Purpose, error) { + separatorPos := strings.LastIndex(name, "-") + if separatorPos == -1 { + return "", "", errors.Errorf("%q is not a valid cluster secret name. The purpose suffix is missing", name) + } + clusterName := name[:separatorPos] + purposeSuffix := Purpose(name[separatorPos+1:]) + for _, purpose := range allSecretPurposes { + if purpose == purposeSuffix { + return clusterName, purposeSuffix, nil + } + } + return "", "", errors.Errorf("%q is not a valid cluster secret name. Invalid purpose suffix", name) +}