Merge pull request #1542 from 24sama/v3.0.0-capkk-bootstrap

feat: add k3s control-plane provider and bootstrap provider
This commit is contained in:
KubeSphere CI Bot 2022-10-21 16:39:41 +08:00 committed by GitHub
commit 07d184b8db
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
191 changed files with 13552 additions and 416 deletions

View File

@ -19,10 +19,10 @@ jobs:
GO111MODULE: on
steps:
- name: Set up Go 1.18
- name: Set up Go 1.19
uses: actions/setup-go@v3
with:
go-version: 1.18
go-version: 1.19
id: go
- name: Check out code into the Go module directory

View File

@ -20,7 +20,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: 1.18
go-version: 1.19
- name: golangci-lint
uses: golangci/golangci-lint-action@v3.2.0
with:

View File

@ -19,7 +19,7 @@ jobs:
- name: Install go
uses: actions/setup-go@v3
with:
go-version: '^1.18'
go-version: '^1.19'
- name: generate release artifacts
run: |
make release

View File

@ -124,14 +124,16 @@ linters-settings:
# CAPKK
- pkg: github.com/kubesphere/kubekey/api/v1beta1
alias: infrav1
- pkg: github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1
alias: infrabootstrapv1
nolintlint:
allow-unused: false
allow-leading-space: false
require-specific: true
staticcheck:
go: "1.18"
go: "1.19"
stylecheck:
go: "1.18"
go: "1.19"
gosec:
excludes:
- G307 # Deferring unsafe method "Close" on type "\*os.File"
@ -156,8 +158,6 @@ linters-settings:
- commentFormatting
- filepathJoin
- commentedOutCode
unused:
go: "1.18"
issues:
max-same-issues: 0
max-issues-per-linter: 0
@ -166,83 +166,75 @@ issues:
exclude-use-default: false
exclude-rules:
- linters:
- revive
- revive
text: "exported: exported method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported"
- linters:
- errcheck
- errcheck
text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
# Exclude some packages or code to require comments, for example test code, or fake clients.
# Exclude revive's exported for certain packages and code, e.g. tests and fake.
- linters:
- revive
- revive
text: exported (method|function|type|const) (.+) should have comment or be unexported
source: (func|type).*Fake.*
- linters:
- revive
- revive
text: exported (method|function|type|const) (.+) should have comment or be unexported
path: fake_\.go
- linters:
- revive
- revive
text: exported (method|function|type|const) (.+) should have comment or be unexported
path: cmd/clusterctl/internal/test/providers.*.go
path: .*test/(providers|framework|e2e).*.go
- linters:
- revive
text: exported (method|function|type|const) (.+) should have comment or be unexported
path: "(framework|e2e)/.*.go"
# Disable unparam "always receives" which might not be really
# useful when building libraries.
- errcheck
text: Error return value is not checked
path: _test\.go
- linters:
- unparam
- errcheck
text: Error return value of (.+) is not checked
path: _test\.go
- linters:
- gosec
text: "G108: Profiling endpoint is automatically exposed on /debug/pprof"
- linters:
- godot
text: "Comment should end in a period"
path: "(.*)/(v1beta1|v1beta2)/(.*)types.go"
- linters:
- errcheck
text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
# With Go 1.16, the new embed directive can be used with an un-named import,
# revive (previously, golint) only allows these to be imported in a main.go, which wouldn't work for us.
# This directive allows the embed package to be imported with an underscore everywhere.
- linters:
- revive
source: _ "embed"
# This directive allows the variable in defaults.go files to have underscore
- linters:
- revive
text: "var-naming: don't use underscores in Go names; func (.+) should be (.+)"
path: .*/defaults.go
# Disable unparam "always receives" which might not be really
# useful when building libraries.
- linters:
- unparam
text: always receives
# Dot imports for gomega or ginkgo are allowed
# within test files.
# Dot imports for gomega or ginkgo are allowed
# within test files.
- path: _test\.go
text: should not use dot imports
- path: (framework|e2e)/.*.go
text: should not use dot imports
- path: _test\.go
text: cyclomatic complexity
# Append should be able to assign to a different var/slice.
- linters:
- gocritic
- unparam
text: (.+) - (`t`|`g`) is unused
- path: _test\.go
text: cyclomatic complexity
# Append should be able to assign to a different var/slice.
- linters:
- gocritic
text: "appendAssign: append result not assigned to the same slice"
# ifshort flags variables that are only used in the if-statement even though there is
# already a SimpleStmt being used in the if-statement in question.
- linters:
- ifshort
text: "variable .* is only used in the if-statement"
path: controllers/mdutil/util.go
# Disable linters for conversion
- linters:
- staticcheck
text: "SA1019: in.(.+) is deprecated"
path: .*(api|types)\/.*\/conversion.*\.go$
- linters:
- revive
text: exported (method|function|type|const) (.+) should have comment or be unexported
path: .*(api|types|test)\/.*\/conversion.*\.go$
- linters:
- revive
text: "var-naming: don't use underscores in Go names;"
path: .*(api|types|test)\/.*\/conversion.*\.go$
- linters:
- revive
text: "receiver-naming: receiver name"
path: .*(api|types)\/.*\/conversion.*\.go$
- linters:
- stylecheck
text: "ST1003: should not use underscores in Go names;"
path: .*(api|types|test)\/.*\/conversion.*\.go$
- linters:
- stylecheck
text: "ST1016: methods on the same type should have the same receiver name"
path: .*(api|types)\/.*\/conversion.*\.go$
# hack/tools
- linters:
- typecheck
text: import (".+") is a program, not an importable package
path: ^tools\.go$
# We don't care about defer in for loops in test files.
- linters:
- gocritic
@ -262,4 +254,3 @@ run:
- "vendored_openapi\\.go$"
- "cmd"
allow-parallel-runners: true
go: '1.18'

View File

@ -10,7 +10,7 @@ WORKDIR /tmp
RUN apk add --no-cache ca-certificates
# Build the manager binary
FROM golang:1.18 as builder
FROM golang:1.19 as builder
# Run this with docker build --build_arg $(go env GOPROXY) to override the goproxy
ARG goproxy=https://goproxy.cn,direct

View File

@ -6,7 +6,7 @@ SHELL:=/usr/bin/env bash
#
# Go.
#
GO_VERSION ?= 1.18.3
GO_VERSION ?= 1.19.2
GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION)
# Use GOPROXY environment variable if set
@ -72,9 +72,17 @@ REGISTRY ?= docker.io/kubespheredev
PROD_REGISTRY ?= docker.io/kubesphere
# capkk
CAPKK_IMAGE_NAME ?= capkk-manager
CAPKK_IMAGE_NAME ?= capkk-controller
CAPKK_CONTROLLER_IMG ?= $(REGISTRY)/$(CAPKK_IMAGE_NAME)
# bootstrap
K3S_BOOTSTRAP_IMAGE_NAME ?= k3s-bootstrap-controller
K3S_BOOTSTRAP_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_BOOTSTRAP_IMAGE_NAME)
# control plane
K3S_CONTROL_PLANE_IMAGE_NAME ?= k3s-control-plane-controller
K3S_CONTROL_PLANE_CONTROLLER_IMG ?= $(REGISTRY)/$(K3S_CONTROL_PLANE_IMAGE_NAME)
# It is set by Prow GIT_TAG, a git-based tag of the form vYYYYMMDD-hash, e.g., v20210120-v0.3.10-308-gc61521971
TAG ?= dev
@ -110,7 +118,7 @@ help: ## Display this help.
##@ generate:
ALL_GENERATE_MODULES = capkk
ALL_GENERATE_MODULES = capkk k3s-bootstrap k3s-control-plane
.PHONY: generate
generate: ## Run all generate-manifests-*, generate-go-deepcopy-* targets
@ -131,17 +139,55 @@ generate-manifests-capkk: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e
output:webhook:dir=./config/webhook \
webhook
.PHONY: generate-manifests-k3s-bootstrap
generate-manifests-k3s-bootstrap: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for the k3s bootstrap provider
	$(MAKE) clean-generated-yaml SRC_DIRS="./bootstrap/k3s/config/crd/bases"
	$(CONTROLLER_GEN) \
		paths=./bootstrap/k3s/api/... \
		crd:crdVersions=v1 \
		rbac:roleName=manager-role \
		output:crd:dir=./bootstrap/k3s/config/crd/bases \
		output:rbac:dir=./bootstrap/k3s/config/rbac \
		output:webhook:dir=./bootstrap/k3s/config/webhook \
		webhook
.PHONY: generate-manifests-k3s-control-plane
generate-manifests-k3s-control-plane: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e.g. CRD, RBAC etc. for the k3s control-plane provider
	$(MAKE) clean-generated-yaml SRC_DIRS="./controlplane/k3s/config/crd/bases"
	$(CONTROLLER_GEN) \
		paths=./controlplane/k3s/api/... \
		crd:crdVersions=v1 \
		rbac:roleName=manager-role \
		output:crd:dir=./controlplane/k3s/config/crd/bases \
		output:rbac:dir=./controlplane/k3s/config/rbac \
		output:webhook:dir=./controlplane/k3s/config/webhook \
		webhook
.PHONY: generate-go-deepcopy
generate-go-deepcopy: ## Run all generate-go-deepcopy-* targets
$(MAKE) $(addprefix generate-go-deepcopy-,$(ALL_GENERATE_MODULES))
.PHONY: generate-go-deepcopy-capkk
generate-go-deepcopy-capkk: $(CONTROLLER_GEN) ## Generate deepcopy go code for core
generate-go-deepcopy-capkk: $(CONTROLLER_GEN) ## Generate deepcopy go code for capkk
$(MAKE) clean-generated-deepcopy SRC_DIRS="./api"
$(CONTROLLER_GEN) \
object:headerFile=./hack/boilerplate.go.txt \
paths=./api/... \
.PHONY: generate-go-deepcopy-k3s-bootstrap
generate-go-deepcopy-k3s-bootstrap: $(CONTROLLER_GEN) ## Generate deepcopy go code for k3s-bootstrap
$(MAKE) clean-generated-deepcopy SRC_DIRS="./bootstrap/k3s/api"
$(CONTROLLER_GEN) \
object:headerFile=./hack/boilerplate.go.txt \
paths=./bootstrap/k3s/api/... \
.PHONY: generate-go-deepcopy-k3s-control-plane
generate-go-deepcopy-k3s-control-plane: $(CONTROLLER_GEN) ## Generate deepcopy go code for k3s-control-plane
$(MAKE) clean-generated-deepcopy SRC_DIRS="./controlplane/k3s/api"
$(CONTROLLER_GEN) \
object:headerFile=./hack/boilerplate.go.txt \
paths=./controlplane/k3s/api/... \
.PHONY: generate-modules
generate-modules: ## Run go mod tidy to ensure modules are up to date
go mod tidy
@ -194,7 +240,7 @@ verify-gen: generate ## Verify go generated files are up to date
kk:
CGO_ENABLED=0 go build -trimpath -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/kk github.com/kubesphere/kubekey/cmd/kk;
ALL_MANAGERS = capkk
ALL_MANAGERS = capkk k3s-bootstrap k3s-control-plane
.PHONY: managers
managers: $(addprefix manager-,$(ALL_MANAGERS)) ## Run all manager-* targets
@ -203,6 +249,14 @@ managers: $(addprefix manager-,$(ALL_MANAGERS)) ## Run all manager-* targets
manager-capkk: ## Build the capkk manager binary into the ./bin folder
go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/manager github.com/kubesphere/kubekey
.PHONY: manager-k3s-bootstrap
manager-k3s-bootstrap: ## Build the k3s bootstrap manager binary into the ./bin folder
go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-bootstrap-manager github.com/kubesphere/kubekey/bootstrap/k3s
.PHONY: manager-k3s-control-plane
manager-k3s-control-plane: ## Build the k3s control plane manager binary into the ./bin folder
go build -trimpath -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/k3s-control-plane-manager github.com/kubesphere/kubekey/controlplane/k3s
.PHONY: docker-pull-prerequisites
docker-pull-prerequisites:
docker pull docker.io/docker/dockerfile:1.4
@ -214,19 +268,33 @@ docker-build-all: $(addprefix docker-build-,$(ALL_ARCH)) ## Build docker images
docker-build-%:
$(MAKE) ARCH=$* docker-build
ALL_DOCKER_BUILD = capkk
ALL_DOCKER_BUILD = capkk k3s-bootstrap k3s-control-plane
.PHONY: docker-build
docker-build: docker-pull-prerequisites ## Run docker-build-* targets for all providers
$(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(ALL_DOCKER_BUILD))
.PHONY: docker-build-capkk
docker-build-capkk: ## Build the docker image for capkk
DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
$(MAKE) set-manifest-image MANIFEST_IMG=$(CAPKK_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG)
$(MAKE) set-manifest-image MANIFEST_IMG=$(CAPKK_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./config/default/manager_pull_policy.yaml"
.PHONY: docker-build-k3s-bootstrap
docker-build-k3s-bootstrap: ## Build the docker image for k3s bootstrap controller manager
DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./bootstrap/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
$(MAKE) set-manifest-image MANIFEST_IMG=$(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_image_patch.yaml"
$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./bootstrap/k3s/config/default/manager_pull_policy.yaml"
.PHONY: docker-build-k3s-control-plane
docker-build-k3s-control-plane: ## Build the docker image for k3s control plane controller manager
DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./controlplane/k3s --build-arg ldflags="$(LDFLAGS)" . -t $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
$(MAKE) set-manifest-image MANIFEST_IMG=$(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./controlplane/k3s/config/default/manager_image_patch.yaml"
$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./controlplane/k3s/config/default/manager_pull_policy.yaml"
.PHONY: docker-build-e2e
docker-build-e2e: ## Build the docker image for capkk
DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t "$(CAPKK_CONTROLLER_IMG):e2e"
$(MAKE) set-manifest-image MANIFEST_IMG=$(CAPKK_CONTROLLER_IMG) MANIFEST_TAG="e2e" TARGET_RESOURCE="./config/default/manager_image_patch.yaml"
$(MAKE) set-manifest-pull-policy PULL_POLICY=IfNotPresent TARGET_RESOURCE="./config/default/manager_pull_policy.yaml"
$(MAKE) docker-build REGISTRY=docker.io/kubespheredev PULL_POLICY=IfNotPresent TAG=e2e
## --------------------------------------
## Deployment
@ -293,6 +361,10 @@ test-cover: ## Run unit and integration tests and generate a coverage report
test-e2e: ## Run e2e tests
$(MAKE) -C $(TEST_DIR)/e2e run
.PHONY: test-e2e-k3s
test-e2e-k3s: ## Run e2e tests for the k3s providers
	$(MAKE) -C $(TEST_DIR)/e2e run-k3s
## --------------------------------------
## Release
## --------------------------------------
@ -380,6 +452,7 @@ release-templates: $(RELEASE_DIR) ## Generate release templates
.PHONY: docker-push
docker-push: ## Push the docker images
	docker push $(CAPKK_CONTROLLER_IMG)-$(ARCH):$(TAG)
	docker push $(K3S_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG)
	docker push $(K3S_CONTROL_PLANE_CONTROLLER_IMG)-$(ARCH):$(TAG)
.PHONY: set-manifest-pull-policy
set-manifest-pull-policy:

View File

@ -26,10 +26,17 @@ const (
// ClusterFinalizer allows ReconcileKKCluster to clean up KK resources associated with KKCluster before
// removing it from the apiserver.
ClusterFinalizer = "kkcluster.infrastructure.cluster.x-k8s.io"
// KUBERNETES the Kubernetes distributions
KUBERNETES = "kubernetes"
// K3S the K3S distributions
K3S = "k3s"
)
// KKClusterSpec defines the desired state of KKCluster
type KKClusterSpec struct {
// Distribution represents the Kubernetes distribution type of the cluster.
Distribution string `json:"distribution,omitempty"`
// Nodes represents the information about the nodes available to the cluster
Nodes Nodes `json:"nodes"`

View File

@ -56,10 +56,20 @@ var _ webhook.Defaulter = &KKCluster{}
// Default implements webhook.Defaulter so a webhook will be registered for the
// type. It applies the distribution, node-auth, and instance defaults to the
// KKCluster spec.
func (k *KKCluster) Default() {
	kkclusterlog.Info("default", "name", k.Name)

	defaultDistribution(&k.Spec)
	defaultAuth(&k.Spec.Nodes.Auth)
	defaultInstance(&k.Spec)
}
// defaultDistribution normalizes spec.Distribution: both the empty value and
// the "k8s" shorthand resolve to the canonical "kubernetes" name. Any other
// value is left untouched.
func defaultDistribution(spec *KKClusterSpec) {
	switch spec.Distribution {
	case "", "k8s":
		spec.Distribution = "kubernetes"
	}
}
func defaultAuth(auth *Auth) {
if auth.User == "" {
auth.User = defaultSSHUser
@ -101,6 +111,7 @@ func (k *KKCluster) ValidateCreate() error {
kkclusterlog.Info("validate create", "name", k.Name)
var allErrs field.ErrorList
allErrs = append(allErrs, validateDistribution(k.Spec)...)
allErrs = append(allErrs, validateClusterNodes(k.Spec.Nodes)...)
allErrs = append(allErrs, validateLoadBalancer(k.Spec.ControlPlaneLoadBalancer)...)
@ -143,6 +154,20 @@ func (k *KKCluster) ValidateDelete() error {
return nil
}
// validateDistribution checks that spec.Distribution is one of the supported
// distributions (kubernetes or k3s) and reports a NotSupported error otherwise.
func validateDistribution(spec KKClusterSpec) []*field.Error {
	var errs field.ErrorList
	switch spec.Distribution {
	case K3S, KUBERNETES:
		// Supported distribution; nothing to report.
	default:
		distPath := field.NewPath("spec", "distribution")
		errs = append(errs, field.NotSupported(distPath, spec.Distribution, []string{K3S, KUBERNETES}))
	}
	return errs
}
func validateLoadBalancer(loadBalancer *KKLoadBalancerSpec) []*field.Error {
var errs field.ErrorList
path := field.NewPath("spec", "controlPlaneLoadBalancer")

View File

@ -44,6 +44,7 @@ var _ webhook.Defaulter = &KKClusterTemplate{}
// Default implements webhook.Defaulter so a webhook will be registered for the
// type. It applies the same spec defaulting as KKCluster, but to the
// template's embedded spec.
func (r *KKClusterTemplate) Default() {
	kkclustertemplatelog.Info("default", "name", r.Name)

	defaultDistribution(&r.Spec.Template.Spec)
	defaultAuth(&r.Spec.Template.Spec.Nodes.Auth)
	defaultInstance(&r.Spec.Template.Spec)
}
@ -57,6 +58,7 @@ func (r *KKClusterTemplate) ValidateCreate() error {
kkclustertemplatelog.Info("validate create", "name", r.Name)
var allErrs field.ErrorList
allErrs = append(allErrs, validateDistribution(r.Spec.Template.Spec)...)
allErrs = append(allErrs, validateClusterNodes(r.Spec.Template.Spec.Nodes)...)
allErrs = append(allErrs, validateLoadBalancer(r.Spec.Template.Spec.ControlPlaneLoadBalancer)...)

35
bootstrap/k3s/PROJECT Normal file
View File

@ -0,0 +1,35 @@
domain: cluster.x-k8s.io
layout:
- go.kubebuilder.io/v3
plugins:
manifests.sdk.operatorframework.io/v2: {}
scorecard.sdk.operatorframework.io/v2: {}
projectName: k3s
repo: github.com/kubesphere/kubekey/bootstrap/k3s
resources:
- api:
crdVersion: v1
namespaced: true
controller: true
domain: cluster.x-k8s.io
group: bootstrap
kind: K3sConfig
path: github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1
version: v1beta1
webhooks:
defaulting: true
validation: true
webhookVersion: v1
- api:
crdVersion: v1
namespaced: true
domain: cluster.x-k8s.io
group: bootstrap
kind: K3sConfigTemplate
path: github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1
version: v1beta1
webhooks:
defaulting: true
validation: true
webhookVersion: v1
version: "3"

View File

@ -0,0 +1,36 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1beta1 contains API Schema definitions for the bootstrap v1beta1 API group
// +kubebuilder:object:generate=true
// +groupName=bootstrap.cluster.x-k8s.io
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "bootstrap.cluster.x-k8s.io", Version: "v1beta1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)

View File

@ -0,0 +1,161 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
// ServerConfiguration defines the desired state of k3s server configuration.
type ServerConfiguration struct {
// Database is the database configuration.
Database Database `json:"database,omitempty"`
// Listener is the listener configuration.
Listener Listener `json:"listener,omitempty"`
// Networking is the networking configuration.
Networking Networking `json:"networking,omitempty"`
// Agent is the agent configuration.
Agent AgentConfiguration `json:"agent,omitempty"`
}
// AgentConfiguration defines the desired state of k3s agent configuration.
type AgentConfiguration struct {
// Node defines the k3s agent node configuration.
Node AgentNode `json:"node,omitempty"`
// Runtime defines the k3s agent runtime configuration.
Runtime AgentRuntime `json:"runtime,omitempty"`
// Networking defines the k3s agent networking configuration.
Networking AgentNetworking `json:"networking,omitempty"`
}
// Database defines the desired state of k3s database configuration.
type Database struct {
	// DataStoreEndPoint specifies the etcd, MySQL, PostgreSQL, or SQLite
	// (default) data source name.
	DataStoreEndPoint string `json:"dataStoreEndPoint,omitempty"`

	// DataStoreCAFile is the TLS Certificate Authority file used to secure
	// datastore backend communication.
	DataStoreCAFile string `json:"dataStoreCAFile,omitempty"`

	// DataStoreCertFile is the TLS certification file used to secure datastore
	// backend communication.
	DataStoreCertFile string `json:"dataStoreCertFile,omitempty"`

	// DataStoreKeyFile is the TLS key file used to secure datastore backend
	// communication.
	DataStoreKeyFile string `json:"dataStoreKeyFile,omitempty"`

	// ClusterInit initializes a new cluster using embedded etcd.
	ClusterInit bool `json:"clusterInit,omitempty"`
}
// Cluster is the desired state of k3s cluster configuration.
type Cluster struct {
// Token shared secret used to join a server or agent to a cluster.
Token string `json:"token,omitempty"`
// TokenFile file containing the cluster-secret/token.
TokenFile string `json:"tokenFile,omitempty"`
// Server which server to connect to, used to join a cluster.
Server string `json:"server,omitempty"`
}
// Listener defines the desired state of k3s listener configuration.
type Listener struct {
// BindAddress k3s bind address.
BindAddress string `json:"bindAddress,omitempty"`
// HTTPSListenPort HTTPS listen port.
HTTPSListenPort int `json:"httpsListenPort,omitempty"`
// AdvertiseAddress IP address that apiserver uses to advertise to members of the cluster.
AdvertiseAddress string `json:"advertiseAddress,omitempty"`
// AdvertisePort Port that apiserver uses to advertise to members of the cluster (default: listen-port).
AdvertisePort int `json:"advertisePort,omitempty"`
// TLSSan Add additional hostname or IP as a Subject Alternative Name in the TLS cert.
TLSSan string `json:"tlsSan,omitempty"`
}
// Networking defines the desired state of k3s networking configuration.
type Networking struct {
// ClusterCIDR Network CIDR to use for pod IPs.
ClusterCIDR string `json:"clusterCIDR,omitempty"`
// ServiceCIDR Network CIDR to use for services IPs.
ServiceCIDR string `json:"serviceCIDR,omitempty"`
// ServiceNodePortRange Port range to reserve for services with NodePort visibility.
ServiceNodePortRange string `json:"serviceNodePortRange,omitempty"`
// ClusterDNS cluster IP for coredns service. Should be in your service-cidr range.
ClusterDNS string `json:"clusterDNS,omitempty"`
// ClusterDomain cluster Domain.
ClusterDomain string `json:"clusterDomain,omitempty"`
// FlannelBackend One of none, vxlan, ipsec, host-gw, or wireguard. (default: vxlan)
FlannelBackend string `json:"flannelBackend,omitempty"`
}
// AgentNode defines the desired state of k3s agent node configuration.
type AgentNode struct {
	// NodeName is the k3s node name.
	NodeName string `json:"nodeName,omitempty"`

	// NodeLabels registers and starts kubelet with the given set of labels.
	NodeLabels []string `json:"nodeLabels,omitempty"`

	// NodeTaints registers and starts kubelet with the given set of taints.
	NodeTaints []string `json:"nodeTaints,omitempty"`

	// SeLinux enables SELinux in containerd.
	SeLinux bool `json:"seLinux,omitempty"`

	// LBServerPort is the local port for the supervisor client load-balancer.
	// If the supervisor and apiserver are not colocated an additional port 1 less than this port
	// will also be used for the apiserver client load-balancer. (default: 6444)
	LBServerPort int `json:"lbServerPort,omitempty"`

	// DataDir is the folder to hold state.
	DataDir string `json:"dataDir,omitempty"`
}
// AgentRuntime defines the desired state of k3s agent runtime configuration.
type AgentRuntime struct {
// ContainerRuntimeEndpoint Disable embedded containerd and use alternative CRI implementation.
ContainerRuntimeEndpoint string `json:"containerRuntimeEndpoint,omitempty"`
// PauseImage Customized pause image for containerd or Docker sandbox.
PauseImage string `json:"pauseImage,omitempty"`
// PrivateRegistry Path to a private registry configuration file.
PrivateRegistry string `json:"privateRegistry,omitempty"`
}
// AgentNetworking defines the desired state of k3s agent networking configuration.
type AgentNetworking struct {
// NodeIP IP address to advertise for node.
NodeIP string `json:"nodeIP,omitempty"`
// NodeExternalIP External IP address to advertise for node.
NodeExternalIP string `json:"nodeExternalIP,omitempty"`
// ResolvConf Path to Kubelet resolv.conf file.
ResolvConf string `json:"resolvConf,omitempty"`
}

View File

@ -0,0 +1,120 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
)
// K3sConfigSpec defines the desired state of K3sConfig
type K3sConfigSpec struct {
// Files specifies extra files to be passed to user_data upon creation.
// +optional
Files []bootstrapv1.File `json:"files,omitempty"`
// Cluster defines the k3s cluster Options.
Cluster *Cluster `json:"cluster,omitempty"`
// ServerConfiguration defines the k3s server configuration.
// +optional
ServerConfiguration *ServerConfiguration `json:"serverConfiguration,omitempty"`
// AgentConfiguration defines the k3s agent configuration.
// +optional
AgentConfiguration *AgentConfiguration `json:"agentConfiguration,omitempty"`
// PreK3sCommands specifies extra commands to run before k3s setup runs
// +optional
PreK3sCommands []string `json:"preK3sCommands,omitempty"`
// PostK3sCommands specifies extra commands to run after k3s setup runs
// +optional
PostK3sCommands []string `json:"postK3sCommands,omitempty"`
// Version specifies the k3s version
// +optional
Version string `json:"version,omitempty"`
}
// K3sConfigStatus defines the observed state of K3sConfig
type K3sConfigStatus struct {
	// Ready indicates the BootstrapData field is ready to be consumed
	Ready bool `json:"ready,omitempty"`

	// BootstrapData holds the generated bootstrap data.
	// NOTE(review): undocumented and lacking a +optional marker, unlike the
	// surrounding fields — confirm whether it is still needed alongside
	// DataSecretName.
	BootstrapData []byte `json:"bootstrapData,omitempty"`

	// DataSecretName is the name of the secret that stores the bootstrap data script.
	// +optional
	DataSecretName *string `json:"dataSecretName,omitempty"`

	// FailureReason will be set on non-retryable errors
	// +optional
	FailureReason string `json:"failureReason,omitempty"`

	// FailureMessage will be set on non-retryable errors
	// +optional
	FailureMessage string `json:"failureMessage,omitempty"`

	// ObservedGeneration is the latest generation observed by the controller.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`

	// Conditions defines current service state of the K3sConfig.
	// +optional
	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=k3sconfigs,scope=Namespaced,categories=cluster-api
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels['cluster\\.x-k8s\\.io/cluster-name']",description="Cluster"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of K3sConfig"
// K3sConfig is the Schema for the k3sConfigs API
type K3sConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec K3sConfigSpec `json:"spec,omitempty"`
Status K3sConfigStatus `json:"status,omitempty"`
}
// GetConditions returns the set of conditions for this object.
func (c *K3sConfig) GetConditions() clusterv1.Conditions {
return c.Status.Conditions
}
// SetConditions sets the conditions on this object.
func (c *K3sConfig) SetConditions(conditions clusterv1.Conditions) {
c.Status.Conditions = conditions
}
//+kubebuilder:object:root=true
// K3sConfigList contains a list of K3sConfig
type K3sConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []K3sConfig `json:"items"`
}
func init() {
SchemeBuilder.Register(&K3sConfig{}, &K3sConfigList{})
}

View File

@ -0,0 +1,146 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
var (
conflictingFileSourceMsg = "only one of content or contentFrom may be specified for a single file"
missingSecretNameMsg = "secret file source must specify non-empty secret name"
missingSecretKeyMsg = "secret file source must specify non-empty secret key"
pathConflictMsg = "path property must be unique among all files"
)
// SetupWebhookWithManager registers the K3sConfig defaulting and validating
// webhooks with the given controller-runtime manager.
func (c *K3sConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(c).
		Complete()
}
// +kubebuilder:webhook:verbs=create;update,path=/mutate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfig,mutating=true,failurePolicy=fail,sideEffects=None,groups=bootstrap.cluster.x-k8s.io,resources=k3sconfigs,versions=v1beta1,name=default.k3sconfig.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Defaulter = &K3sConfig{}
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (c *K3sConfig) Default() {
DefaultK3sConfigSpec(&c.Spec)
}
// DefaultK3sConfigSpec defaults a K3sConfigSpec.
func DefaultK3sConfigSpec(c *K3sConfigSpec) {
}
// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfig,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=k3sconfigs,versions=v1beta1,name=validation.k3sconfig.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Validator = &K3sConfig{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (c *K3sConfig) ValidateCreate() error {
return c.Spec.validate(c.Name)
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
// The previous object (old) is not consulted; the new spec is re-validated from scratch.
func (c *K3sConfig) ValidateUpdate(old runtime.Object) error {
	return c.Spec.validate(c.Name)
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
// Deletion is always allowed.
func (c *K3sConfig) ValidateDelete() error {
	return nil
}
// validate runs the spec-level validation rooted at "spec" and converts any
// field errors into an apierrors.Invalid for the named K3sConfig object.
func (c *K3sConfigSpec) validate(name string) error {
	if errs := c.Validate(field.NewPath("spec")); len(errs) > 0 {
		return apierrors.NewInvalid(GroupVersion.WithKind("K3sConfig").GroupKind(), name, errs)
	}
	return nil
}
// Validate ensures the K3sConfigSpec is valid.
// Only the files section is checked today; new spec-level checks should be
// aggregated here.
func (c *K3sConfigSpec) Validate(pathPrefix *field.Path) field.ErrorList {
	return c.validateFiles(pathPrefix)
}
// validateFiles checks every entry of spec.files: a file may not set both
// content and contentFrom, a secret source must name both the secret and the
// key, and paths must be unique across all files.
func (c *K3sConfigSpec) validateFiles(pathPrefix *field.Path) field.ErrorList {
	var allErrs field.ErrorList
	seenPaths := map[string]struct{}{}
	for i := range c.Files {
		file := c.Files[i]
		filePath := pathPrefix.Child("files").Index(i)

		// Inline content and referenced content are mutually exclusive.
		if file.Content != "" && file.ContentFrom != nil {
			allErrs = append(allErrs, field.Invalid(filePath, file, conflictingFileSourceMsg))
		}

		// n.b.: if we ever add types besides Secret as a ContentFrom
		// Source, we must add webhook validation here for one of the
		// sources being non-nil.
		if file.ContentFrom != nil {
			if file.ContentFrom.Secret.Name == "" {
				allErrs = append(allErrs, field.Required(filePath.Child("contentFrom", "secret", "name"), missingSecretNameMsg))
			}
			if file.ContentFrom.Secret.Key == "" {
				allErrs = append(allErrs, field.Required(filePath.Child("contentFrom", "secret", "key"), missingSecretKeyMsg))
			}
		}

		// Flag every occurrence of a path after the first one.
		if _, dup := seenPaths[file.Path]; dup {
			allErrs = append(allErrs, field.Invalid(filePath.Child("path"), file, pathConflictMsg))
		}
		seenPaths[file.Path] = struct{}{}
	}
	return allErrs
}

View File

@ -0,0 +1,57 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// K3sConfigTemplateSpec defines the desired state of K3sConfigTemplate.
type K3sConfigTemplateSpec struct {
	// Template holds the K3sConfig spec to be used for the template.
	Template K3sConfigTemplateResource `json:"template"`
}
// K3sConfigTemplateResource defines the Template structure.
type K3sConfigTemplateResource struct {
	// Spec is the K3sConfigSpec embedded in the template.
	Spec K3sConfigSpec `json:"spec,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=k3sconfigtemplates,scope=Namespaced,categories=cluster-api
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of K3sConfigTemplate"
// K3sConfigTemplate is the Schema for the k3sconfigtemplates API
type K3sConfigTemplate struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec is the desired state of the K3sConfigTemplate.
	Spec K3sConfigTemplateSpec `json:"spec,omitempty"`
}
//+kubebuilder:object:root=true
// K3sConfigTemplateList contains a list of K3sConfigTemplate
type K3sConfigTemplateList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the collection of K3sConfigTemplate objects.
	Items []K3sConfigTemplate `json:"items"`
}
// init registers the K3sConfigTemplate types with the scheme builder.
func init() {
	SchemeBuilder.Register(&K3sConfigTemplate{}, &K3sConfigTemplateList{})
}

View File

@ -0,0 +1,71 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// SetupWebhookWithManager registers the K3sConfigTemplate defaulting and
// validating webhooks with the controller manager.
func (r *K3sConfigTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(r).
		Complete()
}
// +kubebuilder:webhook:verbs=create;update,path=/mutate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfigtemplate,mutating=true,failurePolicy=fail,groups=bootstrap.cluster.x-k8s.io,resources=k3sconfigtemplates,versions=v1beta1,name=default.k3sconfigtemplate.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Defaulter = &K3sConfigTemplate{}

// Default implements webhook.Defaulter so a webhook will be registered for the type.
// It defaults the embedded K3sConfigSpec with the same function the K3sConfig webhook uses.
func (r *K3sConfigTemplate) Default() {
	DefaultK3sConfigSpec(&r.Spec.Template.Spec)
}
// +kubebuilder:webhook:verbs=create;update,path=/validate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfigtemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=bootstrap.cluster.x-k8s.io,resources=k3sconfigtemplates,versions=v1beta1,name=validation.k3sconfigtemplate.bootstrap.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Validator = &K3sConfigTemplate{}

// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (r *K3sConfigTemplate) ValidateCreate() error {
	return r.Spec.validate(r.Name)
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
// The previous object (old) is not consulted; the new spec is re-validated from scratch.
func (r *K3sConfigTemplate) ValidateUpdate(old runtime.Object) error {
	return r.Spec.validate(r.Name)
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
// Deletion is always allowed.
func (r *K3sConfigTemplate) ValidateDelete() error {
	return nil
}
// validate runs spec validation on the embedded template spec, rooted at
// "spec.template.spec", and converts any field errors into an
// apierrors.Invalid for the named K3sConfigTemplate object.
func (r *K3sConfigTemplateSpec) validate(name string) error {
	errs := r.Template.Spec.Validate(field.NewPath("spec", "template", "spec"))
	if len(errs) > 0 {
		return apierrors.NewInvalid(GroupVersion.WithKind("K3sConfigTemplate").GroupKind(), name, errs)
	}
	return nil
}

View File

@ -0,0 +1,408 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
cluster_apiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
apiv1beta1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
)
// NOTE(review): everything below is generated by controller-gen (see the file
// header, "DO NOT EDIT"); regenerate instead of hand-editing.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AgentConfiguration) DeepCopyInto(out *AgentConfiguration) {
	*out = *in
	in.Node.DeepCopyInto(&out.Node)
	// Runtime and Networking hold only value fields (see their DeepCopyInto
	// below), so plain assignment is a full copy.
	out.Runtime = in.Runtime
	out.Networking = in.Networking
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentConfiguration.
func (in *AgentConfiguration) DeepCopy() *AgentConfiguration {
	if in == nil {
		return nil
	}
	out := new(AgentConfiguration)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AgentNetworking) DeepCopyInto(out *AgentNetworking) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentNetworking.
func (in *AgentNetworking) DeepCopy() *AgentNetworking {
	if in == nil {
		return nil
	}
	out := new(AgentNetworking)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AgentNode) DeepCopyInto(out *AgentNode) {
	*out = *in
	if in.NodeLabels != nil {
		in, out := &in.NodeLabels, &out.NodeLabels
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.NodeTaints != nil {
		in, out := &in.NodeTaints, &out.NodeTaints
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentNode.
func (in *AgentNode) DeepCopy() *AgentNode {
	if in == nil {
		return nil
	}
	out := new(AgentNode)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AgentRuntime) DeepCopyInto(out *AgentRuntime) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentRuntime.
func (in *AgentRuntime) DeepCopy() *AgentRuntime {
	if in == nil {
		return nil
	}
	out := new(AgentRuntime)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Cluster holds only value fields, so a shallow assignment is a full copy.
func (in *Cluster) DeepCopyInto(out *Cluster) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
func (in *Cluster) DeepCopy() *Cluster {
	if in == nil {
		return nil
	}
	out := new(Cluster)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Database) DeepCopyInto(out *Database) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database.
func (in *Database) DeepCopy() *Database {
	if in == nil {
		return nil
	}
	out := new(Database)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sConfig) DeepCopyInto(out *K3sConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfig.
func (in *K3sConfig) DeepCopy() *K3sConfig {
	if in == nil {
		return nil
	}
	out := new(K3sConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *K3sConfig) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sConfigList) DeepCopyInto(out *K3sConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]K3sConfig, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigList.
func (in *K3sConfigList) DeepCopy() *K3sConfigList {
	if in == nil {
		return nil
	}
	out := new(K3sConfigList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *K3sConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sConfigSpec) DeepCopyInto(out *K3sConfigSpec) {
	*out = *in
	if in.Files != nil {
		in, out := &in.Files, &out.Files
		*out = make([]apiv1beta1.File, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Cluster != nil {
		in, out := &in.Cluster, &out.Cluster
		*out = new(Cluster)
		// Cluster has only value fields (see Cluster.DeepCopyInto), so a
		// dereferenced assignment is a full copy.
		**out = **in
	}
	if in.ServerConfiguration != nil {
		in, out := &in.ServerConfiguration, &out.ServerConfiguration
		*out = new(ServerConfiguration)
		(*in).DeepCopyInto(*out)
	}
	if in.AgentConfiguration != nil {
		in, out := &in.AgentConfiguration, &out.AgentConfiguration
		*out = new(AgentConfiguration)
		(*in).DeepCopyInto(*out)
	}
	if in.PreK3sCommands != nil {
		in, out := &in.PreK3sCommands, &out.PreK3sCommands
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.PostK3sCommands != nil {
		in, out := &in.PostK3sCommands, &out.PostK3sCommands
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigSpec.
func (in *K3sConfigSpec) DeepCopy() *K3sConfigSpec {
	if in == nil {
		return nil
	}
	out := new(K3sConfigSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sConfigStatus) DeepCopyInto(out *K3sConfigStatus) {
	*out = *in
	if in.BootstrapData != nil {
		in, out := &in.BootstrapData, &out.BootstrapData
		*out = make([]byte, len(*in))
		copy(*out, *in)
	}
	if in.DataSecretName != nil {
		in, out := &in.DataSecretName, &out.DataSecretName
		*out = new(string)
		**out = **in
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make(cluster_apiapiv1beta1.Conditions, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigStatus.
func (in *K3sConfigStatus) DeepCopy() *K3sConfigStatus {
	if in == nil {
		return nil
	}
	out := new(K3sConfigStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sConfigTemplate) DeepCopyInto(out *K3sConfigTemplate) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigTemplate.
func (in *K3sConfigTemplate) DeepCopy() *K3sConfigTemplate {
	if in == nil {
		return nil
	}
	out := new(K3sConfigTemplate)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *K3sConfigTemplate) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sConfigTemplateList) DeepCopyInto(out *K3sConfigTemplateList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]K3sConfigTemplate, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigTemplateList.
func (in *K3sConfigTemplateList) DeepCopy() *K3sConfigTemplateList {
	if in == nil {
		return nil
	}
	out := new(K3sConfigTemplateList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *K3sConfigTemplateList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sConfigTemplateResource) DeepCopyInto(out *K3sConfigTemplateResource) {
	*out = *in
	in.Spec.DeepCopyInto(&out.Spec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigTemplateResource.
func (in *K3sConfigTemplateResource) DeepCopy() *K3sConfigTemplateResource {
	if in == nil {
		return nil
	}
	out := new(K3sConfigTemplateResource)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sConfigTemplateSpec) DeepCopyInto(out *K3sConfigTemplateSpec) {
	*out = *in
	in.Template.DeepCopyInto(&out.Template)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sConfigTemplateSpec.
func (in *K3sConfigTemplateSpec) DeepCopy() *K3sConfigTemplateSpec {
	if in == nil {
		return nil
	}
	out := new(K3sConfigTemplateSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Listener) DeepCopyInto(out *Listener) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Listener.
func (in *Listener) DeepCopy() *Listener {
	if in == nil {
		return nil
	}
	out := new(Listener)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Networking) DeepCopyInto(out *Networking) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking.
func (in *Networking) DeepCopy() *Networking {
	if in == nil {
		return nil
	}
	out := new(Networking)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerConfiguration) DeepCopyInto(out *ServerConfiguration) {
	*out = *in
	// Database, Listener, and Networking hold only value fields (see their
	// DeepCopyInto above), so plain assignment is a full copy.
	out.Database = in.Database
	out.Listener = in.Listener
	out.Networking = in.Networking
	in.Agent.DeepCopyInto(&out.Agent)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerConfiguration.
func (in *ServerConfiguration) DeepCopy() *ServerConfiguration {
	if in == nil {
		return nil
	}
	out := new(ServerConfiguration)
	in.DeepCopyInto(out)
	return out
}

View File

@ -0,0 +1,25 @@
# The following manifests contain a self-signed issuer CR and a certificate CR.
# More documentation can be found at https://docs.cert-manager.io
# WARNING: Targets CertManager 0.11; check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for breaking changes
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: selfsigned-issuer
  namespace: system
spec:
  selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml
  namespace: system
spec:
  # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
  dnsNames:
  - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
  - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
  issuerRef:
    kind: Issuer
    name: selfsigned-issuer
  secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize

View File

@ -0,0 +1,5 @@
# Kustomize entrypoint for the cert-manager resources; kustomizeconfig.yaml
# teaches kustomize how to rewrite name references and variable substitutions.
resources:
- certificate.yaml
configurations:
- kustomizeconfig.yaml

View File

@ -0,0 +1,19 @@
# This configuration is for teaching kustomize how to update name ref and var substitution
# Keep Certificate.spec.issuerRef.name in sync when the Issuer is renamed.
nameReference:
- kind: Issuer
  group: cert-manager.io
  fieldSpecs:
  - kind: Certificate
    group: cert-manager.io
    path: spec/issuerRef/name
# Fields of Certificate where $(VAR) substitution is allowed.
varReference:
- kind: Certificate
  group: cert-manager.io
  path: spec/commonName
- kind: Certificate
  group: cert-manager.io
  path: spec/dnsNames
- kind: Certificate
  group: cert-manager.io
  path: spec/secretName

View File

@ -0,0 +1,422 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.1
creationTimestamp: null
name: k3sconfigs.bootstrap.cluster.x-k8s.io
spec:
group: bootstrap.cluster.x-k8s.io
names:
categories:
- cluster-api
kind: K3sConfig
listKind: K3sConfigList
plural: k3sconfigs
singular: k3sconfig
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: Cluster
jsonPath: .metadata.labels['cluster\.x-k8s\.io/cluster-name']
name: Cluster
type: string
- description: Time duration since creation of K3sConfig
jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta1
schema:
openAPIV3Schema:
description: K3sConfig is the Schema for the k3sConfigs API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: K3sConfigSpec defines the desired state of K3sConfig
properties:
agentConfiguration:
description: AgentConfiguration defines the k3s agent configuration.
properties:
networking:
description: Networking defines the k3s agent networking configuration.
properties:
nodeExternalIP:
description: NodeExternalIP External IP address to advertise
for node.
type: string
nodeIP:
description: NodeIP IP address to advertise for node.
type: string
resolvConf:
description: ResolvConf Path to Kubelet resolv.conf file.
type: string
type: object
node:
description: Node defines the k3s agent node configuration.
properties:
dataDir:
description: DataDir Folder to hold state.
type: string
lbServerPort:
description: 'LBServerPort Local port for supervisor client
load-balancer. If the supervisor and apiserver are not colocated
an additional port 1 less than this port will also be used
for the apiserver client load-balancer. (default: 6444)'
type: integer
nodeLabels:
description: NodeLabels registering and starting kubelet with
set of labels.
items:
type: string
type: array
nodeName:
description: NodeName k3s node name.
type: string
nodeTaints:
description: NodeTaints registering and starting kubelet with
set of taints.
items:
type: string
type: array
seLinux:
description: SeLinux Enable SELinux in containerd
type: boolean
type: object
runtime:
description: Runtime defines the k3s agent runtime configuration.
properties:
containerRuntimeEndpoint:
description: ContainerRuntimeEndpoint Disable embedded containerd
and use alternative CRI implementation.
type: string
pauseImage:
description: PauseImage Customized pause image for containerd
or Docker sandbox.
type: string
privateRegistry:
description: PrivateRegistry Path to a private registry configuration
file.
type: string
type: object
type: object
cluster:
description: Cluster defines the k3s cluster Options.
properties:
server:
description: Server which server to connect to, used to join a
cluster.
type: string
token:
description: Token shared secret used to join a server or agent
to a cluster.
type: string
tokenFile:
description: TokenFile file containing the cluster-secret/token.
type: string
type: object
files:
description: Files specifies extra files to be passed to user_data
upon creation.
items:
description: File defines the input for generating write_files in
cloud-init.
properties:
append:
description: Append specifies whether to append Content to existing
file if Path exists.
type: boolean
content:
description: Content is the actual content of the file.
type: string
contentFrom:
description: ContentFrom is a referenced source of content to
populate the file.
properties:
secret:
description: Secret represents a secret that should populate
this file.
properties:
key:
description: Key is the key in the secret's data map
for this value.
type: string
name:
description: Name of the secret in the KubeadmBootstrapConfig's
namespace to use.
type: string
required:
- key
- name
type: object
required:
- secret
type: object
encoding:
description: Encoding specifies the encoding of the file contents.
enum:
- base64
- gzip
- gzip+base64
type: string
owner:
description: Owner specifies the ownership of the file, e.g.
"root:root".
type: string
path:
description: Path specifies the full path on disk where to store
the file.
type: string
permissions:
description: Permissions specifies the permissions to assign
to the file, e.g. "0640".
type: string
required:
- path
type: object
type: array
postK3sCommands:
description: PostK3sCommands specifies extra commands to run after
k3s setup runs
items:
type: string
type: array
preK3sCommands:
description: PreK3sCommands specifies extra commands to run before
k3s setup runs
items:
type: string
type: array
serverConfiguration:
description: ServerConfiguration defines the k3s server configuration.
properties:
agent:
description: Agent is the agent configuration.
properties:
networking:
description: Networking defines the k3s agent networking configuration.
properties:
nodeExternalIP:
description: NodeExternalIP External IP address to advertise
for node.
type: string
nodeIP:
description: NodeIP IP address to advertise for node.
type: string
resolvConf:
description: ResolvConf Path to Kubelet resolv.conf file.
type: string
type: object
node:
description: Node defines the k3s agent node configuration.
properties:
dataDir:
description: DataDir Folder to hold state.
type: string
lbServerPort:
description: 'LBServerPort Local port for supervisor client
load-balancer. If the supervisor and apiserver are not
colocated an additional port 1 less than this port will
also be used for the apiserver client load-balancer.
(default: 6444)'
type: integer
nodeLabels:
description: NodeLabels registering and starting kubelet
with set of labels.
items:
type: string
type: array
nodeName:
description: NodeName k3s node name.
type: string
nodeTaints:
description: NodeTaints registering and starting kubelet
with set of taints.
items:
type: string
type: array
seLinux:
description: SeLinux Enable SELinux in containerd
type: boolean
type: object
runtime:
description: Runtime defines the k3s agent runtime configuration.
properties:
containerRuntimeEndpoint:
description: ContainerRuntimeEndpoint Disable embedded
containerd and use alternative CRI implementation.
type: string
pauseImage:
description: PauseImage Customized pause image for containerd
or Docker sandbox.
type: string
privateRegistry:
description: PrivateRegistry Path to a private registry
configuration file.
type: string
type: object
type: object
database:
description: Database is the database configuration.
properties:
clusterInit:
description: ClusterInit initialize a new cluster using embedded
Etcd.
type: boolean
dataStoreCAFile:
description: DataStoreCAFile TLS Certificate Authority file
used to secure datastore backend communication.
type: string
dataStoreCertFile:
description: DataStoreCertFile TLS certification file used
to secure datastore backend communication.
type: string
dataStoreEndPoint:
description: DataStoreEndPoint specify etcd, Mysql, Postgres,
or Sqlite (default) data source name.
type: string
dataStoreKeyFile:
description: DataStoreKeyFile TLS key file used to secure
datastore backend communication.
type: string
type: object
listener:
description: Listener is the listener configuration.
properties:
advertiseAddress:
description: AdvertiseAddress IP address that apiserver uses
to advertise to members of the cluster.
type: string
advertisePort:
description: 'AdvertisePort Port that apiserver uses to advertise
to members of the cluster (default: listen-port).'
type: integer
bindAddress:
description: BindAddress k3s bind address.
type: string
httpsListenPort:
description: HTTPSListenPort HTTPS listen port.
type: integer
tlsSan:
description: TLSSan Add additional hostname or IP as a Subject
Alternative Name in the TLS cert.
type: string
type: object
networking:
description: Networking is the networking configuration.
properties:
clusterCIDR:
description: ClusterCIDR Network CIDR to use for pod IPs.
type: string
clusterDNS:
description: ClusterDNS cluster IP for coredns service. Should
be in your service-cidr range.
type: string
clusterDomain:
description: ClusterDomain cluster Domain.
type: string
flannelBackend:
description: 'FlannelBackend One of none, vxlan, ipsec,
host-gw, or wireguard. (default: vxlan)'
type: string
serviceCIDR:
description: ServiceCIDR Network CIDR to use for services
IPs.
type: string
serviceNodePortRange:
description: ServiceNodePortRange Port range to reserve for
services with NodePort visibility.
type: string
type: object
type: object
version:
description: Version specifies the k3s version
type: string
type: object
status:
description: K3sConfigStatus defines the observed state of K3sConfig
properties:
bootstrapData:
format: byte
type: string
conditions:
description: Conditions defines current service state of the K3sConfig.
items:
description: Condition defines an observation of a Cluster API resource
operational state.
properties:
lastTransitionTime:
description: Last time the condition transitioned from one status
to another. This should be when the underlying condition changed.
If that is not known, then using the time when the API field
changed is acceptable.
format: date-time
type: string
message:
description: A human readable message indicating details about
the transition. This field may be empty.
type: string
reason:
description: The reason for the condition's last transition
in CamelCase. The specific API may choose whether or not this
field is considered a guaranteed API. This field may not be
empty.
type: string
severity:
description: Severity provides an explicit classification of
Reason code, so the users or machines can immediately understand
the current situation and act accordingly. The Severity field
MUST be set only when Status=False.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
description: Type of condition in CamelCase or in foo.example.com/CamelCase.
Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important.
type: string
required:
- lastTransitionTime
- status
- type
type: object
type: array
dataSecretName:
description: DataSecretName is the name of the secret that stores
the bootstrap data script.
type: string
failureMessage:
description: FailureMessage will be set on non-retryable errors
type: string
failureReason:
description: FailureReason will be set on non-retryable errors
type: string
observedGeneration:
description: ObservedGeneration is the latest generation observed
by the controller.
format: int64
type: integer
ready:
description: Ready indicates the BootstrapData field is ready to be
consumed
type: boolean
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@ -0,0 +1,368 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.1
creationTimestamp: null
name: k3sconfigtemplates.bootstrap.cluster.x-k8s.io
spec:
group: bootstrap.cluster.x-k8s.io
names:
categories:
- cluster-api
kind: K3sConfigTemplate
listKind: K3sConfigTemplateList
plural: k3sconfigtemplates
singular: k3sconfigtemplate
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: Time duration since creation of K3sConfigTemplate
jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta1
schema:
openAPIV3Schema:
description: K3sConfigTemplate is the Schema for the k3sconfigtemplates API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: K3sConfigTemplateSpec defines the desired state of K3sConfigTemplate
properties:
template:
description: K3sConfigTemplateResource defines the Template structure
properties:
spec:
description: K3sConfigSpec defines the desired state of K3sConfig
properties:
agentConfiguration:
description: AgentConfiguration defines the k3s agent configuration.
properties:
networking:
description: Networking defines the k3s agent networking
configuration.
properties:
nodeExternalIP:
description: NodeExternalIP External IP address to
advertise for node.
type: string
nodeIP:
description: NodeIP IP address to advertise for node.
type: string
resolvConf:
description: ResolvConf Path to Kubelet resolv.conf
file.
type: string
type: object
node:
description: Node defines the k3s agent node configuration.
properties:
dataDir:
description: DataDir Folder to hold state.
type: string
lbServerPort:
description: 'LBServerPort Local port for supervisor
client load-balancer. If the supervisor and apiserver
are not colocated an additional port 1 less than
this port will also be used for the apiserver client
load-balancer. (default: 6444)'
type: integer
nodeLabels:
description: NodeLabels registering and starting kubelet
with set of labels.
items:
type: string
type: array
nodeName:
description: NodeName k3s node name.
type: string
nodeTaints:
description: NodeTaints registering and starting kubelet
with set of taints.
items:
type: string
type: array
seLinux:
description: SeLinux Enable SELinux in containerd
type: boolean
type: object
runtime:
description: Runtime defines the k3s agent runtime configuration.
properties:
containerRuntimeEndpoint:
description: ContainerRuntimeEndpoint Disable embedded
containerd and use alternative CRI implementation.
type: string
pauseImage:
description: PauseImage Customized pause image for
containerd or Docker sandbox.
type: string
privateRegistry:
description: PrivateRegistry Path to a private registry
configuration file.
type: string
type: object
type: object
cluster:
description: Cluster defines the k3s cluster Options.
properties:
server:
description: Server which server to connect to, used to
join a cluster.
type: string
token:
description: Token shared secret used to join a server
or agent to a cluster.
type: string
tokenFile:
description: TokenFile file containing the cluster-secret/token.
type: string
type: object
files:
description: Files specifies extra files to be passed to user_data
upon creation.
items:
description: File defines the input for generating write_files
in cloud-init.
properties:
append:
description: Append specifies whether to append Content
to existing file if Path exists.
type: boolean
content:
description: Content is the actual content of the file.
type: string
contentFrom:
description: ContentFrom is a referenced source of content
to populate the file.
properties:
secret:
description: Secret represents a secret that should
populate this file.
properties:
key:
description: Key is the key in the secret's
data map for this value.
type: string
name:
description: Name of the secret in the KubeadmBootstrapConfig's
namespace to use.
type: string
required:
- key
- name
type: object
required:
- secret
type: object
encoding:
description: Encoding specifies the encoding of the
file contents.
enum:
- base64
- gzip
- gzip+base64
type: string
owner:
description: Owner specifies the ownership of the file,
e.g. "root:root".
type: string
path:
description: Path specifies the full path on disk where
to store the file.
type: string
permissions:
description: Permissions specifies the permissions to
assign to the file, e.g. "0640".
type: string
required:
- path
type: object
type: array
postK3sCommands:
description: PostK3sCommands specifies extra commands to run
after k3s setup runs
items:
type: string
type: array
preK3sCommands:
description: PreK3sCommands specifies extra commands to run
before k3s setup runs
items:
type: string
type: array
serverConfiguration:
description: ServerConfiguration defines the k3s server configuration.
properties:
agent:
description: Agent is the agent configuration.
properties:
networking:
description: Networking defines the k3s agent networking
configuration.
properties:
nodeExternalIP:
description: NodeExternalIP External IP address
to advertise for node.
type: string
nodeIP:
description: NodeIP IP address to advertise for
node.
type: string
resolvConf:
description: ResolvConf Path to Kubelet resolv.conf
file.
type: string
type: object
node:
description: Node defines the k3s agent node configuration.
properties:
dataDir:
description: DataDir Folder to hold state.
type: string
lbServerPort:
description: 'LBServerPort Local port for supervisor
client load-balancer. If the supervisor and
apiserver are not colocated an additional port
1 less than this port will also be used for
the apiserver client load-balancer. (default:
6444)'
type: integer
nodeLabels:
description: NodeLabels registering and starting
kubelet with set of labels.
items:
type: string
type: array
nodeName:
description: NodeName k3s node name.
type: string
nodeTaints:
description: NodeTaints registering and starting
kubelet with set of taints.
items:
type: string
type: array
seLinux:
description: SeLinux Enable SELinux in containerd
type: boolean
type: object
runtime:
description: Runtime defines the k3s agent runtime
configuration.
properties:
containerRuntimeEndpoint:
description: ContainerRuntimeEndpoint Disable
embedded containerd and use alternative CRI
implementation.
type: string
pauseImage:
description: PauseImage Customized pause image
for containerd or Docker sandbox.
type: string
privateRegistry:
description: PrivateRegistry Path to a private
registry configuration file.
type: string
type: object
type: object
database:
description: Database is the database configuration.
properties:
clusterInit:
description: ClusterInit initialize a new cluster
using embedded Etcd.
type: boolean
dataStoreCAFile:
description: DataStoreCAFile TLS Certificate Authority
file used to secure datastore backend communication.
type: string
dataStoreCertFile:
description: DataStoreCertFile TLS certification file
used to secure datastore backend communication.
type: string
dataStoreEndPoint:
description: DataStoreEndPoint specify etcd, Mysql,
Postgres, or Sqlite (default) data source name.
type: string
dataStoreKeyFile:
description: DataStoreKeyFile TLS key file used to
secure datastore backend communication.
type: string
type: object
listener:
description: Listener is the listener configuration.
properties:
advertiseAddress:
description: AdvertiseAddress IP address that apiserver
uses to advertise to members of the cluster.
type: string
advertisePort:
description: 'AdvertisePort Port that apiserver uses
to advertise to members of the cluster (default:
listen-port).'
type: integer
bindAddress:
description: BindAddress k3s bind address.
type: string
httpsListenPort:
description: HTTPSListenPort HTTPS listen port.
type: integer
tlsSan:
description: TLSSan Add additional hostname or IP
as a Subject Alternative Name in the TLS cert.
type: string
type: object
networking:
description: Networking is the networking configuration.
properties:
clusterCIDR:
description: ClusterCIDR Network CIDR to use for pod
IPs.
type: string
clusterDNS:
description: ClusterDNS cluster IP for coredns service.
Should be in your service-cidr range.
type: string
clusterDomain:
description: ClusterDomain cluster Domain.
type: string
flannelBackend:
description: 'FlannelBackend One of none, vxlan,
ipsec, host-gw, or wireguard. (default: vxlan)'
type: string
serviceCIDR:
description: ServiceCIDR Network CIDR to use for services
IPs.
type: string
serviceNodePortRange:
description: ServiceNodePortRange Port range to reserve
for services with NodePort visibility.
type: string
type: object
type: object
version:
description: Version specifies the k3s version
type: string
type: object
type: object
required:
- template
type: object
type: object
served: true
storage: true
subresources: {}

View File

@ -0,0 +1,27 @@
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/bootstrap.cluster.x-k8s.io_k3sconfigs.yaml
- bases/bootstrap.cluster.x-k8s.io_k3sconfigtemplates.yaml
#+kubebuilder:scaffold:crdkustomizeresource
commonLabels:
cluster.x-k8s.io/v1beta1: v1beta1
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
- patches/webhook_in_k3sconfigs.yaml
- patches/webhook_in_k3sconfigtemplates.yaml
#+kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
- patches/cainjection_in_k3sconfigs.yaml
- patches/cainjection_in_k3sconfigtemplates.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml

View File

@ -0,0 +1,19 @@
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/name
namespace:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/namespace
create: false
varReference:
- path: metadata/annotations

View File

@ -0,0 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: k3sconfigs.bootstrap.cluster.x-k8s.io

View File

@ -0,0 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: k3sconfigtemplates.bootstrap.cluster.x-k8s.io

View File

@ -0,0 +1,18 @@
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: k3sconfigs.bootstrap.cluster.x-k8s.io
spec:
conversion:
strategy: Webhook
webhook:
conversionReviewVersions: ["v1", "v1beta1"]
clientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert

View File

@ -0,0 +1,18 @@
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: k3sconfigtemplates.bootstrap.cluster.x-k8s.io
spec:
conversion:
strategy: Webhook
webhook:
conversionReviewVersions: ["v1", "v1beta1"]
clientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert

View File

@ -0,0 +1,54 @@
# Top-level kustomization for the k3s bootstrap provider deployment.
# All resource names are prefixed and placed in a dedicated namespace, and the
# Cluster API provider label identifies this deployment as "bootstrap-k3s".
namePrefix: capkk-k3s-bootstrap-
namespace: capkk-k3s-bootstrap-system
commonLabels:
  cluster.x-k8s.io/provider: "bootstrap-k3s"
resources:
- namespace.yaml
bases:
- ../rbac
- ../manager
- ../crd
- ../certmanager
- ../webhook
patchesStrategicMerge:
# Provide customizable hook for make targets.
- manager_image_patch.yaml
- manager_pull_policy.yaml
# Enable webhook.
- manager_webhook_patch.yaml
# Inject certificate in the webhook definition.
- webhookcainjection_patch.yaml
configurations:
- kustomizeconfig.yaml
# Variables substituted into the manifests (see kustomizeconfig.yaml for the
# substitution targets). They wire the cert-manager Certificate and the
# webhook Service into the annotations/fields that reference them.
vars:
- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
  objref:
    kind: Certificate
    group: cert-manager.io
    version: v1
    name: serving-cert # this name should match the one in certificate.yaml
  fieldref:
    fieldpath: metadata.namespace
- name: CERTIFICATE_NAME
  objref:
    kind: Certificate
    group: cert-manager.io
    version: v1
    name: serving-cert # this name should match the one in certificate.yaml
- name: SERVICE_NAMESPACE # namespace of the service
  objref:
    kind: Service
    version: v1
    name: webhook-service
  fieldref:
    fieldpath: metadata.namespace
- name: SERVICE_NAME
  objref:
    kind: Service
    version: v1
    name: webhook-service

View File

@ -0,0 +1,4 @@
# This configuration is for teaching kustomize how to update name ref and var substitution
varReference:
- kind: Deployment
path: spec/template/spec/volumes/secret/secretName

View File

@ -0,0 +1,11 @@
# Patch that sets the controller image; the image tag is typically rewritten
# by make targets before deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
spec:
  template:
    spec:
      containers:
      - image: docker.io/kubespheredev/k3s-bootstrap-controller:main
        name: manager

View File

@ -0,0 +1,11 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: manager
imagePullPolicy: Always

View File

@ -0,0 +1,22 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: manager
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
volumes:
- name: cert
secret:
secretName: $(SERVICE_NAME)-cert

View File

@ -0,0 +1,6 @@
# Namespace for the provider; the actual name is prefixed/overridden by the
# top-level kustomization's namespace setting.
apiVersion: v1
kind: Namespace
metadata:
  labels:
    control-plane: controller-manager
  name: system

View File

@ -0,0 +1,14 @@
# Patch that instructs cert-manager to inject the CA bundle into the webhook
# configurations; CERTIFICATE_NAMESPACE/CERTIFICATE_NAME are substituted from
# the kustomize vars.
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
  name: mutating-webhook-configuration
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: validating-webhook-configuration
  annotations:
    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)

View File

@ -0,0 +1,2 @@
resources:
- manager.yaml

View File

@ -0,0 +1,44 @@
# Deployment of the k3s bootstrap controller manager.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controller-manager
  namespace: system
  labels:
    control-plane: controller-manager
spec:
  selector:
    matchLabels:
      control-plane: controller-manager
  replicas: 1
  template:
    metadata:
      labels:
        control-plane: controller-manager
    spec:
      containers:
      - command:
        - /manager
        args:
        - "--leader-elect"
        - "--metrics-bind-addr=localhost:8080"
        # The image is replaced via manager_image_patch.yaml in config/default.
        image: controller:latest
        name: manager
        ports:
        - containerPort: 9440
          name: healthz
          protocol: TCP
        # Both probes use the health endpoint exposed on the healthz port.
        readinessProbe:
          httpGet:
            path: /readyz
            port: healthz
        livenessProbe:
          httpGet:
            path: /healthz
            port: healthz
      terminationGracePeriodSeconds: 10
      serviceAccountName: manager
      # Allow scheduling onto control-plane nodes (both the legacy "master"
      # taint and the current "control-plane" taint are tolerated).
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane

View File

@ -0,0 +1,6 @@
resources:
- service_account.yaml
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml

View File

@ -0,0 +1,37 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: leader-election-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch

View File

@ -0,0 +1,12 @@
# Grants the manager ServiceAccount the leader-election Role in its namespace.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: leader-election-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: leader-election-role
subjects:
- kind: ServiceAccount
  name: manager
  namespace: system

View File

@ -0,0 +1,48 @@
# ClusterRole for the k3s bootstrap controller manager.
# NOTE(review): this appears to be generated by controller-gen from the
# +kubebuilder:rbac markers in the controller sources — regenerate rather
# than hand-edit.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  creationTimestamp: null
  name: manager-role
rules:
# Manage bootstrap-data secrets, events and configmaps.
- apiGroups:
  - ""
  resources:
  - configmaps
  - events
  - secrets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
# Manage the provider's own K3sConfig resources and their subresources.
- apiGroups:
  - bootstrap.cluster.x-k8s.io
  resources:
  - k3sconfigs
  - k3sconfigs/finalizers
  - k3sconfigs/status
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
# Read-only access to the Cluster API objects the controller watches.
- apiGroups:
  - cluster.x-k8s.io
  resources:
  - clusters
  - clusters/status
  - machinepools
  - machinepools/status
  - machines
  - machines/status
  verbs:
  - get
  - list
  - watch

View File

@ -0,0 +1,12 @@
# Grants the manager ServiceAccount the cluster-wide manager-role.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: manager-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: manager-role
subjects:
- kind: ServiceAccount
  name: manager
  namespace: system

View File

@ -0,0 +1,5 @@
# ServiceAccount the controller manager Deployment runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: manager
  namespace: system

View File

@ -0,0 +1,6 @@
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: K3sConfig
metadata:
name: k3sconfig-sample
spec:
# TODO(user): Add fields here

View File

@ -0,0 +1,6 @@
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: K3sConfigTemplate
metadata:
name: k3sconfigtemplate-sample
spec:
# TODO(user): Add fields here

View File

@ -0,0 +1,5 @@
## Append samples you want in your CSV to this file as resources ##
resources:
- bootstrap_v1beta1_k3sconfig.yaml
- bootstrap_v1beta1_k3sconfigtemplate.yaml
#+kubebuilder:scaffold:manifestskustomizesamples

View File

@ -0,0 +1,7 @@
apiVersion: scorecard.operatorframework.io/v1alpha3
kind: Configuration
metadata:
name: config
stages:
- parallel: true
tests: []

View File

@ -0,0 +1,16 @@
resources:
- bases/config.yaml
patchesJson6902:
- path: patches/basic.config.yaml
target:
group: scorecard.operatorframework.io
version: v1alpha3
kind: Configuration
name: config
- path: patches/olm.config.yaml
target:
group: scorecard.operatorframework.io
version: v1alpha3
kind: Configuration
name: config
#+kubebuilder:scaffold:patchesJson6902

View File

@ -0,0 +1,10 @@
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- basic-check-spec
image: quay.io/operator-framework/scorecard-test:v1.22.2
labels:
suite: basic
test: basic-check-spec-test

View File

@ -0,0 +1,50 @@
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-bundle-validation
image: quay.io/operator-framework/scorecard-test:v1.22.2
labels:
suite: olm
test: olm-bundle-validation-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-crds-have-validation
image: quay.io/operator-framework/scorecard-test:v1.22.2
labels:
suite: olm
test: olm-crds-have-validation-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-crds-have-resources
image: quay.io/operator-framework/scorecard-test:v1.22.2
labels:
suite: olm
test: olm-crds-have-resources-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-spec-descriptors
image: quay.io/operator-framework/scorecard-test:v1.22.2
labels:
suite: olm
test: olm-spec-descriptors-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-status-descriptors
image: quay.io/operator-framework/scorecard-test:v1.22.2
labels:
suite: olm
test: olm-status-descriptors-test

View File

@ -0,0 +1,6 @@
resources:
- manifests.yaml
- service.yaml
configurations:
- kustomizeconfig.yaml

View File

@ -0,0 +1,25 @@
# the following config is for teaching kustomize where to look at when substituting vars.
# It requires kustomize v2.1.0 or newer to work properly.
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: MutatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/name
- kind: ValidatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/name
namespace:
- kind: MutatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/namespace
create: true
- kind: ValidatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/namespace
create: true
varReference:
- path: metadata/annotations

View File

@ -0,0 +1,100 @@
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
creationTimestamp: null
name: mutating-webhook-configuration
webhooks:
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
path: /mutate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfig
failurePolicy: Fail
name: default.k3sconfig.bootstrap.cluster.x-k8s.io
rules:
- apiGroups:
- bootstrap.cluster.x-k8s.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- k3sconfigs
sideEffects: None
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
path: /mutate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfigtemplate
failurePolicy: Fail
name: default.k3sconfigtemplate.bootstrap.cluster.x-k8s.io
rules:
- apiGroups:
- bootstrap.cluster.x-k8s.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- k3sconfigtemplates
sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
creationTimestamp: null
name: validating-webhook-configuration
webhooks:
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
path: /validate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfig
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.k3sconfig.bootstrap.cluster.x-k8s.io
rules:
- apiGroups:
- bootstrap.cluster.x-k8s.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- k3sconfigs
sideEffects: None
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: system
path: /validate-bootstrap-cluster-x-k8s-io-v1beta1-k3sconfigtemplate
failurePolicy: Fail
matchPolicy: Equivalent
name: validation.k3sconfigtemplate.bootstrap.cluster.x-k8s.io
rules:
- apiGroups:
- bootstrap.cluster.x-k8s.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- k3sconfigtemplates
sideEffects: None

View File

@ -0,0 +1,13 @@
# Service fronting the webhook server: forwards HTTPS (443) to the manager
# pod's webhook port (9443), selected by the control-plane label.
apiVersion: v1
kind: Service
metadata:
  name: webhook-service
  namespace: system
spec:
  ports:
  - port: 443
    protocol: TCP
    targetPort: 9443
  selector:
    control-plane: controller-manager

View File

@ -14,5 +14,5 @@
limitations under the License.
*/
// Package binary define the binaries operations on the remote instance.
package binary
// Package controllers contains k3s config controllers.
package controllers

View File

@ -0,0 +1,764 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kerrors "k8s.io/apimachinery/pkg/util/errors"
bootstraputil "k8s.io/cluster-bootstrap/token/util"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
bsutil "sigs.k8s.io/cluster-api/bootstrap/util"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/predicates"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/source"
infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
"github.com/kubesphere/kubekey/bootstrap/k3s/pkg/cloudinit"
"github.com/kubesphere/kubekey/bootstrap/k3s/pkg/locking"
k3stypes "github.com/kubesphere/kubekey/bootstrap/k3s/pkg/types"
kklog "github.com/kubesphere/kubekey/util/log"
"github.com/kubesphere/kubekey/util/secret"
)
// InitLocker is a lock that is used around k3s init: it ensures that only one
// control plane machine at a time generates the cluster-initialization
// bootstrap data.
type InitLocker interface {
	// Lock tries to acquire the init lock for the cluster on behalf of the
	// given machine; it returns true if the lock was acquired.
	Lock(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool
	// Unlock releases the init lock for the cluster; it returns true if the
	// lock was released (or was not held).
	Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool
}
// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=k3sconfigs;k3sconfigs/status;k3sconfigs/finalizers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status;machinesets;machines;machines/status;machinepools;machinepools/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets;events;configmaps,verbs=get;list;watch;create;update;patch;delete

// K3sConfigReconciler reconciles a K3sConfig object.
type K3sConfigReconciler struct {
	client.Client

	// K3sInitLock serializes cluster initialization so only one control plane
	// machine generates init bootstrap data; when nil, a default lock is
	// created in SetupWithManager.
	K3sInitLock InitLocker

	// WatchFilterValue is the label value used to filter events prior to reconciliation.
	WatchFilterValue string
}
// Scope is a scoped struct used during reconciliation; it bundles the objects
// a single reconcile pass operates on, plus a logger.
type Scope struct {
	logr.Logger

	// Config is the K3sConfig being reconciled.
	Config *infrabootstrapv1.K3sConfig
	// ConfigOwner is the owner resource of Config (e.g. a Machine or MachinePool).
	ConfigOwner *bsutil.ConfigOwner
	// Cluster is the Cluster the config owner belongs to.
	Cluster *clusterv1.Cluster
}
// SetupWithManager sets up the controller with the Manager.
//
// It watches K3sConfig objects, the Machines (and, when the MachinePool
// feature gate is enabled, MachinePools) that own them, and Clusters, mapping
// each event back to the affected K3sConfigs. Events are filtered by pause
// state and by the WatchFilterValue label.
func (r *K3sConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
	// Fall back to the default control-plane init mutex when no lock was injected.
	if r.K3sInitLock == nil {
		r.K3sInitLock = locking.NewControlPlaneInitMutex(mgr.GetClient())
	}

	builder := ctrl.NewControllerManagedBy(mgr).
		For(&infrabootstrapv1.K3sConfig{}).
		WithOptions(options).
		Watches(
			&source.Kind{Type: &clusterv1.Machine{}},
			handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc),
		).WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue))

	// MachinePools are only watched when the experimental feature gate is on.
	if feature.Gates.Enabled(feature.MachinePool) {
		builder = builder.Watches(
			&source.Kind{Type: &expv1.MachinePool{}},
			handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc),
		).WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue))
	}

	c, err := builder.Build(r)
	if err != nil {
		return errors.Wrap(err, "failed setting up with a controller manager")
	}

	// Cluster events are mapped to the K3sConfigs in that cluster, but only
	// once the cluster is unpaused and its infrastructure is ready.
	if err := c.Watch(
		&source.Kind{Type: &clusterv1.Cluster{}},
		handler.EnqueueRequestsFromMapFunc(r.ClusterToK3sConfigs),
		predicates.All(ctrl.LoggerFrom(ctx),
			predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)),
			predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue),
		),
	); err != nil {
		return errors.Wrap(err, "failed adding Watch for Clusters to controller manager")
	}

	return nil
}
// Reconcile handles K3sConfig events.
//
// It fetches the K3sConfig, resolves its config owner and the owning Cluster,
// and then either generates bootstrap data for the first control plane
// machine (cluster initialization, guarded by K3sInitLock) or generates join
// data for additional control-plane or worker machines. The object's status
// is patched on every exit path via the deferred patch helper.
func (r *K3sConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, retErr error) {
	log := ctrl.LoggerFrom(ctx)

	// Look up the K3sConfig being reconciled; a missing object means it was
	// deleted and there is nothing to do.
	config := &infrabootstrapv1.K3sConfig{}
	if err := r.Client.Get(ctx, req.NamespacedName, config); err != nil {
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		log.Error(err, "Failed to get config")
		return ctrl.Result{}, err
	}

	// AddOwners adds the owners of K3sConfig as k/v pairs to the logger.
	// Specifically, it will add K3sControlPlane, MachineSet and MachineDeployment.
	ctx, log, err := kklog.AddOwners(ctx, r.Client, config)
	if err != nil {
		return ctrl.Result{}, err
	}

	// Look up the owner of this k3s config if there is one.
	configOwner, err := bsutil.GetConfigOwner(ctx, r.Client, config)
	if apierrors.IsNotFound(err) {
		// Could not find the owner yet, this is not an error and will rereconcile when the owner gets set.
		return ctrl.Result{}, nil
	}
	if err != nil {
		log.Error(err, "Failed to get owner")
		return ctrl.Result{}, err
	}
	if configOwner == nil {
		return ctrl.Result{}, nil
	}
	log = log.WithValues(configOwner.GetKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion())

	log = log.WithValues("Cluster", klog.KRef(configOwner.GetNamespace(), configOwner.ClusterName()))
	ctx = ctrl.LoggerInto(ctx, log)

	// Lookup the cluster the config owner is associated with.
	cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName())
	if err != nil {
		if errors.Cause(err) == util.ErrNoCluster {
			log.Info(fmt.Sprintf("%s does not belong to a cluster yet, waiting until it's part of a cluster", configOwner.GetKind()))
			return ctrl.Result{}, nil
		}
		if apierrors.IsNotFound(err) {
			log.Info("Cluster does not exist yet, waiting until it is created")
			return ctrl.Result{}, nil
		}
		log.Error(err, "Could not get cluster with metadata")
		return ctrl.Result{}, err
	}

	if annotations.IsPaused(cluster, config) {
		log.Info("Reconciliation is paused for this object")
		return ctrl.Result{}, nil
	}

	scope := &Scope{
		Logger:      log,
		Config:      config,
		ConfigOwner: configOwner,
		Cluster:     cluster,
	}

	// Initialize the patch helper.
	patchHelper, err := patch.NewHelper(config, r.Client)
	if err != nil {
		return ctrl.Result{}, err
	}

	// Attempt to Patch the K3sConfig object and status after each reconciliation if no error occurs.
	defer func() {
		// always update the readyCondition; the summary is represented using the "1 of x completed" notation.
		conditions.SetSummary(config,
			conditions.WithConditions(
				bootstrapv1.DataSecretAvailableCondition,
				bootstrapv1.CertificatesAvailableCondition,
			),
		)
		// Patch ObservedGeneration only if the reconciliation completed successfully.
		var patchOpts []patch.Option
		if retErr == nil {
			patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
		}
		if err := patchHelper.Patch(ctx, config, patchOpts...); err != nil {
			log.Error(retErr, "Failed to patch config")
			// Surface the patch error only when the reconcile itself succeeded.
			if retErr == nil {
				retErr = err
			}
		}
	}()

	switch {
	// Wait for the infrastructure to be ready.
	case !cluster.Status.InfrastructureReady:
		log.Info("Cluster infrastructure is not ready, waiting")
		conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "")
		return ctrl.Result{}, nil
	// Reconcile status for machines that already have a secret reference, but our status isn't up-to-date.
	// This case solves the pivoting scenario (or a backup restore) which doesn't preserve the status subresource on objects.
	case configOwner.DataSecretName() != nil && (!config.Status.Ready || config.Status.DataSecretName == nil):
		config.Status.Ready = true
		config.Status.DataSecretName = configOwner.DataSecretName()
		conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition)
		return ctrl.Result{}, nil
	// Status is ready means a config has been generated.
	case config.Status.Ready:
		return ctrl.Result{}, nil
	}

	// Note: can't use IsFalse here because we need to handle the absence of the condition as well as false.
	if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) {
		return r.handleClusterNotInitialized(ctx, scope)
	}

	// Every other case it's a join scenario.
	// Nb. in this case ClusterConfiguration and InitConfiguration should not be defined by users, but in case of misconfigurations, CABPK3s simply ignore them.

	// Unlock any locks that might have been set during init process.
	r.K3sInitLock.Unlock(ctx, cluster)

	// if the AgentConfiguration is missing, create a default one.
	if config.Spec.AgentConfiguration == nil {
		log.Info("Creating default AgentConfiguration")
		config.Spec.AgentConfiguration = &infrabootstrapv1.AgentConfiguration{}
	}

	// it's a control plane join
	if configOwner.IsControlPlaneMachine() {
		return r.joinControlplane(ctx, scope)
	}

	// It's a worker join
	return r.joinWorker(ctx, scope)
}
// handleClusterNotInitialized generates the bootstrap data for the very first
// control plane machine of the cluster. Worker machines and additional control
// plane machines are requeued until the control plane has been initialized.
//
// An init lock guarantees that only one machine performs the initialization;
// the lock is released on error (here) or after the control plane is up
// (by the caller's join path).
func (r *K3sConfigReconciler) handleClusterNotInitialized(ctx context.Context, scope *Scope) (_ ctrl.Result, retErr error) {
	// initialize the DataSecretAvailableCondition if missing.
	// this is required in order to avoid the condition's LastTransitionTime to flicker in case of errors surfacing
	// using the DataSecretGeneratedFailedReason
	if conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason {
		conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
	}

	// if it's NOT a control plane machine, requeue
	if !scope.ConfigOwner.IsControlPlaneMachine() {
		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
	}

	// if the machine has neither a ServerConfiguration nor an AgentConfiguration, requeue
	if scope.Config.Spec.ServerConfiguration == nil && scope.Config.Spec.AgentConfiguration == nil {
		scope.Info("Control plane is not ready, requeing joining control planes until ready.")
		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
	}

	machine := &clusterv1.Machine{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(scope.ConfigOwner.Object, machine); err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "cannot convert %s to Machine", scope.ConfigOwner.GetKind())
	}

	// acquire the init lock so that only the first machine configured
	// as control plane get processed here
	// if not the first, requeue
	if !r.K3sInitLock.Lock(ctx, scope.Cluster, machine) {
		scope.Info("A control plane is already being initialized, requeing until control plane is ready")
		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
	}

	defer func() {
		if retErr != nil {
			if !r.K3sInitLock.Unlock(ctx, scope.Cluster) {
				// NOTE: this is the k3s init lock (the previous message
				// incorrectly referred to the kubeadm init lock).
				retErr = kerrors.NewAggregate([]error{retErr, errors.New("failed to unlock the k3s init lock")})
			}
		}
	}()

	scope.Info("Creating BootstrapData for the first control plane")

	if scope.Config.Spec.ServerConfiguration == nil {
		scope.Config.Spec.ServerConfiguration = &infrabootstrapv1.ServerConfiguration{}
	}

	// injects into config.Spec.ServerConfiguration values from top level objects (cluster, machine)
	r.reconcileTopLevelObjectSettings(ctx, scope.Cluster, machine, scope.Config)

	// Look up (or create on first run) the cluster CA/etc. certificates, owned by this config.
	certificates := secret.NewCertificatesForInitialControlPlane()
	err := certificates.LookupOrGenerate(
		ctx,
		r.Client,
		util.ObjectKey(scope.Cluster),
		*metav1.NewControllerRef(scope.Config, bootstrapv1.GroupVersion.WithKind("K3sConfig")),
	)
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, err
	}
	conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition)

	// Generate the shared cluster token once; joining nodes fetch it later
	// from the "<cluster>-token" secret (see reconcileDiscovery).
	t, err := r.generateAndStoreToken(ctx, scope)
	if err != nil {
		return ctrl.Result{}, err
	}

	initData, err := k3stypes.MarshalInitServerConfiguration(&scope.Config.Spec, t)
	if err != nil {
		scope.Error(err, "Failed to marshal server configuration")
		return ctrl.Result{}, err
	}

	files, err := r.resolveFiles(ctx, scope.Config)
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, err
	}

	initConfigFile := bootstrapv1.File{
		Path:        k3stypes.DefaultK3sConfigLocation,
		Content:     initData,
		Owner:       "root:root",
		Permissions: "0640",
	}

	controlPlaneInput := &cloudinit.ControlPlaneInput{
		BaseUserData: cloudinit.BaseUserData{
			AdditionalFiles: files,
			PreK3sCommands:  scope.Config.Spec.PreK3sCommands,
			PostK3sCommands: scope.Config.Spec.PostK3sCommands,
			ConfigFile:      initConfigFile,
		},
		Certificates: certificates,
	}

	bootstrapInitData, err := cloudinit.NewInitControlPlane(controlPlaneInput)
	if err != nil {
		scope.Error(err, "Failed to generate user data for bootstrap control plane")
		return ctrl.Result{}, err
	}

	if err := r.storeBootstrapData(ctx, scope, bootstrapInitData); err != nil {
		scope.Error(err, "Failed to store bootstrap data")
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}
// joinWorker generates and stores the bootstrap data used by a worker machine
// to join the k3s cluster as an agent.
func (r *K3sConfigReconciler) joinWorker(ctx context.Context, scope *Scope) (ctrl.Result, error) {
	scope.Info("Creating BootstrapData for the worker node")

	// Ensure that agentConfiguration is properly set for joining node on the current cluster.
	if res, err := r.reconcileDiscovery(ctx, scope.Cluster, scope.Config); err != nil {
		return ctrl.Result{}, err
	} else if !res.IsZero() {
		// Discovery data (server endpoint / token) is not available yet; requeue.
		return res, nil
	}

	joinWorkerData, err := k3stypes.MarshalJoinAgentConfiguration(scope.Config.Spec.AgentConfiguration)
	if err != nil {
		scope.Error(err, "Failed to marshal join configuration")
		return ctrl.Result{}, err
	}

	files, err := r.resolveFiles(ctx, scope.Config)
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, err
	}

	joinConfigFile := bootstrapv1.File{
		Path:        k3stypes.DefaultK3sConfigLocation,
		Content:     joinWorkerData,
		Owner:       "root:root",
		Permissions: "0640",
	}

	workerJoinInput := &cloudinit.NodeInput{
		BaseUserData: cloudinit.BaseUserData{
			AdditionalFiles: files,
			PreK3sCommands:  scope.Config.Spec.PreK3sCommands,
			PostK3sCommands: scope.Config.Spec.PostK3sCommands,
			ConfigFile:      joinConfigFile,
		},
	}

	cloudInitData, err := cloudinit.NewNode(workerJoinInput)
	if err != nil {
		// Fixed copy-pasted message that previously said "control plane".
		scope.Error(err, "Failed to generate user data for worker node")
		return ctrl.Result{}, err
	}

	if err := r.storeBootstrapData(ctx, scope, cloudInitData); err != nil {
		scope.Error(err, "Failed to store bootstrap data")
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}
// joinControlplane generates and stores the bootstrap data used by an
// additional control plane machine to join an existing k3s cluster.
func (r *K3sConfigReconciler) joinControlplane(ctx context.Context, scope *Scope) (ctrl.Result, error) {
	scope.Info("Creating BootstrapData for the joining control plane")

	// Only Machine owners may join the control plane.
	if !scope.ConfigOwner.IsControlPlaneMachine() {
		return ctrl.Result{}, fmt.Errorf("%s is not a valid control plane kind, only Machine is supported", scope.ConfigOwner.GetKind())
	}

	if scope.Config.Spec.Cluster == nil {
		scope.Config.Spec.Cluster = &infrabootstrapv1.Cluster{}
	}

	// Ensure that joinConfiguration.Discovery is properly set for joining node on the current cluster.
	res, err := r.reconcileDiscovery(ctx, scope.Cluster, scope.Config)
	if err != nil {
		return ctrl.Result{}, err
	}
	if !res.IsZero() {
		return res, nil
	}

	serverJoinData, err := k3stypes.MarshalJoinServerConfiguration(scope.Config.Spec.ServerConfiguration)
	if err != nil {
		scope.Error(err, "Failed to marshal join configuration")
		return ctrl.Result{}, err
	}

	resolvedFiles, err := r.resolveFiles(ctx, scope.Config)
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, err
	}

	joinInput := &cloudinit.ControlPlaneInput{
		BaseUserData: cloudinit.BaseUserData{
			AdditionalFiles: resolvedFiles,
			PreK3sCommands:  scope.Config.Spec.PreK3sCommands,
			PostK3sCommands: scope.Config.Spec.PostK3sCommands,
			ConfigFile: bootstrapv1.File{
				Path:        k3stypes.DefaultK3sConfigLocation,
				Content:     serverJoinData,
				Owner:       "root:root",
				Permissions: "0640",
			},
		},
	}

	userData, err := cloudinit.NewJoinControlPlane(joinInput)
	if err != nil {
		scope.Error(err, "Failed to generate user data for bootstrap control plane")
		return ctrl.Result{}, err
	}

	if err := r.storeBootstrapData(ctx, scope, userData); err != nil {
		scope.Error(err, "Failed to store bootstrap data")
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}
// generateAndStoreToken creates a fresh bootstrap token and persists it in the
// "<cluster>-token" secret (owned by this K3sConfig) so that joining nodes can
// retrieve it later via reconcileDiscovery. It returns the raw token string.
func (r *K3sConfigReconciler) generateAndStoreToken(ctx context.Context, scope *Scope) (string, error) {
	token, err := bootstraputil.GenerateBootstrapToken()
	if err != nil {
		return "", errors.Wrap(err, "unable to generate bootstrap token")
	}

	tokenSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-token", scope.Cluster.Name),
			Namespace: scope.Config.Namespace,
			Labels: map[string]string{
				clusterv1.ClusterLabelName: scope.Cluster.Name,
			},
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: bootstrapv1.GroupVersion.String(),
					Kind:       "K3sConfig",
					Name:       scope.Config.Name,
					UID:        scope.Config.UID,
					Controller: pointer.Bool(true),
				},
			},
		},
		Data: map[string][]byte{
			"value": []byte(token),
		},
		Type: clusterv1.ClusterSecretType,
	}

	// as secret creation and scope.Config status patch are not atomic operations
	// it is possible that secret creation happens but the config.Status patches are not applied
	if err := r.Client.Create(ctx, tokenSecret); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return "", errors.Wrapf(err, "failed to create token for K3sConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
		}
		// The secret already exists (e.g. from a previous reconcile); refresh it.
		if err := r.Client.Update(ctx, tokenSecret); err != nil {
			return "", errors.Wrapf(err, "failed to update bootstrap token secret for K3sConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
		}
	}
	return token, nil
}
// resolveFiles maps .Spec.Files into cloudinit.Files, resolving any object
// references along the way: entries with a ContentFrom secret reference are
// replaced by inline content fetched from the referenced secret.
func (r *K3sConfigReconciler) resolveFiles(ctx context.Context, cfg *infrabootstrapv1.K3sConfig) ([]bootstrapv1.File, error) {
	resolved := make([]bootstrapv1.File, 0, len(cfg.Spec.Files))
	for _, file := range cfg.Spec.Files {
		// file is a copy, so mutating it does not touch cfg.Spec.Files.
		if file.ContentFrom != nil {
			content, err := r.resolveSecretFileContent(ctx, cfg.Namespace, file)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to resolve file source")
			}
			file.ContentFrom = nil
			file.Content = string(content)
		}
		resolved = append(resolved, file)
	}
	return resolved, nil
}
// resolveSecretFileContent returns file content fetched from a referenced secret object.
func (r *K3sConfigReconciler) resolveSecretFileContent(ctx context.Context, ns string, source bootstrapv1.File) ([]byte, error) {
	key := types.NamespacedName{Namespace: ns, Name: source.ContentFrom.Secret.Name}
	referenced := &corev1.Secret{}
	if err := r.Client.Get(ctx, key, referenced); err != nil {
		if apierrors.IsNotFound(err) {
			return nil, errors.Wrapf(err, "secret not found: %s", key)
		}
		return nil, errors.Wrapf(err, "failed to retrieve Secret %q", key)
	}

	content, ok := referenced.Data[source.ContentFrom.Secret.Key]
	if !ok {
		return nil, errors.Errorf("secret references non-existent secret key: %q", source.ContentFrom.Secret.Key)
	}
	return content, nil
}
// storeBootstrapData creates a new secret with the data passed in as input,
// sets the reference in the configuration status and ready to true.
func (r *K3sConfigReconciler) storeBootstrapData(ctx context.Context, scope *Scope, data []byte) error {
	log := ctrl.LoggerFrom(ctx)

	bootstrapSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      scope.Config.Name,
			Namespace: scope.Config.Namespace,
			Labels: map[string]string{
				clusterv1.ClusterLabelName: scope.Cluster.Name,
			},
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: bootstrapv1.GroupVersion.String(),
					Kind:       "K3sConfig",
					Name:       scope.Config.Name,
					UID:        scope.Config.UID,
					Controller: pointer.Bool(true),
				},
			},
		},
		Data: map[string][]byte{
			"value": data,
		},
		Type: clusterv1.ClusterSecretType,
	}

	// as secret creation and scope.Config status patch are not atomic operations
	// it is possible that secret creation happens but the config.Status patches are not applied
	switch err := r.Client.Create(ctx, bootstrapSecret); {
	case err == nil:
		// Secret created successfully.
	case !apierrors.IsAlreadyExists(err):
		return errors.Wrapf(err, "failed to create bootstrap data secret for K3sConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
	default:
		log.Info("bootstrap data secret for K3sConfig already exists, updating", "Secret", klog.KObj(bootstrapSecret))
		if err := r.Client.Update(ctx, bootstrapSecret); err != nil {
			return errors.Wrapf(err, "failed to update bootstrap data secret for K3sConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
		}
	}

	scope.Config.Status.DataSecretName = pointer.String(bootstrapSecret.Name)
	scope.Config.Status.Ready = true
	conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition)
	return nil
}
// reconcileDiscovery ensures config.Spec.Cluster carries the information a
// joining node needs to reach and authenticate to the existing cluster: the
// server URL (from the cluster control plane endpoint) and the shared token
// (from the "<cluster>-token" secret created by generateAndStoreToken).
// It requeues while the control plane endpoint is not set yet.
func (r *K3sConfigReconciler) reconcileDiscovery(ctx context.Context, cluster *clusterv1.Cluster, config *infrabootstrapv1.K3sConfig) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)

	// if the config already references a token file, respect it without further validations
	if config.Spec.Cluster.TokenFile != "" {
		return ctrl.Result{}, nil
	}

	// if Cluster.Server is already set, respect it; otherwise inject the control plane endpoint defined in the cluster spec
	apiServerEndpoint := config.Spec.Cluster.Server
	if apiServerEndpoint == "" {
		if !cluster.Spec.ControlPlaneEndpoint.IsValid() {
			// The endpoint is populated by another controller; requeue until it exists.
			log.V(1).Info("Waiting for Cluster Controller to set Cluster.Server")
			return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
		}
		apiServerEndpoint = cluster.Spec.ControlPlaneEndpoint.String()
		config.Spec.Cluster.Server = fmt.Sprintf("https://%s", apiServerEndpoint)
		log.V(3).Info("Altering Cluster.Server", "Server", apiServerEndpoint)
	}

	// if Cluster.Token is already set, respect it; otherwise fetch the shared token stored for this cluster
	if config.Spec.Cluster.Token == "" {
		s := &corev1.Secret{}
		obj := client.ObjectKey{
			Namespace: config.Namespace,
			Name:      fmt.Sprintf("%s-token", cluster.Name),
		}
		if err := r.Client.Get(ctx, obj, s); err != nil {
			return ctrl.Result{}, errors.Wrapf(err, "failed to get token for K3sConfig %s/%s", config.Namespace, config.Name)
		}
		config.Spec.Cluster.Token = string(s.Data["value"])
		log.V(3).Info("Altering Cluster.Token")
	}
	return ctrl.Result{}, nil
}
// MachineToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue
// request for reconciliation of K3sConfig.
func (r *K3sConfigReconciler) MachineToBootstrapMapFunc(o client.Object) []ctrl.Request {
	m, ok := o.(*clusterv1.Machine)
	if !ok {
		panic(fmt.Sprintf("Expected a Machine but got a %T", o))
	}

	var result []ctrl.Request
	// Compare GroupKind rather than the full GroupVersionKind so that
	// references using a different API version of K3sConfig still map,
	// consistent with MachinePoolToBootstrapMapFunc and ClusterToK3sConfigs.
	if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == bootstrapv1.GroupVersion.WithKind("K3sConfig").GroupKind() {
		name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}
		result = append(result, ctrl.Request{NamespacedName: name})
	}
	return result
}
// MachinePoolToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue
// request for reconciliation of K3sConfig.
func (r *K3sConfigReconciler) MachinePoolToBootstrapMapFunc(o client.Object) []ctrl.Request {
	pool, ok := o.(*expv1.MachinePool)
	if !ok {
		panic(fmt.Sprintf("Expected a MachinePool but got a %T", o))
	}

	ref := pool.Spec.Template.Spec.Bootstrap.ConfigRef
	if ref == nil || ref.GroupVersionKind().GroupKind() != bootstrapv1.GroupVersion.WithKind("K3sConfig").GroupKind() {
		return nil
	}
	return []ctrl.Request{
		{NamespacedName: client.ObjectKey{Namespace: pool.Namespace, Name: ref.Name}},
	}
}
// ClusterToK3sConfigs is a handler.ToRequestsFunc to be used to enqueue
// requests for reconciliation of K3sConfig.
func (r *K3sConfigReconciler) ClusterToK3sConfigs(o client.Object) []ctrl.Request {
	cluster, ok := o.(*clusterv1.Cluster)
	if !ok {
		panic(fmt.Sprintf("Expected a Cluster but got a %T", o))
	}

	// The GroupKind every bootstrap config reference is matched against.
	k3sConfigGK := bootstrapv1.GroupVersion.WithKind("K3sConfig").GroupKind()

	selectors := []client.ListOption{
		client.InNamespace(cluster.Namespace),
		client.MatchingLabels{
			clusterv1.ClusterLabelName: cluster.Name,
		},
	}

	var requests []ctrl.Request

	machineList := &clusterv1.MachineList{}
	if err := r.Client.List(context.TODO(), machineList, selectors...); err != nil {
		return nil
	}
	for i := range machineList.Items {
		ref := machineList.Items[i].Spec.Bootstrap.ConfigRef
		if ref != nil && ref.GroupVersionKind().GroupKind() == k3sConfigGK {
			key := client.ObjectKey{Namespace: machineList.Items[i].Namespace, Name: ref.Name}
			requests = append(requests, ctrl.Request{NamespacedName: key})
		}
	}

	if feature.Gates.Enabled(feature.MachinePool) {
		machinePoolList := &expv1.MachinePoolList{}
		if err := r.Client.List(context.TODO(), machinePoolList, selectors...); err != nil {
			return nil
		}
		for i := range machinePoolList.Items {
			ref := machinePoolList.Items[i].Spec.Template.Spec.Bootstrap.ConfigRef
			if ref != nil && ref.GroupVersionKind().GroupKind() == k3sConfigGK {
				key := client.ObjectKey{Namespace: machinePoolList.Items[i].Namespace, Name: ref.Name}
				requests = append(requests, ctrl.Request{NamespacedName: key})
			}
		}
	}
	return requests
}
// reconcileTopLevelObjectSettings injects into config.Spec.ServerConfiguration values from top level objects like cluster and machine.
// The implementation func respect user provided config values, but in case some of them are missing, values from top level objects are used.
func (r *K3sConfigReconciler) reconcileTopLevelObjectSettings(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, config *infrabootstrapv1.K3sConfig) {
	log := ctrl.LoggerFrom(ctx)

	// If there are no Network settings defined in the ServerConfiguration, use ClusterNetwork settings, if defined.
	// Guard against a nil ServerConfiguration so the function cannot panic even
	// if a caller forgot to default it first.
	if cluster.Spec.ClusterNetwork != nil && config.Spec.ServerConfiguration != nil {
		if config.Spec.ServerConfiguration.Networking.ClusterDomain == "" && cluster.Spec.ClusterNetwork.ServiceDomain != "" {
			config.Spec.ServerConfiguration.Networking.ClusterDomain = cluster.Spec.ClusterNetwork.ServiceDomain
			log.V(3).Info("Altering ServerConfiguration.Networking.ClusterDomain", "ClusterDomain", config.Spec.ServerConfiguration.Networking.ClusterDomain)
		}
		if config.Spec.ServerConfiguration.Networking.ServiceCIDR == "" &&
			cluster.Spec.ClusterNetwork.Services != nil &&
			len(cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 {
			config.Spec.ServerConfiguration.Networking.ServiceCIDR = cluster.Spec.ClusterNetwork.Services.String()
			log.V(3).Info("Altering ServerConfiguration.Networking.ServiceCIDR", "ServiceCIDR", config.Spec.ServerConfiguration.Networking.ServiceCIDR)
		}
		if config.Spec.ServerConfiguration.Networking.ClusterCIDR == "" &&
			cluster.Spec.ClusterNetwork.Pods != nil &&
			len(cluster.Spec.ClusterNetwork.Pods.CIDRBlocks) > 0 {
			config.Spec.ServerConfiguration.Networking.ClusterCIDR = cluster.Spec.ClusterNetwork.Pods.String()
			log.V(3).Info("Altering ServerConfiguration.Networking.ClusterCIDR", "ClusterCIDR", config.Spec.ServerConfiguration.Networking.ClusterCIDR)
		}
	}

	// If there is no Version setting defined, use Version from machine, if defined
	if config.Spec.Version == "" && machine.Spec.Version != nil {
		config.Spec.Version = *machine.Spec.Version
		log.V(3).Info("Altering Spec.Version", "Version", config.Spec.Version)
	}
}

View File

@ -0,0 +1,29 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
//var (
// env *envtest.Environment
// ctx = ctrl.SetupSignalHandler()
//)
//
//func TestMain(m *testing.M) {
// os.Exit(envtest.Run(ctx, envtest.RunInput{
// M: m,
// SetupEnv: func(e *envtest.Environment) { env = e },
// }))
//}

View File

@ -0,0 +1,15 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

193
bootstrap/k3s/main.go Normal file
View File

@ -0,0 +1,193 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package main
package main
import (
"flag"
"fmt"
"math/rand"
"os"
"time"
"github.com/spf13/pflag"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/controllers/remote"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/healthz"
infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
"github.com/kubesphere/kubekey/bootstrap/k3s/controllers"
infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1"
//+kubebuilder:scaffold:imports
)
var (
	// scheme holds all API group types the manager's clients need to decode.
	scheme = runtime.NewScheme()
	// setupLog is the logger used during manager wiring, before controllers run.
	setupLog = ctrl.Log.WithName("setup")
)

// init registers the core, cluster-api (incl. experimental MachinePool), and
// k3s bootstrap/control-plane API groups into the manager scheme.
func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(clusterv1.AddToScheme(scheme))
	utilruntime.Must(expv1.AddToScheme(scheme))
	utilruntime.Must(infrabootstrapv1.AddToScheme(scheme))
	utilruntime.Must(infracontrolplanev1.AddToScheme(scheme))
	//+kubebuilder:scaffold:scheme
}

// Command-line flag values; see initFlags for descriptions and defaults.
var (
	metricsAddr                 string
	enableLeaderElection        bool
	leaderElectionLeaseDuration time.Duration
	leaderElectionRenewDeadline time.Duration
	leaderElectionRetryPeriod   time.Duration
	k3sConfigConcurrency        int
	healthAddr                  string
	watchFilterValue            string
	watchNamespace              string
	syncPeriod                  time.Duration
	webhookPort                 int
	webhookCertDir              string
)
// main wires up and starts the k3s bootstrap provider manager: flags, logging,
// the controller-runtime manager, the K3sConfig reconciler, health checks and
// the admission webhooks.
func main() {
	klog.InitFlags(nil)
	rand.Seed(time.Now().UnixNano())

	initFlags(pflag.CommandLine)
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
	pflag.Parse()

	ctrl.SetLogger(klogr.New())

	ctx := ctrl.SetupSignalHandler()

	restConfig := ctrl.GetConfigOrDie()
	restConfig.UserAgent = remote.DefaultClusterAPIUserAgent("cluster-api-k3s-bootstrap-manager")

	// BUGFIX: pass restConfig (not a fresh ctrl.GetConfigOrDie()) so the custom
	// UserAgent set above is actually applied to the manager's requests.
	mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
		Scheme:                     scheme,
		MetricsBindAddress:         metricsAddr,
		LeaderElection:             enableLeaderElection,
		LeaderElectionID:           "k3s-bootstrap-manager-leader-election-capkk",
		LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
		LeaseDuration:              &leaderElectionLeaseDuration,
		RenewDeadline:              &leaderElectionRenewDeadline,
		RetryPeriod:                &leaderElectionRetryPeriod,
		SyncPeriod:                 &syncPeriod,
		// Read ConfigMaps and Secrets directly instead of caching them, to
		// avoid holding every secret in the cluster in memory.
		ClientDisableCacheFor: []client.Object{
			&corev1.ConfigMap{},
			&corev1.Secret{},
		},
		Namespace:              watchNamespace,
		Port:                   webhookPort,
		HealthProbeBindAddress: healthAddr,
		CertDir:                webhookCertDir,
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		os.Exit(1)
	}

	if err := (&controllers.K3sConfigReconciler{
		Client:           mgr.GetClient(),
		WatchFilterValue: watchFilterValue,
	}).SetupWithManager(ctx, mgr, concurrency(k3sConfigConcurrency)); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "K3sConfig")
		os.Exit(1)
	}

	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up health check")
		os.Exit(1)
	}
	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up ready check")
		os.Exit(1)
	}

	if err = (&infrabootstrapv1.K3sConfig{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "K3sConfig")
		os.Exit(1)
	}
	if err = (&infrabootstrapv1.K3sConfigTemplate{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "K3sConfigTemplate")
		os.Exit(1)
	}
	// +kubebuilder:scaffold:builder

	setupLog.Info("starting manager")
	if err := mgr.Start(ctx); err != nil {
		setupLog.Error(err, "problem running manager")
		os.Exit(1)
	}
}
// initFlags registers the manager's command-line flags on fs.
func initFlags(fs *pflag.FlagSet) {
	fs.StringVar(&metricsAddr, "metrics-bind-addr", "localhost:8080",
		"The address the metric endpoint binds to.")

	fs.BoolVar(&enableLeaderElection, "leader-elect", false,
		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")

	fs.DurationVar(&leaderElectionLeaseDuration, "leader-elect-lease-duration", 15*time.Second,
		"Interval at which non-leader candidates will wait to force acquire leadership (duration string)")

	fs.DurationVar(&leaderElectionRenewDeadline, "leader-elect-renew-deadline", 10*time.Second,
		"Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)")

	fs.DurationVar(&leaderElectionRetryPeriod, "leader-elect-retry-period", 2*time.Second,
		"Duration the LeaderElector clients should wait between tries of actions (duration string)")

	fs.StringVar(&watchNamespace, "namespace", "",
		"Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.")

	fs.StringVar(&healthAddr, "health-addr", ":9440",
		"The address the health endpoint binds to.")

	// Fixed help text: this provider processes k3s configs, not kubeadm configs.
	fs.IntVar(&k3sConfigConcurrency, "k3sconfig-concurrency", 10,
		"Number of k3s configs to process simultaneously")

	fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute,
		"The minimum interval at which watched resources are reconciled (e.g. 15m)")

	fs.StringVar(&watchFilterValue, "watch-filter", "",
		fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel))

	fs.IntVar(&webhookPort, "webhook-port", 9443,
		"Webhook Server port")

	fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/",
		"Webhook cert dir, only used when webhook-port is specified.")

	feature.MutableGates.AddFlag(fs)
}
// concurrency converts a max-concurrent-reconciles count into controller.Options.
func concurrency(c int) controller.Options {
	opts := controller.Options{}
	opts.MaxConcurrentReconciles = c
	return opts
}

View File

@ -0,0 +1,99 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudinit
import (
"bytes"
_ "embed"
"text/template"
"github.com/pkg/errors"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
)
const (
	// sentinelFileCommand writes a file to /run/cluster-api to signal successful Kubernetes bootstrapping in a way that
	// works both for Linux and Windows OS.
	sentinelFileCommand = "echo success > /run/cluster-api/bootstrap-success.complete"

	// cloudConfigHeader is prepended to every generated user data document so
	// cloud-init treats it as a jinja-templated cloud-config.
	cloudConfigHeader = `## template: jinja
#cloud-config
`
)

// BaseUserData is shared across all the various types of files written to disk.
type BaseUserData struct {
	// Header is the cloud-config header; set by prepare().
	Header string
	// PreK3sCommands are run before the k3s install script.
	PreK3sCommands []string
	// PostK3sCommands are run after the k3s install script.
	PostK3sCommands []string
	// AdditionalFiles are user-provided files merged into WriteFiles by prepare().
	AdditionalFiles []bootstrapv1.File
	// WriteFiles is the full set of files rendered into the cloud-init document.
	WriteFiles []bootstrapv1.File
	// ConfigFile is the k3s configuration file to write.
	ConfigFile bootstrapv1.File
	// SentinelFileCommand signals bootstrap success; set by prepare().
	SentinelFileCommand string
}
// prepare finalizes the shared user data before rendering: it sets the
// cloud-config header, merges the additional files into WriteFiles, appends
// the rendered k3s install script, and sets the sentinel-file command.
func (input *BaseUserData) prepare() error {
	input.Header = cloudConfigHeader
	input.WriteFiles = append(input.WriteFiles, input.AdditionalFiles...)

	scriptFile, err := generateBootstrapScript(input)
	if err != nil {
		return errors.Wrap(err, "failed to generate user data for machine install k3s")
	}
	input.WriteFiles = append(input.WriteFiles, *scriptFile)

	input.SentinelFileCommand = sentinelFileCommand
	return nil
}
// generate renders the named cloud-init template tpl against data, after
// registering the shared "files" and "commands" sub-templates.
func generate(kind string, tpl string, data interface{}) ([]byte, error) {
	tm := template.New(kind).Funcs(defaultTemplateFuncMap)

	// Register the shared partials in a fixed order.
	partials := []struct {
		name string
		body string
	}{
		{"files", filesTemplate},
		{"commands", commandsTemplate},
	}
	for _, partial := range partials {
		if _, err := tm.Parse(partial.body); err != nil {
			return nil, errors.Wrapf(err, "failed to parse %s template", partial.name)
		}
	}

	t, err := tm.Parse(tpl)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to parse %s template", kind)
	}

	var rendered bytes.Buffer
	if err := t.Execute(&rendered, data); err != nil {
		return nil, errors.Wrapf(err, "failed to generate %s template", kind)
	}
	return rendered.Bytes(), nil
}
var (
//go:embed k3s-install.sh
k3sBootstrapScript string
)
func generateBootstrapScript(input interface{}) (*bootstrapv1.File, error) {
k3sScript, err := generate("K3sInstallScript", k3sBootstrapScript, input)
if err != nil {
return nil, errors.Wrap(err, "failed to bootstrap script for machine joins")
}
return &bootstrapv1.File{
Path: "/usr/local/bin/k3s-install.sh",
Owner: "root",
Permissions: "0755",
Content: string(k3sScript),
}, nil
}

View File

@ -0,0 +1,26 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudinit
const (
	// commandsTemplate renders a list of shell commands as quoted YAML list
	// items; cloud-init documents include it via {{template "commands" ...}}.
	commandsTemplate = `{{- define "commands" -}}
{{ range . }}
  - {{printf "%q" .}}
{{- end -}}
{{- end -}}
`
)

View File

@ -0,0 +1,64 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudinit
import (
"github.com/pkg/errors"
"github.com/kubesphere/kubekey/util/secret"
)
const (
	// controlPlaneCloudInit is the cloud-init document used for control plane
	// machines: it writes all files (certificates, config, install script),
	// creates the /run/cluster-api directory via a placeholder file, and runs
	// the k3s install script between the user-provided pre/post commands.
	controlPlaneCloudInit = `{{.Header}}
{{template "files" .WriteFiles}}
- path: /run/cluster-api/placeholder
  owner: root:root
  permissions: '0640'
  content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)"
runcmd:
{{- template "commands" .PreK3sCommands }}
  - 'INSTALL_K3S_SKIP_DOWNLOAD=true /usr/local/bin/k3s-install.sh'
{{- template "commands" .PostK3sCommands }}
`
)

// ControlPlaneInput defines the context to generate a controlplane instance user data.
type ControlPlaneInput struct {
	BaseUserData
	// Certificates are the cluster certificates written to disk on init.
	secret.Certificates
	// ServerConfiguration holds the marshaled k3s server configuration.
	// NOTE(review): not referenced by the cloud-init templates visible here — confirm intended use.
	ServerConfiguration string
}
// NewInitControlPlane returns the cloud-init user data used to initialize the
// first control plane instance of the cluster. In addition to the common user
// data assembled by BaseUserData.prepare, it writes the cluster certificates
// to disk.
func NewInitControlPlane(input *ControlPlaneInput) ([]byte, error) {
	// Seed WriteFiles with the certificates first, then reuse prepare() to
	// append the additional files and the install script, set the header and
	// the sentinel command — instead of duplicating prepare's body here.
	input.WriteFiles = input.Certificates.AsFiles()
	if err := input.prepare(); err != nil {
		return nil, err
	}

	userData, err := generate("InitControlplane", controlPlaneCloudInit, input)
	if err != nil {
		// Wrap with context, consistent with NewJoinControlPlane.
		return nil, errors.Wrap(err, "failed to generate user data for machine init control plane")
	}
	return userData, nil
}

View File

@ -0,0 +1,48 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudinit
import (
"github.com/pkg/errors"
)
const (
	// controlPlaneJoinCloudInit is the cloud-init userdata rendered for
	// additional control plane nodes joining an existing cluster. Layout is
	// identical to the init template: write files, placeholder directory
	// marker, then pre-k3s commands, offline k3s install, post-k3s commands.
	controlPlaneJoinCloudInit = `{{.Header}}
{{template "files" .WriteFiles}}
-   path: /run/cluster-api/placeholder
    owner: root:root
    permissions: '0640'
    content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)"
runcmd:
{{- template "commands" .PreK3sCommands }}
  - 'INSTALL_K3S_SKIP_DOWNLOAD=true /usr/local/bin/k3s-install.sh'
{{- template "commands" .PostK3sCommands }}
`
)
// NewJoinControlPlane returns the cloudinit string to be used on joining a
// control plane instance.
func NewJoinControlPlane(input *ControlPlaneInput) ([]byte, error) {
	// prepare populates the shared base user data before rendering.
	if err := input.prepare(); err != nil {
		return nil, err
	}
	userData, err := generate("JoinControlplane", controlPlaneJoinCloudInit, input)
	if err != nil {
		// Wrap (not Wrapf): the message has no format verbs.
		return nil, errors.Wrap(err, "failed to generate user data for machine joining control plane")
	}
	// err is nil here; return an explicit nil instead of the stale variable.
	return userData, nil
}

View File

@ -0,0 +1,18 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cloudinit implements k3s cloudinit functionality.
package cloudinit

View File

@ -0,0 +1,40 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudinit
const (
	// filesTemplate defines the named template "files", which renders the
	// cloud-init write_files section from a slice of file specs. Optional
	// attributes (encoding, owner, permissions, append) are emitted only when
	// set; the file content is re-indented 6 spaces (via the Indent template
	// func) so it nests correctly under the "content: |" block scalar.
	filesTemplate = `{{ define "files" -}}
write_files:{{ range . }}
-   path: {{.Path}}
    {{ if ne .Encoding "" -}}
    encoding: "{{.Encoding}}"
    {{ end -}}
    {{ if ne .Owner "" -}}
    owner: {{.Owner}}
    {{ end -}}
    {{ if ne .Permissions "" -}}
    permissions: '{{.Permissions}}'
    {{ end -}}
    {{ if .Append -}}
    append: true
    {{ end -}}
    content: |
{{.Content | Indent 6}}
{{- end -}}
{{- end -}}
`
)

View File

@ -0,0 +1,946 @@
#!/bin/sh
#
# Copyright 2022 The KubeSphere Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Abort on any command failure, and disable pathname expansion (globbing)
# so unquoted variables are never expanded against the filesystem.
set -e
set -o noglob
# Usage:
# curl ... | ENV_VAR=... sh -
# or
# ENV_VAR=... ./install.sh
#
# Example:
# Installing a server without traefik:
# curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh -
# Installing an agent to point at a server:
# curl ... | K3S_TOKEN=xxx K3S_URL=https://server-url:6443 sh -
#
# Environment variables:
# - K3S_*
# Environment variables which begin with K3S_ will be preserved for the
# systemd service to use. Setting K3S_URL without explicitly setting
# a systemd exec command will default the command to "agent", and we
# enforce that K3S_TOKEN or K3S_CLUSTER_SECRET is also set.
#
# - INSTALL_K3S_SKIP_DOWNLOAD
# If set to true will not download k3s hash or binary.
#
# - INSTALL_K3S_FORCE_RESTART
# If set to true will always restart the K3s service
#
# - INSTALL_K3S_SYMLINK
# If set to 'skip' will not create symlinks, 'force' will overwrite,
# default will symlink if command does not exist in path.
#
# - INSTALL_K3S_SKIP_ENABLE
# If set to true will not enable or start k3s service.
#
# - INSTALL_K3S_SKIP_START
# If set to true will not start k3s service.
#
# - INSTALL_K3S_VERSION
# Version of k3s to download from github. Will attempt to download from the
# stable channel if not specified.
#
# - INSTALL_K3S_COMMIT
# Commit of k3s to download from temporary cloud storage.
# * (for developer & QA use)
#
# - INSTALL_K3S_BIN_DIR
# Directory to install k3s binary, links, and uninstall script to, or use
# /usr/local/bin as the default
#
# - INSTALL_K3S_BIN_DIR_READ_ONLY
# If set to true will not write files to INSTALL_K3S_BIN_DIR, forces
# setting INSTALL_K3S_SKIP_DOWNLOAD=true
#
# - INSTALL_K3S_SYSTEMD_DIR
# Directory to install systemd service and environment files to, or use
# /etc/systemd/system as the default
#
# - INSTALL_K3S_EXEC or script arguments
# Command with flags to use for launching k3s in the systemd service, if
# the command is not specified will default to "agent" if K3S_URL is set
# or "server" if not. The final systemd command resolves to a combination
# of EXEC and script args ($@).
#
# The following commands result in the same behavior:
# curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh -s -
# curl ... | INSTALL_K3S_EXEC="server --disable=traefik" sh -s -
# curl ... | INSTALL_K3S_EXEC="server" sh -s - --disable=traefik
# curl ... | sh -s - server --disable=traefik
# curl ... | sh -s - --disable=traefik
#
# - INSTALL_K3S_NAME
# Name of systemd service to create, will default from the k3s exec command
# if not specified. If specified the name will be prefixed with 'k3s-'.
#
# - INSTALL_K3S_TYPE
# Type of systemd service to create, will default from the k3s exec command
# if not specified.
#
# - INSTALL_K3S_SELINUX_WARN
# If set to true will continue if k3s-selinux policy is not found.
#
# - INSTALL_K3S_SKIP_SELINUX_RPM
# If set to true will skip automatic installation of the k3s RPM.
#
# - INSTALL_K3S_CHANNEL_URL
# Channel URL for fetching k3s download URL.
# Defaults to 'https://update.k3s.io/v1-release/channels'.
#
# - INSTALL_K3S_CHANNEL
# Channel to use for fetching k3s download URL.
# Defaults to 'stable'.
# Release download locations: GitHub for tagged releases, the GCS bucket for
# CI commit builds. DOWNLOADER is filled in later by verify_downloader.
GITHUB_URL=https://github.com/k3s-io/k3s/releases
STORAGE_URL=https://storage.googleapis.com/k3s-ci-builds
DOWNLOADER=

# --- helper functions for logs ---
info()
{
    echo '[INFO] ' "$@"
}
warn()
{
    echo '[WARN] ' "$@" >&2
}
fatal()
{
    echo '[ERROR] ' "$@" >&2
    exit 1
}

# --- fatal if no systemd or openrc ---
verify_system() {
    if [ -x /sbin/openrc-run ]; then
        HAS_OPENRC=true
        return
    fi
    if [ -x /bin/systemctl ] || type systemctl > /dev/null 2>&1; then
        HAS_SYSTEMD=true
        return
    fi
    fatal 'Can not find systemd or openrc to use as a process supervisor for k3s'
}

# --- add quotes to command arguments ---
quote() {
    for arg in "$@"; do
        printf '%s\n' "$arg" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/'/"
    done
}

# --- add indentation and trailing slash to quoted args ---
quote_indent() {
    printf ' \\\n'
    for arg in "$@"; do
        printf '\t%s \\\n' "$(quote "$arg")"
    done
}

# --- escape most punctuation characters, except quotes, forward slash, and space ---
escape() {
    printf '%s' "$@" | sed -e 's/\([][!#$%&()*;<=>?\_`{|}]\)/\\\1/g;'
}

# --- escape double quotes ---
escape_dq() {
    printf '%s' "$@" | sed -e 's/"/\\"/g'
}

# --- ensures $K3S_URL is empty or begins with https://, exiting fatally otherwise ---
verify_k3s_url() {
    case "${K3S_URL}" in
        "")
            ;;
        https://*)
            ;;
        *)
            fatal "Only https:// URLs are supported for K3S_URL (have ${K3S_URL})"
            ;;
    esac
}
# --- define needed environment variables ---
# NOTE: every variable assigned here is a global consumed by the rest of the
# script (CMD_K3S, SYSTEM_NAME, SUDO, BIN_DIR, SYSTEMD_DIR, FILE_K3S_* ...).
setup_env() {
    # --- use command args if passed or create default ---
    case "$1" in
        # --- if we only have flags discover if command should be server or agent ---
        (-*|"")
            if [ -z "${K3S_URL}" ]; then
                CMD_K3S=server
            else
                if [ -z "${K3S_TOKEN}" ] && [ -z "${K3S_TOKEN_FILE}" ] && [ -z "${K3S_CLUSTER_SECRET}" ]; then
                    fatal "Defaulted k3s exec command to 'agent' because K3S_URL is defined, but K3S_TOKEN, K3S_TOKEN_FILE or K3S_CLUSTER_SECRET is not defined."
                fi
                CMD_K3S=agent
            fi
        ;;
        # --- command is provided ---
        (*)
            CMD_K3S=$1
            shift
        ;;
    esac

    verify_k3s_url

    CMD_K3S_EXEC="${CMD_K3S}$(quote_indent "$@")"

    # --- use systemd name if defined or create default ---
    if [ -n "${INSTALL_K3S_NAME}" ]; then
        SYSTEM_NAME=k3s-${INSTALL_K3S_NAME}
    else
        if [ "${CMD_K3S}" = server ]; then
            SYSTEM_NAME=k3s
        else
            SYSTEM_NAME=k3s-${CMD_K3S}
        fi
    fi

    # --- check for invalid characters in system name ---
    valid_chars=$(printf '%s' "${SYSTEM_NAME}" | sed -e 's/[][!#$%&()*;<=>?\_`{|}/[:space:]]/^/g;' )
    if [ "${SYSTEM_NAME}" != "${valid_chars}" ]; then
        invalid_chars=$(printf '%s' "${valid_chars}" | sed -e 's/[^^]/ /g')
        fatal "Invalid characters for system name:
            ${SYSTEM_NAME}
            ${invalid_chars}"
    fi

    # --- use sudo if we are not already root ---
    SUDO=sudo
    if [ $(id -u) -eq 0 ]; then
        SUDO=
    fi

    # --- use systemd type if defined or create default ---
    if [ -n "${INSTALL_K3S_TYPE}" ]; then
        SYSTEMD_TYPE=${INSTALL_K3S_TYPE}
    else
        SYSTEMD_TYPE=notify
    fi

    # --- use binary install directory if defined or create default ---
    if [ -n "${INSTALL_K3S_BIN_DIR}" ]; then
        BIN_DIR=${INSTALL_K3S_BIN_DIR}
    else
        # --- use /usr/local/bin if root can write to it, otherwise use /opt/bin if it exists
        BIN_DIR=/usr/local/bin
        if ! $SUDO sh -c "touch ${BIN_DIR}/k3s-ro-test && rm -rf ${BIN_DIR}/k3s-ro-test"; then
            if [ -d /opt/bin ]; then
                BIN_DIR=/opt/bin
            fi
        fi
    fi

    # --- use systemd directory if defined or create default ---
    if [ -n "${INSTALL_K3S_SYSTEMD_DIR}" ]; then
        SYSTEMD_DIR="${INSTALL_K3S_SYSTEMD_DIR}"
    else
        SYSTEMD_DIR=/etc/systemd/system
    fi

    # --- set related files from system name ---
    SERVICE_K3S=${SYSTEM_NAME}.service
    UNINSTALL_K3S_SH=${UNINSTALL_K3S_SH:-${BIN_DIR}/${SYSTEM_NAME}-uninstall.sh}
    KILLALL_K3S_SH=${KILLALL_K3S_SH:-${BIN_DIR}/k3s-killall.sh}

    # --- use service or environment location depending on systemd/openrc ---
    if [ "${HAS_SYSTEMD}" = true ]; then
        FILE_K3S_SERVICE=${SYSTEMD_DIR}/${SERVICE_K3S}
        FILE_K3S_ENV=${SYSTEMD_DIR}/${SERVICE_K3S}.env
    elif [ "${HAS_OPENRC}" = true ]; then
        $SUDO mkdir -p /etc/rancher/k3s
        FILE_K3S_SERVICE=/etc/init.d/${SYSTEM_NAME}
        FILE_K3S_ENV=/etc/rancher/k3s/${SYSTEM_NAME}.env
    fi

    # --- get hash of config & exec for currently installed k3s ---
    # Captured now so service_enable_and_start can later detect whether the
    # install actually changed anything before restarting the service.
    PRE_INSTALL_HASHES=$(get_installed_hashes)

    # --- if bin directory is read only skip download ---
    if [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ]; then
        INSTALL_K3S_SKIP_DOWNLOAD=true
    fi

    # --- setup channel values
    INSTALL_K3S_CHANNEL_URL=${INSTALL_K3S_CHANNEL_URL:-'https://update.k3s.io/v1-release/channels'}
    INSTALL_K3S_CHANNEL=${INSTALL_K3S_CHANNEL:-'stable'}
}
# --- check if skip download environment variable set ---
# Succeeds (returns 0) only when the k3s binary download may be skipped.
can_skip_download_binary() {
    if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ] && [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != binary ]; then
        return 1
    fi
}

# Succeeds (returns 0) only when the SELinux RPM download may be skipped.
can_skip_download_selinux() {
    if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ] && [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != selinux ]; then
        return 1
    fi
}

# --- verify an executable k3s binary is installed ---
verify_k3s_is_executable() {
    if [ ! -x ${BIN_DIR}/k3s ]; then
        fatal "Executable k3s binary not found at ${BIN_DIR}/k3s"
    fi
}

# --- set arch and suffix, fatal if architecture not supported ---
# SUFFIX selects the per-arch release artifact name (empty for amd64).
setup_verify_arch() {
    if [ -z "$ARCH" ]; then
        ARCH=$(uname -m)
    fi
    case $ARCH in
        amd64)
            ARCH=amd64
            SUFFIX=
            ;;
        x86_64)
            ARCH=amd64
            SUFFIX=
            ;;
        arm64)
            ARCH=arm64
            SUFFIX=-${ARCH}
            ;;
        s390x)
            ARCH=s390x
            SUFFIX=-${ARCH}
            ;;
        aarch64)
            ARCH=arm64
            SUFFIX=-${ARCH}
            ;;
        arm*)
            ARCH=arm
            SUFFIX=-${ARCH}hf
            ;;
        *)
            fatal "Unsupported architecture $ARCH"
    esac
}

# --- verify existence of network downloader executable ---
verify_downloader() {
    # Return failure if it doesn't exist or is no executable
    [ -x "$(command -v $1)" ] || return 1

    # Set verified executable as our downloader program and return success
    DOWNLOADER=$1
    return 0
}
# --- create temporary directory and cleanup when done ---
setup_tmp() {
    TMP_DIR=$(mktemp -d -t k3s-install.XXXXXXXXXX)
    TMP_HASH=${TMP_DIR}/k3s.hash
    TMP_BIN=${TMP_DIR}/k3s.bin
    # cleanup preserves the exit code of the command that triggered the trap.
    cleanup() {
        code=$?
        set +e
        trap - EXIT
        rm -rf ${TMP_DIR}
        exit $code
    }
    trap cleanup INT EXIT
}

# --- use desired k3s version if defined or find version from channel ---
get_release_version() {
    if [ -n "${INSTALL_K3S_COMMIT}" ]; then
        VERSION_K3S="commit ${INSTALL_K3S_COMMIT}"
    elif [ -n "${INSTALL_K3S_VERSION}" ]; then
        VERSION_K3S=${INSTALL_K3S_VERSION}
    else
        info "Finding release for channel ${INSTALL_K3S_CHANNEL}"
        version_url="${INSTALL_K3S_CHANNEL_URL}/${INSTALL_K3S_CHANNEL}"
        # The channel URL redirects to the release tag; the version is the
        # final path segment of the effective URL.
        case $DOWNLOADER in
            curl)
                VERSION_K3S=$(curl -w '%{url_effective}' -L -s -S ${version_url} -o /dev/null | sed -e 's|.*/||')
                ;;
            wget)
                VERSION_K3S=$(wget -SqO /dev/null ${version_url} 2>&1 | grep -i Location | sed -e 's|.*/||')
                ;;
            *)
                fatal "Incorrect downloader executable '$DOWNLOADER'"
                ;;
        esac
    fi
    info "Using ${VERSION_K3S} as release"
}

# --- download from github url ---
download() {
    [ $# -eq 2 ] || fatal 'download needs exactly 2 arguments'

    case $DOWNLOADER in
        curl)
            curl -o $1 -sfL $2
            ;;
        wget)
            wget -qO $1 $2
            ;;
        *)
            fatal "Incorrect executable '$DOWNLOADER'"
            ;;
    esac

    # Abort if download command failed
    # NOTE(review): under 'set -e' a failing curl/wget already aborts the
    # script before this check; it is a defensive fallback only.
    [ $? -eq 0 ] || fatal 'Download failed'
}

# --- download hash from github url ---
download_hash() {
    if [ -n "${INSTALL_K3S_COMMIT}" ]; then
        HASH_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}.sha256sum
    else
        HASH_URL=${GITHUB_URL}/download/${VERSION_K3S}/sha256sum-${ARCH}.txt
    fi
    info "Downloading hash ${HASH_URL}"
    download ${TMP_HASH} ${HASH_URL}
    # Keep only the checksum field of the line matching our artifact name.
    HASH_EXPECTED=$(grep " k3s${SUFFIX}$" ${TMP_HASH})
    HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*}
}

# --- check hash against installed version ---
installed_hash_matches() {
    if [ -x ${BIN_DIR}/k3s ]; then
        HASH_INSTALLED=$(sha256sum ${BIN_DIR}/k3s)
        HASH_INSTALLED=${HASH_INSTALLED%%[[:blank:]]*}
        if [ "${HASH_EXPECTED}" = "${HASH_INSTALLED}" ]; then
            return
        fi
    fi
    return 1
}

# --- download binary from github url ---
download_binary() {
    if [ -n "${INSTALL_K3S_COMMIT}" ]; then
        BIN_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}
    else
        BIN_URL=${GITHUB_URL}/download/${VERSION_K3S}/k3s${SUFFIX}
    fi
    info "Downloading binary ${BIN_URL}"
    download ${TMP_BIN} ${BIN_URL}
}

# --- verify downloaded binary hash ---
verify_binary() {
    info "Verifying binary download"
    HASH_BIN=$(sha256sum ${TMP_BIN})
    HASH_BIN=${HASH_BIN%%[[:blank:]]*}
    if [ "${HASH_EXPECTED}" != "${HASH_BIN}" ]; then
        fatal "Download sha256 does not match ${HASH_EXPECTED}, got ${HASH_BIN}"
    fi
}

# --- setup permissions and move binary to system directory ---
setup_binary() {
    chmod 755 ${TMP_BIN}
    info "Installing k3s to ${BIN_DIR}/k3s"
    $SUDO chown root:root ${TMP_BIN}
    $SUDO mv -f ${TMP_BIN} ${BIN_DIR}/k3s
}
# --- setup selinux policy ---
# Picks the RPM channel/site from the install channel and OS, installs the
# k3s-selinux policy where applicable, and labels the k3s binary. Failures
# are fatal unless INSTALL_K3S_SELINUX_WARN=true or the host is CoreOS-like.
setup_selinux() {
    case ${INSTALL_K3S_CHANNEL} in
        *testing)
            rpm_channel=testing
            ;;
        *latest)
            rpm_channel=latest
            ;;
        *)
            rpm_channel=stable
            ;;
    esac

    rpm_site="rpm.rancher.io"
    if [ "${rpm_channel}" = "testing" ]; then
        rpm_site="rpm-testing.rancher.io"
    fi

    [ -r /etc/os-release ] && . /etc/os-release
    if [ "${ID_LIKE%%[ ]*}" = "suse" ]; then
        rpm_target=sle
        rpm_site_infix=microos
        package_installer=zypper
    elif [ "${VERSION_ID%%.*}" = "7" ]; then
        rpm_target=el7
        rpm_site_infix=centos/7
        package_installer=yum
    else
        rpm_target=el8
        rpm_site_infix=centos/8
        package_installer=yum
    fi

    if [ "${package_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then
        package_installer=dnf
    fi

    policy_hint="please install:
    ${package_installer} install -y container-selinux
    ${package_installer} install -y https://${rpm_site}/k3s/${rpm_channel}/common/${rpm_site_infix}/noarch/k3s-selinux-0.4-1.${rpm_target}.noarch.rpm
"

    if [ "$INSTALL_K3S_SKIP_SELINUX_RPM" = true ] || can_skip_download_selinux || [ ! -d /usr/share/selinux ]; then
        info "Skipping installation of SELinux RPM"
    elif [ "${ID_LIKE:-}" != coreos ] && [ "${VARIANT_ID:-}" != coreos ]; then
        install_selinux_rpm ${rpm_site} ${rpm_channel} ${rpm_target} ${rpm_site_infix}
    fi

    policy_error=fatal
    if [ "$INSTALL_K3S_SELINUX_WARN" = true ] || [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then
        policy_error=warn
    fi

    if ! $SUDO chcon -u system_u -r object_r -t container_runtime_exec_t ${BIN_DIR}/k3s >/dev/null 2>&1; then
        if $SUDO grep '^\s*SELINUX=enforcing' /etc/selinux/config >/dev/null 2>&1; then
            $policy_error "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}"
        fi
    elif [ ! -f /usr/share/selinux/packages/k3s.pp ]; then
        if [ -x /usr/sbin/transactional-update ]; then
            warn "Please reboot your machine to activate the changes and avoid data loss."
        else
            $policy_error "Failed to find the k3s-selinux policy, ${policy_hint}"
        fi
    fi
}

# --- install the k3s-selinux RPM from the Rancher repo ---
# Args: $1 rpm_site, $2 rpm_channel, $3 rpm_target, $4 rpm_site_infix.
install_selinux_rpm() {
    if [ -r /etc/redhat-release ] || [ -r /etc/centos-release ] || [ -r /etc/oracle-release ] || [ "${ID_LIKE%%[ ]*}" = "suse" ]; then
        repodir=/etc/yum.repos.d
        if [ -d /etc/zypp/repos.d ]; then
            repodir=/etc/zypp/repos.d
        fi
        # Temporarily re-enable globbing so the wildcard below expands.
        set +o noglob
        $SUDO rm -f ${repodir}/rancher-k3s-common*.repo
        set -o noglob
        if [ -r /etc/redhat-release ] && [ "${3}" = "el7" ]; then
            $SUDO yum install -y yum-utils
            $SUDO yum-config-manager --enable rhel-7-server-extras-rpms
        fi
        $SUDO tee ${repodir}/rancher-k3s-common.repo >/dev/null << EOF
[rancher-k3s-common-${2}]
name=Rancher K3s Common (${2})
baseurl=https://${1}/k3s/${2}/common/${4}/noarch
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://${1}/public.key
EOF
        case ${3} in
        sle)
            rpm_installer="zypper --gpg-auto-import-keys"
            if [ "${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then
                rpm_installer="transactional-update --no-selfupdate -d run ${rpm_installer}"
                : "${INSTALL_K3S_SKIP_START:=true}"
            fi
            ;;
        *)
            rpm_installer="yum"
            ;;
        esac
        if [ "${rpm_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then
            rpm_installer=dnf
        fi
        # shellcheck disable=SC2086
        $SUDO ${rpm_installer} install -y "k3s-selinux"
    fi
    return
}
# --- download and verify k3s ---
# No-op when the binary is pre-staged (e.g. air-gapped installs via
# INSTALL_K3S_SKIP_DOWNLOAD=true), beyond asserting the binary is executable.
download_and_verify() {
    if can_skip_download_binary; then
        info 'Skipping k3s download and verify'
        verify_k3s_is_executable
        return
    fi

    setup_verify_arch
    verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files'
    setup_tmp
    get_release_version
    download_hash

    if installed_hash_matches; then
        info 'Skipping binary downloaded, installed k3s matches hash'
        return
    fi

    download_binary
    verify_binary
    setup_binary
}

# --- add additional utility links ---
create_symlinks() {
    [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
    [ "${INSTALL_K3S_SYMLINK}" = skip ] && return

    for cmd in kubectl crictl ctr; do
        if [ ! -e ${BIN_DIR}/${cmd} ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
            which_cmd=$(command -v ${cmd} 2>/dev/null || true)
            if [ -z "${which_cmd}" ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
                info "Creating ${BIN_DIR}/${cmd} symlink to k3s"
                $SUDO ln -sf k3s ${BIN_DIR}/${cmd}
            else
                info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, command exists in PATH at ${which_cmd}"
            fi
        else
            info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, already exists"
        fi
    done
}
# --- create killall script ---
create_killall() {
    [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
    info "Creating killall script ${KILLALL_K3S_SH}"
    # The heredoc delimiter is quoted (\EOF) so the script below is written
    # verbatim: variables expand when the killall script runs, not now.
    $SUDO tee ${KILLALL_K3S_SH} >/dev/null << \EOF
#!/bin/sh
[ $(id -u) -eq 0 ] || exec sudo $0 $@

for bin in /var/lib/rancher/k3s/data/**/bin/; do
    [ -d $bin ] && export PATH=$PATH:$bin:$bin/aux
done

set -x

for service in /etc/systemd/system/k3s*.service; do
    [ -s $service ] && systemctl stop $(basename $service)
done

for service in /etc/init.d/k3s*; do
    [ -x $service ] && $service stop
done

pschildren() {
    ps -e -o ppid= -o pid= | \
    sed -e 's/^\s*//g; s/\s\s*/\t/g;' | \
    grep -w "^$1" | \
    cut -f2
}

pstree() {
    for pid in $@; do
        echo $pid
        for child in $(pschildren $pid); do
            pstree $child
        done
    done
}

killtree() {
    kill -9 $(
        { set +x; } 2>/dev/null;
        pstree $@;
        set -x;
    ) 2>/dev/null
}

getshims() {
    ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1
}

killtree $({ set +x; } 2>/dev/null; getshims; set -x)

do_unmount_and_remove() {
    set +x
    while read -r _ path _; do
        case "$path" in $1*) echo "$path" ;; esac
    done < /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount "$0" && rm -rf "$0"'
    set -x
}

do_unmount_and_remove '/run/k3s'
do_unmount_and_remove '/var/lib/rancher/k3s'
do_unmount_and_remove '/var/lib/kubelet/pods'
do_unmount_and_remove '/var/lib/kubelet/plugins'
do_unmount_and_remove '/run/netns/cni-'

# Remove CNI namespaces
ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete

# Delete network interface(s) that match 'master cni0'
ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
    iface=${iface%%@*}
    [ -z "$iface" ] || ip link delete $iface
done
ip link delete cni0
ip link delete flannel.1
ip link delete flannel-v6.1
ip link delete kube-ipvs0
ip link delete flannel-wg
ip link delete flannel-wg-v6
rm -rf /var/lib/cni/
iptables-save | grep -v KUBE- | grep -v CNI- | grep -v flannel | iptables-restore
ip6tables-save | grep -v KUBE- | grep -v CNI- | grep -v flannel | ip6tables-restore
EOF
    $SUDO chmod 755 ${KILLALL_K3S_SH}
    $SUDO chown root:root ${KILLALL_K3S_SH}
}
# --- create uninstall script ---
create_uninstall() {
    [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
    info "Creating uninstall script ${UNINSTALL_K3S_SH}"
    # Unquoted EOF: ${...} references expand NOW (baking in install paths),
    # while \$-escaped expressions are evaluated when the uninstaller runs.
    $SUDO tee ${UNINSTALL_K3S_SH} >/dev/null << EOF
#!/bin/sh
set -x
[ \$(id -u) -eq 0 ] || exec sudo \$0 \$@

${KILLALL_K3S_SH}

if command -v systemctl; then
    systemctl disable ${SYSTEM_NAME}
    systemctl reset-failed ${SYSTEM_NAME}
    systemctl daemon-reload
fi
if command -v rc-update; then
    rc-update delete ${SYSTEM_NAME} default
fi

rm -f ${FILE_K3S_SERVICE}
rm -f ${FILE_K3S_ENV}

remove_uninstall() {
    rm -f ${UNINSTALL_K3S_SH}
}
trap remove_uninstall EXIT

if (ls ${SYSTEMD_DIR}/k3s*.service || ls /etc/init.d/k3s*) >/dev/null 2>&1; then
    set +x; echo 'Additional k3s services installed, skipping uninstall of k3s'; set -x
    exit
fi

for cmd in kubectl crictl ctr; do
    if [ -L ${BIN_DIR}/\$cmd ]; then
        rm -f ${BIN_DIR}/\$cmd
    fi
done

rm -rf /etc/rancher/k3s
rm -rf /run/k3s
rm -rf /run/flannel
rm -rf /var/lib/rancher/k3s
rm -rf /var/lib/kubelet
rm -f ${BIN_DIR}/k3s
rm -f ${KILLALL_K3S_SH}

if type yum >/dev/null 2>&1; then
    yum remove -y k3s-selinux
    rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
elif type zypper >/dev/null 2>&1; then
    uninstall_cmd="zypper remove -y k3s-selinux"
    if [ "\${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then
        uninstall_cmd="transactional-update --no-selfupdate -d run \$uninstall_cmd"
    fi
    \$uninstall_cmd
    rm -f /etc/zypp/repos.d/rancher-k3s-common*.repo
fi
EOF
    $SUDO chmod 755 ${UNINSTALL_K3S_SH}
    $SUDO chown root:root ${UNINSTALL_K3S_SH}
}

# --- disable current service if loaded --
systemd_disable() {
    $SUDO systemctl disable ${SYSTEM_NAME} >/dev/null 2>&1 || true
    $SUDO rm -f /etc/systemd/system/${SERVICE_K3S} || true
    $SUDO rm -f /etc/systemd/system/${SERVICE_K3S}.env || true
}
# --- capture current env and create file containing k3s_ variables ---
create_env_file() {
    info "env: Creating environment file ${FILE_K3S_ENV}"
    $SUDO touch ${FILE_K3S_ENV}
    $SUDO chmod 0600 ${FILE_K3S_ENV}
    # Persist K3S_*/CONTAINERD_* and proxy variables from the installer's
    # environment so the service sees them on every start.
    sh -c export | while read x v; do echo $v; done | grep -E '^(K3S|CONTAINERD)_' | $SUDO tee ${FILE_K3S_ENV} >/dev/null
    sh -c export | while read x v; do echo $v; done | grep -Ei '^(NO|HTTP|HTTPS)_PROXY' | $SUDO tee -a ${FILE_K3S_ENV} >/dev/null
}

# --- write systemd service file ---
create_systemd_service_file() {
    info "systemd: Creating service file ${FILE_K3S_SERVICE}"
    $SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
Wants=network-online.target
After=network-online.target

[Install]
WantedBy=multi-user.target

[Service]
Type=${SYSTEMD_TYPE}
EnvironmentFile=-/etc/default/%N
EnvironmentFile=-/etc/sysconfig/%N
EnvironmentFile=-${FILE_K3S_ENV}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service'
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=${BIN_DIR}/k3s \\
    ${CMD_K3S_EXEC}
EOF
}
# --- write openrc service file ---
create_openrc_service_file() {
    LOG_FILE=/var/log/${SYSTEM_NAME}.log

    info "openrc: Creating service file ${FILE_K3S_SERVICE}"
    $SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF
#!/sbin/openrc-run

depend() {
    after network-online
    want cgroups
}

start_pre() {
    rm -f /tmp/k3s.*
}

supervisor=supervise-daemon
name=${SYSTEM_NAME}
command="${BIN_DIR}/k3s"
command_args="$(escape_dq "${CMD_K3S_EXEC}")
>>${LOG_FILE} 2>&1"

output_log=${LOG_FILE}
error_log=${LOG_FILE}

pidfile="/var/run/${SYSTEM_NAME}.pid"
respawn_delay=5
respawn_max=0

set -o allexport
if [ -f /etc/environment ]; then source /etc/environment; fi
if [ -f ${FILE_K3S_ENV} ]; then source ${FILE_K3S_ENV}; fi
set +o allexport
EOF
    $SUDO chmod 0755 ${FILE_K3S_SERVICE}

    # Rotate the supervise-daemon log so it does not grow unbounded.
    $SUDO tee /etc/logrotate.d/${SYSTEM_NAME} >/dev/null << EOF
${LOG_FILE} {
	missingok
	notifempty
	copytruncate
}
EOF
}

# --- write systemd or openrc service file ---
create_service_file() {
    [ "${HAS_SYSTEMD}" = true ] && create_systemd_service_file
    [ "${HAS_OPENRC}" = true ] && create_openrc_service_file
    return 0
}

# --- get hashes of the current k3s bin and service files
get_installed_hashes() {
    $SUDO sha256sum ${BIN_DIR}/k3s ${FILE_K3S_SERVICE} ${FILE_K3S_ENV} 2>&1 || true
}
# --- enable and start systemd service ---
systemd_enable() {
    info "systemd: Enabling ${SYSTEM_NAME} unit"
    $SUDO systemctl enable ${FILE_K3S_SERVICE} >/dev/null
    $SUDO systemctl daemon-reload >/dev/null
}

systemd_start() {
    info "systemd: Starting ${SYSTEM_NAME}"
    $SUDO systemctl restart ${SYSTEM_NAME}
}

# --- enable and start openrc service ---
openrc_enable() {
    info "openrc: Enabling ${SYSTEM_NAME} service for default runlevel"
    $SUDO rc-update add ${SYSTEM_NAME} default >/dev/null
}

openrc_start() {
    info "openrc: Starting ${SYSTEM_NAME}"
    $SUDO ${FILE_K3S_SERVICE} restart
}

# --- startup systemd or openrc service ---
service_enable_and_start() {
    # Warn early if the memory cgroup controller is disabled (common on
    # Raspberry Pi images).
    if [ -f "/proc/cgroups" ] && [ "$(grep memory /proc/cgroups | while read -r n n n enabled; do echo $enabled; done)" -eq 0 ];
    then
        info 'Failed to find memory cgroup, you may need to add "cgroup_memory=1 cgroup_enable=memory" to your linux cmdline (/boot/cmdline.txt on a Raspberry Pi)'
    fi

    [ "${INSTALL_K3S_SKIP_ENABLE}" = true ] && return

    [ "${HAS_SYSTEMD}" = true ] && systemd_enable
    [ "${HAS_OPENRC}" = true ] && openrc_enable

    [ "${INSTALL_K3S_SKIP_START}" = true ] && return

    # Skip the (disruptive) restart if nothing actually changed on disk.
    POST_INSTALL_HASHES=$(get_installed_hashes)
    if [ "${PRE_INSTALL_HASHES}" = "${POST_INSTALL_HASHES}" ] && [ "${INSTALL_K3S_FORCE_RESTART}" != true ]; then
        info 'No change detected so skipping service start'
        return
    fi

    [ "${HAS_SYSTEMD}" = true ] && systemd_start
    [ "${HAS_OPENRC}" = true ] && openrc_start
    return 0
}

# --- re-evaluate args to include env command ---
# Merges INSTALL_K3S_EXEC with the script's positional args into "$@".
eval set -- $(escape "${INSTALL_K3S_EXEC}") $(quote "$@")

# --- run the install process --
{
    verify_system
    setup_env "$@"
    download_and_verify
    setup_selinux
    create_symlinks
    create_killall
    create_uninstall
    systemd_disable
    create_env_file
    create_service_file
    service_enable_and_start
}

View File

@ -0,0 +1,53 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudinit
import (
"github.com/pkg/errors"
)
const (
	// workerCloudInit is the cloud-init userdata rendered for an agent
	// (worker) node joining the cluster: write files, placeholder directory
	// marker, then pre-k3s commands, offline k3s install, post-k3s commands.
	workerCloudInit = `{{.Header}}
{{template "files" .WriteFiles}}
-   path: /run/cluster-api/placeholder
    owner: root:root
    permissions: '0640'
    content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)"
runcmd:
{{- template "commands" .PreK3sCommands }}
  - 'INSTALL_K3S_SKIP_DOWNLOAD=true /usr/local/bin/k3s-install.sh'
{{- template "commands" .PostK3sCommands }}
`
)
// NodeInput defines the context to generate an agent node cloud-init.
type NodeInput struct {
	// BaseUserData holds the fields shared by all node roles (header, write
	// files, sentinel command, pre/post k3s commands — declared elsewhere in
	// this package). Worker nodes need no extra fields.
	BaseUserData
}
// NewNode returns the cloud-init for joining a node instance.
func NewNode(input *NodeInput) ([]byte, error) {
	// prepare populates the shared base user data before rendering.
	if err := input.prepare(); err != nil {
		return nil, err
	}
	userData, err := generate("JoinWorker", workerCloudInit, input)
	if err != nil {
		// Wrap (not Wrapf): the message has no format verbs.
		return nil, errors.Wrap(err, "failed to generate user data for machine joining worker node")
	}
	// err is nil here; return an explicit nil instead of the stale variable.
	return userData, nil
}

View File

@ -0,0 +1,34 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudinit
import (
"strings"
"text/template"
)
var (
	// defaultTemplateFuncMap holds the functions made available to the
	// cloud-init templates; "Indent" re-indents multi-line file content so
	// it nests correctly under a YAML block scalar ("content: |").
	defaultTemplateFuncMap = template.FuncMap{
		"Indent": templateYAMLIndent,
	}
)
// templateYAMLIndent prefixes every line of input with i spaces so the result
// can be embedded under a YAML block scalar at that indentation level.
func templateYAMLIndent(i int, input string) string {
	prefix := strings.Repeat(" ", i)
	// Indent the first line directly and every subsequent line via its
	// preceding newline.
	return prefix + strings.ReplaceAll(input, "\n", "\n"+prefix)
}

View File

@ -0,0 +1,190 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package locking implements locking functionality.
package locking
import (
"context"
"encoding/json"
"fmt"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// semaphoreInformationKey is the ConfigMap data key under which the JSON
// describing the current lock holder is stored.
const semaphoreInformationKey = "lock-information"
// ControlPlaneInitMutex uses a ConfigMap to synchronize cluster initialization.
type ControlPlaneInitMutex struct {
	// client is used to create, read and delete the lock ConfigMap.
	client client.Client
}
// NewControlPlaneInitMutex returns a lock that can be held by a control plane
// node before init.
func NewControlPlaneInitMutex(client client.Client) *ControlPlaneInitMutex {
	mutex := &ControlPlaneInitMutex{client: client}
	return mutex
}
// Lock allows a control plane node to be the first and only node to run kubeadm init.
//
// The lock is a ConfigMap named "<cluster>-lock" in the cluster's namespace.
// Returns true only when this machine holds the lock (either it just created
// the ConfigMap, or the ConfigMap already records this machine's name).
func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool {
	sema := newSemaphore()
	cmName := configMapName(cluster.Name)
	log := ctrl.LoggerFrom(ctx, "ConfigMap", klog.KRef(cluster.Namespace, cmName))
	err := c.client.Get(ctx, client.ObjectKey{
		Namespace: cluster.Namespace,
		Name:      cmName,
	}, sema.ConfigMap)
	switch {
	case apierrors.IsNotFound(err):
		// Lock ConfigMap does not exist yet: fall through and try to create it below.
		break
	case err != nil:
		log.Error(err, "Failed to acquire init lock")
		return false
	default: // Successfully found an existing config map.
		info, err := sema.information()
		if err != nil {
			log.Error(err, "Failed to get information about the existing init lock")
			return false
		}
		// The machine requesting the lock is the machine that created the lock, therefore the lock is acquired.
		if info.MachineName == machine.Name {
			return true
		}
		// If the machine that created the lock can not be found unlock the mutex.
		if err := c.client.Get(ctx, client.ObjectKey{
			Namespace: cluster.Namespace,
			Name:      info.MachineName,
		}, &clusterv1.Machine{}); err != nil {
			log.Error(err, "Failed to get machine holding init lock")
			if apierrors.IsNotFound(err) {
				// Holder is gone: release so a later attempt can acquire.
				c.Unlock(ctx, cluster)
			}
		}
		log.Info(fmt.Sprintf("Waiting for Machine %s to initialize", info.MachineName))
		return false
	}
	// Adds owner reference, namespace and name
	sema.setMetadata(cluster)
	// Adds the additional information
	if err := sema.setInformation(&information{MachineName: machine.Name}); err != nil {
		log.Error(err, "Failed to acquire init lock while setting semaphore information")
		return false
	}
	log.Info("Attempting to acquire the lock")
	// Create is atomic: exactly one concurrent caller wins; the rest see AlreadyExists.
	err = c.client.Create(ctx, sema.ConfigMap)
	switch {
	case apierrors.IsAlreadyExists(err):
		log.Info("Cannot acquire the init lock. The init lock has been acquired by someone else")
		return false
	case err != nil:
		log.Error(err, "Error acquiring the init lock")
		return false
	default:
		return true
	}
}
// Unlock releases the lock by deleting the underlying ConfigMap.
// Returns true when the lock is released (or was already gone).
func (c *ControlPlaneInitMutex) Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool {
	sema := newSemaphore()
	cmName := configMapName(cluster.Name)
	log := ctrl.LoggerFrom(ctx, "ConfigMap", klog.KRef(cluster.Namespace, cmName))
	key := client.ObjectKey{Namespace: cluster.Namespace, Name: cmName}
	err := c.client.Get(ctx, key, sema.ConfigMap)
	if apierrors.IsNotFound(err) {
		log.Info("Control plane init lock not found, it may have been released already")
		return true
	}
	if err != nil {
		log.Error(err, "Error unlocking the control plane init lock")
		return false
	}
	// The ConfigMap was fetched successfully; deleting it releases the lock.
	if err := c.client.Delete(ctx, sema.ConfigMap); err != nil {
		if apierrors.IsNotFound(err) {
			return true
		}
		log.Error(err, "Error deleting the config map underlying the control plane init lock")
		return false
	}
	return true
}
// information is the payload stored in the lock ConfigMap: the name of the
// Machine currently holding the init lock.
type information struct {
	MachineName string `json:"machineName"`
}

// semaphore wraps the ConfigMap that backs the lock.
type semaphore struct {
	*corev1.ConfigMap
}
// newSemaphore returns a semaphore backed by an empty ConfigMap.
func newSemaphore() *semaphore {
	cm := &corev1.ConfigMap{}
	return &semaphore{ConfigMap: cm}
}
// configMapName returns the name of the lock ConfigMap for the given cluster.
func configMapName(clusterName string) string {
	return fmt.Sprint(clusterName, "-lock")
}
// information decodes the lock-holder payload stored in the ConfigMap data.
func (s semaphore) information() (*information, error) {
	info := &information{}
	raw := []byte(s.Data[semaphoreInformationKey])
	if err := json.Unmarshal(raw, info); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal semaphore information")
	}
	return info, nil
}
// setInformation serializes the lock-holder payload into the ConfigMap data,
// replacing any previous content.
func (s semaphore) setInformation(info *information) error {
	b, err := json.Marshal(info)
	if err != nil {
		return errors.Wrap(err, "failed to marshal semaphore information")
	}
	s.Data = map[string]string{semaphoreInformationKey: string(b)}
	return nil
}
// setMetadata sets the ConfigMap's namespace/name from the cluster, labels it
// as belonging to the cluster, and makes the cluster its owner so the lock is
// garbage-collected with the cluster.
func (s *semaphore) setMetadata(cluster *clusterv1.Cluster) {
	owner := metav1.OwnerReference{
		APIVersion: cluster.APIVersion,
		Kind:       cluster.Kind,
		Name:       cluster.Name,
		UID:        cluster.UID,
	}
	s.ObjectMeta = metav1.ObjectMeta{
		Namespace:       cluster.Namespace,
		Name:            configMapName(cluster.Name),
		Labels:          map[string]string{clusterv1.ClusterLabelName: cluster.Name},
		OwnerReferences: []metav1.OwnerReference{owner},
	}
}

View File

@ -0,0 +1,308 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package locking
import (
"context"
"errors"
"fmt"
"testing"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
const (
	// clusterName and clusterNamespace identify the Cluster object used
	// throughout these tests.
	clusterName      = "test-cluster"
	clusterNamespace = "test-namespace"
)

var (
	// ctx is the shared test context, cancelled on SIGTERM/SIGINT.
	ctx = ctrl.SetupSignalHandler()
)
// TestControlPlaneInitMutex_Lock exercises Lock against fake clients that can
// be forced to fail Get/Create, covering the acquire / already-held /
// create-error / lost-race paths.
func TestControlPlaneInitMutex_Lock(t *testing.T) {
	g := NewWithT(t)

	scheme := runtime.NewScheme()
	g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed())
	g.Expect(corev1.AddToScheme(scheme)).To(Succeed())

	uid := types.UID("test-uid")
	tests := []struct {
		name          string
		client        client.Client
		shouldAcquire bool
	}{
		{
			name: "should successfully acquire lock if the config cannot be found",
			client: &fakeClient{
				Client:   fake.NewClientBuilder().WithScheme(scheme).Build(),
				getError: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)),
			},
			shouldAcquire: true,
		},
		{
			// typo fixed: "exits" -> "exists"
			name: "should not acquire lock if already exists",
			client: &fakeClient{
				Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name:      configMapName(clusterName),
						Namespace: clusterNamespace,
					},
				}).Build(),
			},
			shouldAcquire: false,
		},
		{
			name: "should not acquire lock if cannot create config map",
			client: &fakeClient{
				Client:      fake.NewClientBuilder().WithScheme(scheme).Build(),
				getError:    apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, configMapName(clusterName)),
				createError: errors.New("create error"),
			},
			shouldAcquire: false,
		},
		{
			name: "should not acquire lock if config map already exists while creating",
			client: &fakeClient{
				Client:      fake.NewClientBuilder().WithScheme(scheme).Build(),
				getError:    apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)),
				createError: apierrors.NewAlreadyExists(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)),
			},
			shouldAcquire: false,
		},
	}
	for _, tc := range tests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			gs := NewWithT(t)
			l := &ControlPlaneInitMutex{
				client: tc.client,
			}
			cluster := &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: clusterNamespace,
					Name:      clusterName,
					UID:       uid,
				},
			}
			machine := &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("machine-%s", cluster.Name),
				},
			}
			gs.Expect(l.Lock(ctx, cluster, machine)).To(Equal(tc.shouldAcquire))
		})
	}
}
// TestControlPlaneInitMutex_LockWithMachineDeletion verifies the
// dead-holder recovery path: the lock stays with an existing holder machine,
// but is handed to a new machine when the recorded holder no longer exists.
func TestControlPlaneInitMutex_LockWithMachineDeletion(t *testing.T) {
	g := NewWithT(t)

	scheme := runtime.NewScheme()
	g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed())
	g.Expect(corev1.AddToScheme(scheme)).To(Succeed())

	newMachineName := "new-machine"
	tests := []struct {
		name                string
		client              client.Client
		expectedMachineName string
	}{
		{
			name: "should not give the lock to new machine if the machine that created it does exist",
			client: &fakeClient{
				Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(
					&corev1.ConfigMap{
						ObjectMeta: metav1.ObjectMeta{
							Name:      configMapName(clusterName),
							Namespace: clusterNamespace},
						Data: map[string]string{
							"lock-information": "{\"machineName\":\"existent-machine\"}",
						}},
					&clusterv1.Machine{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "existent-machine",
							Namespace: clusterNamespace,
						},
					},
				).Build(),
			},
			expectedMachineName: "existent-machine",
		},
		{
			name: "should give the lock to new machine if the machine that created it does not exist",
			client: &fakeClient{
				Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(
					&corev1.ConfigMap{
						ObjectMeta: metav1.ObjectMeta{
							Name:      configMapName(clusterName),
							Namespace: clusterNamespace},
						Data: map[string]string{
							"lock-information": "{\"machineName\":\"non-existent-machine\"}",
						}},
				).Build(),
			},
			expectedMachineName: newMachineName,
		},
	}
	for _, tc := range tests {
		tc := tc // capture range variable (Go < 1.22), consistent with sibling tests
		t.Run(tc.name, func(t *testing.T) {
			// Use a Gomega bound to the subtest's *testing.T so failures are
			// attributed to the subtest, not the parent test.
			gs := NewWithT(t)
			l := &ControlPlaneInitMutex{
				client: tc.client,
			}
			cluster := &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: clusterNamespace,
					Name:      clusterName,
				},
			}
			machine := &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name: newMachineName,
				},
			}
			gs.Eventually(func(g Gomega) error {
				// First Lock may release a stale lock; eventually the
				// expected holder is recorded in the ConfigMap.
				l.Lock(ctx, cluster, machine)
				cm := &corev1.ConfigMap{}
				g.Expect(tc.client.Get(ctx, client.ObjectKey{
					Name:      configMapName(clusterName),
					Namespace: cluster.Namespace,
				}, cm)).To(Succeed())
				info, err := semaphore{cm}.information()
				g.Expect(err).To(BeNil())
				g.Expect(info.MachineName).To(Equal(tc.expectedMachineName))
				return nil
			}, "20s").Should(Succeed())
		})
	}
}
// TestControlPlaneInitMutex_UnLock exercises Unlock against fake clients that
// can be forced to fail Get/Delete, covering the release / delete-error /
// already-gone / get-error paths.
func TestControlPlaneInitMutex_UnLock(t *testing.T) {
	uid := types.UID("test-uid")
	configMap := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      configMapName(clusterName),
			Namespace: clusterNamespace,
		},
	}
	tests := []struct {
		name          string
		client        client.Client
		shouldRelease bool
	}{
		{
			name: "should release lock by deleting config map",
			client: &fakeClient{
				Client: fake.NewClientBuilder().Build(),
			},
			shouldRelease: true,
		},
		{
			name: "should not release lock if cannot delete config map",
			client: &fakeClient{
				Client:      fake.NewClientBuilder().WithObjects(configMap).Build(),
				deleteError: errors.New("delete error"),
			},
			shouldRelease: false,
		},
		{
			name: "should release lock if config map does not exist",
			client: &fakeClient{
				Client:   fake.NewClientBuilder().Build(),
				getError: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)),
			},
			shouldRelease: true,
		},
		{
			name: "should not release lock if error while getting config map",
			client: &fakeClient{
				Client:   fake.NewClientBuilder().Build(),
				getError: errors.New("get error"),
			},
			shouldRelease: false,
		},
	}
	for _, tc := range tests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			gs := NewWithT(t)
			l := &ControlPlaneInitMutex{
				client: tc.client,
			}
			cluster := &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: clusterNamespace,
					Name:      clusterName,
					UID:       uid,
				},
			}
			gs.Expect(l.Unlock(ctx, cluster)).To(Equal(tc.shouldRelease))
		})
	}
}
// fakeClient wraps a controller-runtime client and allows individual verbs
// to be forced to fail with a preset error; unset errors delegate to the
// embedded client.
type fakeClient struct {
	client.Client
	getError    error
	createError error
	deleteError error
}

// Get returns getError when configured, otherwise delegates to the embedded client.
func (fc *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error {
	if err := fc.getError; err != nil {
		return err
	}
	return fc.Client.Get(ctx, key, obj)
}

// Create returns createError when configured, otherwise delegates to the embedded client.
func (fc *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
	if err := fc.createError; err != nil {
		return err
	}
	return fc.Client.Create(ctx, obj, opts...)
}

// Delete returns deleteError when configured, otherwise delegates to the embedded client.
func (fc *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
	if err := fc.deleteError; err != nil {
		return err
	}
	return fc.Client.Delete(ctx, obj, opts...)
}

View File

@ -0,0 +1,104 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
// DefaultK3sConfigLocation is the default location for the k3s config file,
// read by both the k3s server and agent on startup.
const DefaultK3sConfigLocation = "/etc/rancher/k3s/config.yaml"
// K3sServerConfiguration is the configuration for the k3s server.
// The json tags mirror the flat key names of the k3s config file
// (see DefaultK3sConfigLocation); zero values are omitted so only
// explicitly-set options are rendered.
type K3sServerConfiguration struct {
	// Database
	// DataStoreEndPoint specifies the datastore source name (etcd, MySQL, Postgres, or SQLite).
	DataStoreEndPoint string `json:"datastore-endpoint,omitempty"`
	// DataStoreCAFile TLS Certificate Authority file used to secure datastore backend communication.
	DataStoreCAFile string `json:"datastore-cafile,omitempty"`
	// DataStoreCertFile TLS certificate file used to secure datastore backend communication.
	DataStoreCertFile string `json:"datastore-certfile,omitempty"`
	// DataStoreKeyFile TLS key file used to secure datastore backend communication.
	DataStoreKeyFile string `json:"datastore-keyfile,omitempty"`

	// Cluster
	// Token shared secret used to join a server or agent to the cluster.
	Token string `json:"token,omitempty"`
	// TokenFile file containing the cluster token.
	TokenFile string `json:"token-file,omitempty"`
	// Server URL of an existing server to join.
	Server string `json:"server,omitempty"`
	// CloudInit initializes a new cluster using embedded etcd (populated from
	// the spec's Database.ClusterInit).
	// NOTE: the serialized key must be "cluster-init" — the matching k3s flag
	// is --cluster-init; the previous "cloud-init" key is not a k3s option and
	// was silently ignored. The Go field name is kept for compatibility.
	CloudInit bool `json:"cluster-init,omitempty"`

	// Listener
	// BindAddress k3s bind address.
	BindAddress string `json:"bind-address,omitempty"`
	// HTTPSListenPort HTTPS listen port.
	HTTPSListenPort int `json:"https-listen-port,omitempty"`
	// AdvertiseAddress IP address that apiserver uses to advertise to members of the cluster.
	AdvertiseAddress string `json:"advertise-address,omitempty"`
	// AdvertisePort Port that apiserver uses to advertise to members of the cluster (default: listen-port).
	AdvertisePort int `json:"advertise-port,omitempty"`
	// TLSSan Add additional hostname or IP as a Subject Alternative Name in the TLS cert.
	TLSSan string `json:"tls-san,omitempty"`

	// Networking
	// ClusterCIDR Network CIDR to use for pod IPs.
	ClusterCIDR string `json:"cluster-cidr,omitempty"`
	// ServiceCIDR Network CIDR to use for services IPs.
	ServiceCIDR string `json:"service-cidr,omitempty"`
	// ServiceNodePortRange Port range to reserve for services with NodePort visibility.
	ServiceNodePortRange string `json:"service-node-port-range,omitempty"`
	// ClusterDNS cluster IP for coredns service. Should be in your service-cidr range.
	ClusterDNS string `json:"cluster-dns,omitempty"`
	// ClusterDomain cluster Domain.
	ClusterDomain string `json:"cluster-domain,omitempty"`
	// FlannelBackend One of none, vxlan, ipsec, host-gw, or wireguard. (default: vxlan)
	FlannelBackend string `json:"flannel-backend,omitempty"`

	// Agent
	// K3sAgentConfiguration embeds the agent options, all of which a server also accepts.
	K3sAgentConfiguration `json:",inline"`
}
// K3sAgentConfiguration is the configuration for the k3s agent.
// The json tags mirror the flat key names of the k3s config file;
// zero values are omitted so only explicitly-set options are rendered.
type K3sAgentConfiguration struct {
	// Cluster
	// Token shared secret used to join the cluster.
	Token string `json:"token,omitempty"`
	// TokenFile file containing the cluster token.
	TokenFile string `json:"token-file,omitempty"`
	// Server URL of the server to connect to.
	Server string `json:"server,omitempty"`
	// NodeName k3s node name.
	NodeName string `json:"node-name,omitempty"`
	// NodeLabels registering and starting kubelet with set of labels.
	NodeLabels []string `json:"node-label,omitempty"`
	// NodeTaints registering and starting kubelet with set of taints.
	NodeTaints []string `json:"node-taint,omitempty"`
	// SeLinux Enable SELinux in containerd
	SeLinux bool `json:"selinux,omitempty"`
	// LBServerPort
	// Local port for supervisor client load-balancer.
	// If the supervisor and apiserver are not colocated an additional port 1 less than this port
	// will also be used for the apiserver client load-balancer. (default: 6444)
	LBServerPort int `json:"lb-server-port,omitempty"`
	// DataDir Folder to hold state.
	DataDir string `json:"data-dir,omitempty"`

	// Runtime
	// ContainerRuntimeEndpoint Disable embedded containerd and use alternative CRI implementation.
	ContainerRuntimeEndpoint string `json:"container-runtime-endpoint,omitempty"`
	// PauseImage Customized pause image for containerd or Docker sandbox.
	PauseImage string `json:"pause-image,omitempty"`
	// PrivateRegistry Path to a private registry configuration file.
	PrivateRegistry string `json:"private-registry,omitempty"`

	// Networking
	// NodeIP IP address to advertise for node.
	NodeIP string `json:"node-ip,omitempty"`
	// NodeExternalIP External IP address to advertise for node.
	NodeExternalIP string `json:"node-external-ip,omitempty"`
	// ResolvConf Path to Kubelet resolv.conf file.
	ResolvConf string `json:"resolv-conf,omitempty"`
}

View File

@ -0,0 +1,18 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package types contains k3s config types.
package types

View File

@ -0,0 +1,101 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"github.com/jinzhu/copier"
kubeyaml "sigs.k8s.io/yaml"
infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
)
// MarshalInitServerConfiguration marshals the ServerConfiguration object into a string.
// It is used for the first (cluster-initializing) server: the generated token
// is injected and the cluster-init flag is carried over from the spec.
func MarshalInitServerConfiguration(spec *infrabootstrapv1.K3sConfigSpec, token string) (string, error) {
	obj := spec.ServerConfiguration
	serverConfig := &K3sServerConfiguration{}
	if err := copier.Copy(serverConfig, obj); err != nil {
		return "", err
	}
	serverConfig.Token = token
	serverConfig.CloudInit = obj.Database.ClusterInit
	// Flatten the nested agent options into the flat k3s config layout.
	agent := K3sAgentConfiguration{
		NodeName:                 obj.Agent.Node.NodeName,
		NodeLabels:               obj.Agent.Node.NodeLabels,
		NodeTaints:               obj.Agent.Node.NodeTaints,
		SeLinux:                  obj.Agent.Node.SeLinux,
		LBServerPort:             obj.Agent.Node.LBServerPort,
		DataDir:                  obj.Agent.Node.DataDir,
		ContainerRuntimeEndpoint: obj.Agent.Runtime.ContainerRuntimeEndpoint,
		PauseImage:               obj.Agent.Runtime.PauseImage,
		PrivateRegistry:          obj.Agent.Runtime.PrivateRegistry,
		NodeIP:                   obj.Agent.Networking.NodeIP,
		NodeExternalIP:           obj.Agent.Networking.NodeExternalIP,
		ResolvConf:               obj.Agent.Networking.ResolvConf,
	}
	serverConfig.K3sAgentConfiguration = agent
	out, err := kubeyaml.Marshal(serverConfig)
	if err != nil {
		return "", err
	}
	return string(out), nil
}
// MarshalJoinServerConfiguration marshals the join ServerConfiguration object into a string.
// Unlike the init variant it sets neither the token nor cluster-init.
func MarshalJoinServerConfiguration(obj *infrabootstrapv1.ServerConfiguration) (string, error) {
	serverConfig := &K3sServerConfiguration{}
	if err := copier.Copy(serverConfig, obj); err != nil {
		return "", err
	}
	// Flatten the nested agent options into the flat k3s config layout.
	agent := K3sAgentConfiguration{
		NodeName:                 obj.Agent.Node.NodeName,
		NodeLabels:               obj.Agent.Node.NodeLabels,
		NodeTaints:               obj.Agent.Node.NodeTaints,
		SeLinux:                  obj.Agent.Node.SeLinux,
		LBServerPort:             obj.Agent.Node.LBServerPort,
		DataDir:                  obj.Agent.Node.DataDir,
		ContainerRuntimeEndpoint: obj.Agent.Runtime.ContainerRuntimeEndpoint,
		PauseImage:               obj.Agent.Runtime.PauseImage,
		PrivateRegistry:          obj.Agent.Runtime.PrivateRegistry,
		NodeIP:                   obj.Agent.Networking.NodeIP,
		NodeExternalIP:           obj.Agent.Networking.NodeExternalIP,
		ResolvConf:               obj.Agent.Networking.ResolvConf,
	}
	serverConfig.K3sAgentConfiguration = agent
	out, err := kubeyaml.Marshal(serverConfig)
	if err != nil {
		return "", err
	}
	return string(out), nil
}
// MarshalJoinAgentConfiguration marshals the join AgentConfiguration object into a string.
func MarshalJoinAgentConfiguration(obj *infrabootstrapv1.AgentConfiguration) (string, error) {
	agentConfig := &K3sAgentConfiguration{}
	if err := copier.Copy(agentConfig, obj); err != nil {
		return "", err
	}
	out, err := kubeyaml.Marshal(agentConfig)
	if err != nil {
		return "", err
	}
	return string(out), nil
}

View File

@ -0,0 +1,73 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubernetes
import (
"testing"
)
// Test_calculateNextStr verifies the upgrade-path calculation: a one-minor
// upgrade is returned as-is (when supported), a larger jump steps through the
// latest supported patch of the next minor, and unsupported targets yield
// descriptive errors.
func Test_calculateNextStr(t *testing.T) {
	tests := []struct {
		currentVersion string
		desiredVersion string
		want           string
		wantErr        bool
		errMsg         string
	}{
		{
			currentVersion: "v1.21.5",
			desiredVersion: "v1.22.5",
			want:           "v1.22.5",
			wantErr:        false,
		},
		{
			currentVersion: "v1.21.5",
			desiredVersion: "v1.23.5",
			want:           "v1.22.12",
			wantErr:        false,
		},
		{
			currentVersion: "v1.17.5",
			desiredVersion: "v1.18.5",
			want:           "",
			wantErr:        true,
			errMsg:         "the target version v1.18.5 is not supported",
		},
		{
			currentVersion: "v1.17.5",
			desiredVersion: "v1.21.5",
			want:           "",
			wantErr:        true,
			errMsg:         "Kubernetes minor version v1.18.x is not supported",
		},
	}
	for _, tt := range tests {
		tt := tt
		// Name each subtest after the version transition so failures are
		// attributable; previously every subtest ran under an empty name.
		t.Run(tt.currentVersion+"->"+tt.desiredVersion, func(t *testing.T) {
			got, err := calculateNextStr(tt.currentVersion, tt.desiredVersion)
			if (err != nil) != tt.wantErr {
				t.Errorf("calculateNextStr() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("calculateNextStr() got = %v, want %v", got, tt.want)
			}
			if err != nil && err.Error() != tt.errMsg {
				t.Errorf("calculateNextStr() error = %v, want %v", err, tt.errMsg)
			}
		})
	}
}

View File

@ -616,12 +616,15 @@ func (c *CalculateNextVersion) Execute(_ connector.Runtime) error {
if !ok {
return errors.New("get upgrade plan Kubernetes version failed by pipeline cache")
}
nextVersionStr := calculateNextStr(currentVersion, planVersion)
nextVersionStr, err := calculateNextStr(currentVersion, planVersion)
if err != nil {
return errors.Wrap(err, "calculate next version failed")
}
c.KubeConf.Cluster.Kubernetes.Version = nextVersionStr
return nil
}
func calculateNextStr(currentVersion, desiredVersion string) string {
func calculateNextStr(currentVersion, desiredVersion string) (string, error) {
current := versionutil.MustParseSemantic(currentVersion)
target := versionutil.MustParseSemantic(desiredVersion)
var nextVersionMinor uint
@ -632,7 +635,10 @@ func calculateNextStr(currentVersion, desiredVersion string) string {
}
if nextVersionMinor == target.Minor() {
return desiredVersion
if _, ok := files.FileSha256["kubeadm"]["amd64"][desiredVersion]; !ok {
return "", errors.Errorf("the target version %s is not supported", desiredVersion)
}
return desiredVersion, nil
} else {
nextVersionPatchList := make([]int, 0)
for supportVersionStr := range files.FileSha256["kubeadm"]["amd64"] {
@ -644,9 +650,12 @@ func calculateNextStr(currentVersion, desiredVersion string) string {
sort.Ints(nextVersionPatchList)
nextVersion := current.WithMinor(nextVersionMinor)
if len(nextVersionPatchList) == 0 {
return "", errors.Errorf("Kubernetes minor version v%d.%d.x is not supported", nextVersion.Major(), nextVersion.Minor())
}
nextVersion = nextVersion.WithPatch(uint(nextVersionPatchList[len(nextVersionPatchList)-1]))
return fmt.Sprintf("v%s", nextVersion.String())
return fmt.Sprintf("v%s", nextVersion.String()), nil
}
}

View File

@ -128,6 +128,10 @@ spec:
description: The hostname on which the API server is serving.
type: string
type: object
distribution:
description: Distribution represents the Kubernetes distribution type
of the cluster.
type: string
nodes:
description: Nodes represents the information about the nodes available
to the cluster

View File

@ -149,6 +149,10 @@ spec:
description: The hostname on which the API server is serving.
type: string
type: object
distribution:
description: Distribution represents the Kubernetes distribution
type of the cluster.
type: string
nodes:
description: Nodes represents the information about the nodes
available to the cluster

View File

@ -15,20 +15,20 @@ commonLabels:
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_kkclusters.yaml
#- patches/webhook_in_kkclustertemplates.yaml
#- patches/webhook_in_kkmachines.yaml
#- patches/webhook_in_kkmachinetemplates.yaml
#- patches/webhook_in_kkinstances.yaml
- patches/webhook_in_kkclusters.yaml
- patches/webhook_in_kkclustertemplates.yaml
- patches/webhook_in_kkmachines.yaml
- patches/webhook_in_kkmachinetemplates.yaml
- patches/webhook_in_kkinstances.yaml
#+kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_kkclusters.yaml
#- patches/cainjection_in_kkclustertemplates.yaml
#- patches/cainjection_in_kkmachines.yaml
#- patches/cainjection_in_kkmachinetemplates.yaml
#- patches/cainjection_in_kkinstances.yaml
- patches/cainjection_in_kkclusters.yaml
- patches/cainjection_in_kkclustertemplates.yaml
- patches/cainjection_in_kkmachines.yaml
- patches/cainjection_in_kkmachinetemplates.yaml
- patches/cainjection_in_kkinstances.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.

View File

@ -4,4 +4,4 @@ kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: kkclusters.infrastructure.kubekey.kubesphere.io
name: kkclusters.infrastructure.cluster.x-k8s.io

View File

@ -2,15 +2,17 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: kkclusters.infrastructure.kubekey.kubesphere.io
name: kkclusters.infrastructure.cluster.x-k8s.io
spec:
conversion:
strategy: Webhook
webhook:
conversionReviewVersions: ["v1", "v1beta1"]
clientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

View File

@ -7,10 +7,12 @@ spec:
conversion:
strategy: Webhook
webhook:
conversionReviewVersions: ["v1", "v1beta1"]
clientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

View File

@ -7,10 +7,12 @@ spec:
conversion:
strategy: Webhook
webhook:
conversionReviewVersions: ["v1", "v1beta1"]
clientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

View File

@ -7,10 +7,12 @@ spec:
conversion:
strategy: Webhook
webhook:
conversionReviewVersions: ["v1", "v1beta1"]
clientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

View File

@ -7,10 +7,12 @@ spec:
conversion:
strategy: Webhook
webhook:
conversionReviewVersions: ["v1", "v1beta1"]
clientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

View File

@ -7,5 +7,5 @@ spec:
template:
spec:
containers:
- image: docker.io/kubespheredev/capkk-manager:main
- image: docker.io/kubespheredev/capkk-controller:main
name: manager

View File

@ -35,14 +35,19 @@ spec:
livenessProbe:
httpGet:
path: /healthz
port: 8081
port: 9440
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
port: 9440
initialDelaySeconds: 5
periodSeconds: 10
serviceAccountName: controller-manager
terminationGracePeriodSeconds: 10
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane

View File

@ -70,7 +70,7 @@ type KKInstanceReconciler struct {
sshClientFactory func(scope *scope.InstanceScope) ssh.Interface
bootstrapFactory func(sshClient ssh.Interface, scope scope.LBScope, instanceScope *scope.InstanceScope) service.Bootstrap
repositoryFactory func(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope) service.Repository
binaryFactory func(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope) service.BinaryService
binaryFactory func(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope, distribution string) service.BinaryService
containerManagerFactory func(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope) service.ContainerManager
provisioningFactory func(sshClient ssh.Interface, format bootstrapv1.Format) service.Provisioning
WatchFilterValue string
@ -101,11 +101,11 @@ func (r *KKInstanceReconciler) getRepositoryService(sshClient ssh.Interface, sco
return repository.NewService(sshClient, scope, instanceScope)
}
func (r *KKInstanceReconciler) getBinaryService(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope) service.BinaryService {
func (r *KKInstanceReconciler) getBinaryService(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope, distribution string) service.BinaryService {
if r.binaryFactory != nil {
return r.binaryFactory(sshClient, scope, instanceScope)
return r.binaryFactory(sshClient, scope, instanceScope, distribution)
}
return binary.NewService(sshClient, scope, instanceScope)
return binary.NewService(sshClient, scope, instanceScope, distribution)
}
func (r *KKInstanceReconciler) getContainerManager(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceScope *scope.InstanceScope) service.ContainerManager {
@ -330,14 +330,7 @@ func (r *KKInstanceReconciler) reconcileNormal(ctx context.Context, instanceScop
sshClient := r.getSSHClient(instanceScope)
phases := []func(context.Context, ssh.Interface, *scope.InstanceScope, scope.KKInstanceScope, scope.LBScope) error{
r.reconcileBootstrap,
r.reconcileRepository,
r.reconcileBinaryService,
r.reconcileContainerManager,
r.reconcileProvisioning,
}
phases := r.phaseFactory(kkInstanceScope)
for _, phase := range phases {
pollErr := wait.PollImmediate(r.WaitKKInstanceInterval, r.WaitKKInstanceTimeout, func() (done bool, err error) {
if err := phase(ctx, sshClient, instanceScope, kkInstanceScope, lbScope); err != nil {

View File

@ -32,6 +32,29 @@ import (
"github.com/kubesphere/kubekey/pkg/service"
)
// phaseFactory returns the ordered list of reconcile phases to run against an
// instance, selected by the cluster's Kubernetes distribution. K3s omits the
// container-manager phase; an unrecognized distribution yields no phases.
func (r *KKInstanceReconciler) phaseFactory(kkInstanceScope scope.KKInstanceScope) []func(context.Context, ssh.Interface,
	*scope.InstanceScope, scope.KKInstanceScope, scope.LBScope) error {
	// phaseFn is an alias, so []phaseFn is exactly the declared return type.
	type phaseFn = func(context.Context, ssh.Interface, *scope.InstanceScope, scope.KKInstanceScope, scope.LBScope) error

	// Phases shared by every distribution, in execution order.
	common := []phaseFn{
		r.reconcileBootstrap,
		r.reconcileRepository,
		r.reconcileBinaryService,
	}

	switch kkInstanceScope.Distribution() {
	case infrav1.KUBERNETES:
		return append(common, r.reconcileContainerManager, r.reconcileProvisioning)
	case infrav1.K3S:
		return append(common, r.reconcileProvisioning)
	default:
		return nil
	}
}
func (r *KKInstanceReconciler) reconcilePing(_ context.Context, instanceScope *scope.InstanceScope) error {
instanceScope.Info("Reconcile ping")
@ -189,14 +212,8 @@ func (r *KKInstanceReconciler) reconcileBinaryService(_ context.Context, sshClie
instanceScope.Info("Reconcile binary service")
svc := r.getBinaryService(sshClient, kkInstanceScope, instanceScope)
if err := svc.DownloadAll(r.WaitKKInstanceTimeout); err != nil {
return err
}
if err := svc.ConfigureKubelet(); err != nil {
return err
}
if err := svc.ConfigureKubeadm(); err != nil {
svc := r.getBinaryService(sshClient, kkInstanceScope, instanceScope, kkInstanceScope.Distribution())
if err := svc.Download(r.WaitKKInstanceTimeout); err != nil {
return err
}
return nil

View File

@ -16,64 +16,47 @@ limitations under the License.
package controllers
import (
"path/filepath"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
infrav1 "github.com/kubesphere/kubekey/api/v1beta1"
//+kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
// var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
}
cfg, err := testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
err = infrav1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
//+kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
//// These tests use Ginkgo (BDD-style Go testing framework). Refer to
//// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
//
//// var cfg *rest.Config
//var k8sClient client.Client
//var testEnv *envtest.Environment
//
//func TestAPIs(t *testing.T) {
// RegisterFailHandler(Fail)
//
// RunSpecsWithDefaultAndCustomReporters(t,
// "Controller Suite",
// []Reporter{printer.NewlineReporter{}})
//}
//
//var _ = BeforeSuite(func() {
// logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
//
// By("bootstrapping test environment")
// testEnv = &envtest.Environment{
// CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
// ErrorIfCRDPathMissing: true,
// }
//
// cfg, err := testEnv.Start()
// Expect(err).NotTo(HaveOccurred())
// Expect(cfg).NotTo(BeNil())
//
// err = infrav1.AddToScheme(scheme.Scheme)
// Expect(err).NotTo(HaveOccurred())
//
// //+kubebuilder:scaffold:scheme
//
// k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
// Expect(err).NotTo(HaveOccurred())
// Expect(k8sClient).NotTo(BeNil())
//
//}, 60)
//
//var _ = AfterSuite(func() {
// By("tearing down the test environment")
// err := testEnv.Stop()
// Expect(err).NotTo(HaveOccurred())
//})

36
controlplane/k3s/PROJECT Normal file
View File

@ -0,0 +1,36 @@
domain: cluster.x-k8s.io
layout:
- go.kubebuilder.io/v3
plugins:
manifests.sdk.operatorframework.io/v2: {}
scorecard.sdk.operatorframework.io/v2: {}
projectName: k3s
repo: github.com/kubesphere/kubekey/controlplane/k3s
resources:
- api:
crdVersion: v1
namespaced: true
controller: true
domain: cluster.x-k8s.io
group: controlplane
kind: K3sControlPlane
path: github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1
version: v1beta1
webhooks:
defaulting: true
validation: true
webhookVersion: v1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: cluster.x-k8s.io
group: controlplane
kind: K3sControlPlaneTemplate
path: github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1
version: v1beta1
webhooks:
defaulting: true
validation: true
webhookVersion: v1
version: "3"

View File

@ -0,0 +1,135 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
// Conditions and condition Reasons for the K3sControlPlane object.
const (
	// MachinesReadyCondition reports an aggregate of current status of the machines controlled by the K3sControlPlane.
	MachinesReadyCondition clusterv1.ConditionType = "MachinesReady"
)

const (
	// CertificatesAvailableCondition documents that cluster certificates were generated as part of the
	// processing of a K3sControlPlane object.
	CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable"

	// CertificatesGenerationFailedReason (Severity=Warning) documents a K3sControlPlane controller detecting
	// an error while generating certificates; those kinds of errors are usually temporary and the controller
	// automatically recovers from them.
	CertificatesGenerationFailedReason = "CertificatesGenerationFailed"
)

const (
	// AvailableCondition documents that the first control plane instance has completed the init operation
	// and so the control plane is available and an API server instance is ready for processing requests.
	AvailableCondition clusterv1.ConditionType = "Available"

	// WaitingForKubeadmInitReason (Severity=Info) documents a K3sControlPlane object waiting for the first
	// control plane instance to complete the init operation.
	// NOTE(review): the reason name and string are inherited from the kubeadm provider; renaming the
	// published string would break consumers, so they are kept as is.
	WaitingForKubeadmInitReason = "WaitingForKubeadmInit"
)

const (
	// MachinesSpecUpToDateCondition documents that the spec of the machines controlled by the K3sControlPlane
	// is up to date. When this condition is false, the K3sControlPlane is executing a rolling upgrade.
	MachinesSpecUpToDateCondition clusterv1.ConditionType = "MachinesSpecUpToDate"

	// RollingUpdateInProgressReason (Severity=Warning) documents a K3sControlPlane object executing a
	// rolling upgrade for aligning the machines spec to the desired state.
	RollingUpdateInProgressReason = "RollingUpdateInProgress"
)

const (
	// ResizedCondition documents a K3sControlPlane that is resizing the set of controlled machines.
	ResizedCondition clusterv1.ConditionType = "Resized"

	// ScalingUpReason (Severity=Info) documents a K3sControlPlane that is increasing the number of replicas.
	ScalingUpReason = "ScalingUp"

	// ScalingDownReason (Severity=Info) documents a K3sControlPlane that is decreasing the number of replicas.
	ScalingDownReason = "ScalingDown"
)

const (
	// ControlPlaneComponentsHealthyCondition reports the overall status of the control plane components
	// (API server, controller manager, scheduler and etcd if managed).
	// NOTE(review): the original comment described kubeadm static pods; k3s runs these components inside
	// the server process — confirm how component health is actually inspected here.
	ControlPlaneComponentsHealthyCondition clusterv1.ConditionType = "ControlPlaneComponentsHealthy"

	// ControlPlaneComponentsUnhealthyReason (Severity=Error) documents a control plane component not healthy.
	ControlPlaneComponentsUnhealthyReason = "ControlPlaneComponentsUnhealthy"

	// ControlPlaneComponentsUnknownReason reports a control plane component in unknown status.
	ControlPlaneComponentsUnknownReason = "ControlPlaneComponentsUnknown"

	// ControlPlaneComponentsInspectionFailedReason documents a failure in inspecting the control plane component status.
	ControlPlaneComponentsInspectionFailedReason = "ControlPlaneComponentsInspectionFailed"

	// MachineAgentHealthyCondition reports a machine's agent operational status.
	// NOTE: This condition exists only if a stacked etcd cluster is used.
	MachineAgentHealthyCondition clusterv1.ConditionType = "AgentHealthy"

	// PodProvisioningReason (Severity=Info) documents a pod waiting to be provisioned, i.e. the Pod is in "Pending" phase.
	PodProvisioningReason = "PodProvisioning"

	// PodMissingReason (Severity=Error) documents a pod that does not exist.
	PodMissingReason = "PodMissing"

	// PodFailedReason (Severity=Error) documents a pod that failed during provisioning (e.g. CrashLoopBackOff,
	// ImagePullBackOff) or whose containers have all terminated.
	PodFailedReason = "PodFailed"

	// PodInspectionFailedReason documents a failure in inspecting the pod status.
	PodInspectionFailedReason = "PodInspectionFailed"
)

const (
	// EtcdClusterHealthyCondition documents the overall etcd cluster's health.
	// NOTE(review): the condition string carries a redundant "Condition" suffix
	// ("EtcdClusterHealthyCondition"); changing it now would break existing consumers, so it is left as is.
	EtcdClusterHealthyCondition clusterv1.ConditionType = "EtcdClusterHealthyCondition"

	// EtcdClusterInspectionFailedReason documents a failure in inspecting the etcd cluster status.
	EtcdClusterInspectionFailedReason = "EtcdClusterInspectionFailed"

	// MachineEtcdMemberHealthyCondition reports the machine's etcd member's health status.
	// NOTE: This condition exists only if a stacked etcd cluster is used.
	MachineEtcdMemberHealthyCondition clusterv1.ConditionType = "EtcdMemberHealthy"

	// EtcdMemberInspectionFailedReason documents a failure in inspecting the etcd member status.
	EtcdMemberInspectionFailedReason = "MemberInspectionFailed"

	// MachinesCreatedCondition documents that the machines controlled by the K3sControlPlane are created.
	// When this condition is false, it indicates that there was an error when cloning the infrastructure/bootstrap template or
	// when generating the machine object.
	MachinesCreatedCondition clusterv1.ConditionType = "MachinesCreated"

	// InfrastructureTemplateCloningFailedReason (Severity=Error) documents a K3sControlPlane failing to
	// clone the infrastructure template.
	InfrastructureTemplateCloningFailedReason = "InfrastructureTemplateCloningFailed"

	// BootstrapTemplateCloningFailedReason (Severity=Error) documents a K3sControlPlane failing to
	// clone the bootstrap template.
	BootstrapTemplateCloningFailedReason = "BootstrapTemplateCloningFailed"

	// MachineGenerationFailedReason (Severity=Error) documents a K3sControlPlane failing to
	// generate a machine object.
	MachineGenerationFailedReason = "MachineGenerationFailed"
)

View File

@ -0,0 +1,36 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1beta1 contains API Schema definitions for the controlplane v1beta1 API group.
// +kubebuilder:object:generate=true
// +groupName=controlplane.cluster.x-k8s.io
package v1beta1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is the group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "controlplane.cluster.x-k8s.io", Version: "v1beta1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

View File

@ -0,0 +1,242 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/errors"
infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
)
// RolloutStrategyType defines the rollout strategies for a K3sControlPlane.
type RolloutStrategyType string

const (
	// RollingUpdateStrategyType replaces the old control planes by new ones using a rolling update,
	// i.e. gradually scale up or down the old control planes and scale up or down the new ones.
	RollingUpdateStrategyType RolloutStrategyType = "RollingUpdate"
)

const (
	// K3sControlPlaneFinalizer is the finalizer applied to K3sControlPlane resources
	// by its managing controller.
	K3sControlPlaneFinalizer = "k3s.controlplane.cluster.x-k8s.io"

	// SkipCoreDNSAnnotation annotation explicitly skips reconciling CoreDNS if set.
	SkipCoreDNSAnnotation = "controlplane.cluster.x-k8s.io/skip-coredns"

	// SkipKubeProxyAnnotation annotation explicitly skips reconciling kube-proxy if set.
	SkipKubeProxyAnnotation = "controlplane.cluster.x-k8s.io/skip-kube-proxy"

	// K3sServerConfigurationAnnotation is a machine annotation that stores the json-marshalled string of the K3SCP ClusterConfiguration.
	// This annotation is used to detect any changes in ClusterConfiguration and trigger a machine rollout in K3SCP.
	K3sServerConfigurationAnnotation = "controlplane.cluster.x-k8s.io/k3s-server-configuration"
)
// K3sControlPlaneSpec defines the desired state of K3sControlPlane.
type K3sControlPlaneSpec struct {
	// Number of desired machines. Defaults to 1. When stacked etcd is used only
	// odd numbers are permitted, as per [etcd best practice](https://etcd.io/docs/v3.3.12/faq/#why-an-odd-number-of-cluster-members).
	// This is a pointer to distinguish between explicit zero and not specified.
	// +optional
	Replicas *int32 `json:"replicas,omitempty"`

	// Version defines the desired K3s version.
	Version string `json:"version"`

	// MachineTemplate contains information about how machines
	// should be shaped when creating or updating a control plane.
	MachineTemplate K3sControlPlaneMachineTemplate `json:"machineTemplate"`

	// K3sConfigSpec is the K3s configuration
	// to use for initializing and joining machines to the control plane.
	// +optional
	K3sConfigSpec infrabootstrapv1.K3sConfigSpec `json:"k3sConfigSpec,omitempty"`

	// RolloutAfter is a field to indicate a rollout should be performed
	// after the specified time even if no changes have been made to the
	// K3sControlPlane.
	//
	// +optional
	RolloutAfter *metav1.Time `json:"rolloutAfter,omitempty"`

	// The RolloutStrategy to use to replace control plane machines with
	// new ones.
	// +optional
	// +kubebuilder:default={type: "RollingUpdate", rollingUpdate: {maxSurge: 1}}
	RolloutStrategy *RolloutStrategy `json:"rolloutStrategy,omitempty"`
}
// K3sControlPlaneMachineTemplate defines the template for Machines
// in a K3sControlPlane object.
type K3sControlPlaneMachineTemplate struct {
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"`

	// InfrastructureRef is a required reference to a custom resource
	// offered by an infrastructure provider.
	InfrastructureRef corev1.ObjectReference `json:"infrastructureRef"`

	// NodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node.
	// The default value is 0, meaning that the node can be drained without any time limitations.
	// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`.
	// +optional
	NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"`

	// NodeDeletionTimeout defines how long the machine controller will attempt to delete the Node that the Machine
	// hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely.
	// If no value is provided, the default value for this property of the Machine resource will be used.
	// +optional
	NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"`
}

// RolloutStrategy describes how to replace existing machines
// with new ones.
type RolloutStrategy struct {
	// Type of rollout. Currently the only supported strategy is
	// "RollingUpdate".
	// Default is RollingUpdate.
	// +optional
	Type RolloutStrategyType `json:"type,omitempty"`

	// Rolling update config params. Present only if
	// RolloutStrategyType = RollingUpdate.
	// +optional
	RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"`
}

// RollingUpdate is used to control the desired behavior of rolling update.
type RollingUpdate struct {
	// The maximum number of control planes that can be scheduled above or under the
	// desired number of control planes.
	// Value can be an absolute number 1 or 0.
	// Defaults to 1.
	// Example: when this is set to 1, the control plane can be scaled
	// up immediately when the rolling update starts.
	// +optional
	MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
}
// K3sControlPlaneStatus defines the observed state of K3sControlPlane.
type K3sControlPlaneStatus struct {
	// Selector is the label selector in string format to avoid introspection
	// by clients, and is used to provide the CRD-based integration for the
	// scale subresource and additional integrations for things like kubectl
	// describe. The string will be in the same format as the query-param syntax.
	// More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors
	// +optional
	Selector string `json:"selector,omitempty"`

	// Replicas is the total number of non-terminated machines targeted by this control plane
	// (their labels match the selector).
	// +optional
	Replicas int32 `json:"replicas"`

	// Version represents the minimum Kubernetes version for the control plane machines
	// in the cluster.
	// +optional
	Version *string `json:"version,omitempty"`

	// UpdatedReplicas is the total number of non-terminated machines targeted by this control plane
	// that have the desired template spec.
	// +optional
	UpdatedReplicas int32 `json:"updatedReplicas"`

	// ReadyReplicas is the total number of fully running and ready control plane machines.
	// +optional
	ReadyReplicas int32 `json:"readyReplicas"`

	// UnavailableReplicas is the total number of unavailable machines targeted by this control plane.
	// This is the total number of machines that are still required for
	// the deployment to have 100% available capacity. They may either
	// be machines that are running but not yet ready or machines
	// that still have not been created.
	// +optional
	UnavailableReplicas int32 `json:"unavailableReplicas"`

	// Initialized denotes whether or not the control plane has completed
	// initialization.
	// NOTE(review): the original wording referred to the uploaded kubeadm-config
	// ConfigMap; confirm what marks a k3s control plane as initialized.
	// +optional
	Initialized bool `json:"initialized"`

	// Ready denotes that the K3sControlPlane API Server is ready to
	// receive requests.
	// +optional
	Ready bool `json:"ready"`

	// FailureReason indicates that there is a terminal problem reconciling the
	// state, and will be set to a token value suitable for
	// programmatic interpretation.
	// +optional
	FailureReason errors.KubeadmControlPlaneStatusError `json:"failureReason,omitempty"`

	// FailureMessage indicates that there is a terminal problem reconciling the
	// state, and will be set to a descriptive error message.
	// +optional
	FailureMessage *string `json:"failureMessage,omitempty"`

	// ObservedGeneration is the latest generation observed by the controller.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`

	// Conditions defines current service state of the K3sControlPlane.
	// +optional
	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=k3scontrolplanes,shortName=k3scp,scope=Namespaced,categories=cluster-api
// +kubebuilder:storageversion
// +kubebuilder:subresource:status

// K3sControlPlane is the Schema for the k3scontrolplanes API.
type K3sControlPlane struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   K3sControlPlaneSpec   `json:"spec,omitempty"`
	Status K3sControlPlaneStatus `json:"status,omitempty"`
}

// GetConditions returns the set of conditions for this object.
func (in *K3sControlPlane) GetConditions() clusterv1.Conditions {
	return in.Status.Conditions
}

// SetConditions sets the conditions on this object.
func (in *K3sControlPlane) SetConditions(conditions clusterv1.Conditions) {
	in.Status.Conditions = conditions
}

//+kubebuilder:object:root=true

// K3sControlPlaneList contains a list of K3sControlPlane.
type K3sControlPlaneList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []K3sControlPlane `json:"items"`
}

// Register the K3sControlPlane types with the scheme builder.
func init() {
	SchemeBuilder.Register(&K3sControlPlane{}, &K3sControlPlaneList{})
}

View File

@ -0,0 +1,461 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"encoding/json"
"fmt"
"strings"
"github.com/blang/semver"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/validation/field"
"sigs.k8s.io/cluster-api/util/version"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
)
// SetupWebhookWithManager registers the K3sControlPlane defaulting and
// validation webhooks with the given controller manager.
func (in *K3sControlPlane) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(in).
		Complete()
}
// +kubebuilder:webhook:verbs=create;update,path=/mutate-controlplane-cluster-x-k8s-io-v1beta1-k3scontrolplane,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=k3scontrolplanes,versions=v1beta1,name=default.k3scontrolplane.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
// +kubebuilder:webhook:verbs=create;update,path=/validate-controlplane-cluster-x-k8s-io-v1beta1-k3scontrolplane,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=k3scontrolplanes,versions=v1beta1,name=validation.k3scontrolplane.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Defaulter = &K3sControlPlane{}
var _ webhook.Validator = &K3sControlPlane{}
// Default implements webhook.Defaulter so a webhook will be registered for the type.
// It delegates all defaulting to defaultK3sControlPlaneSpec.
func (in *K3sControlPlane) Default() {
	defaultK3sControlPlaneSpec(&in.Spec, in.Namespace)
}
// defaultK3sControlPlaneSpec applies defaults to a K3sControlPlaneSpec:
// replicas default to 1, the infrastructure ref inherits the object's
// namespace, the version is normalized to a leading "v", embedded etcd
// (cluster-init) is enabled when no external datastore endpoint is set,
// and the rollout strategy defaults to RollingUpdate with maxSurge 1.
func defaultK3sControlPlaneSpec(s *K3sControlPlaneSpec, namespace string) {
	if s.Replicas == nil {
		replicas := int32(1)
		s.Replicas = &replicas
	}

	if s.MachineTemplate.InfrastructureRef.Namespace == "" {
		s.MachineTemplate.InfrastructureRef.Namespace = namespace
	}

	if !strings.HasPrefix(s.Version, "v") {
		s.Version = "v" + s.Version
	}

	// Default to embedded etcd (cluster-init) when no external datastore
	// endpoint is configured. The original condition also required
	// ClusterInit to already be true, which made the assignment a no-op;
	// it also dereferenced ServerConfiguration without a nil check, which
	// could panic before DefaultK3sConfigSpec had a chance to populate it.
	if s.K3sConfigSpec.ServerConfiguration != nil &&
		s.K3sConfigSpec.ServerConfiguration.Database.DataStoreEndPoint == "" {
		s.K3sConfigSpec.ServerConfiguration.Database.ClusterInit = true
	}

	infrabootstrapv1.DefaultK3sConfigSpec(&s.K3sConfigSpec)

	s.RolloutStrategy = defaultRolloutStrategy(s.RolloutStrategy)
}
// defaultRolloutStrategy ensures a rollout strategy exists and is fully
// populated: the type defaults to RollingUpdate and, for that type, maxSurge
// defaults to 1. The (possibly newly allocated) strategy is returned.
func defaultRolloutStrategy(rolloutStrategy *RolloutStrategy) *RolloutStrategy {
	if rolloutStrategy == nil {
		rolloutStrategy = &RolloutStrategy{}
	}
	// The original code re-checked rolloutStrategy != nil here, which is
	// always true after the assignment above; the dead check is removed.
	if len(rolloutStrategy.Type) == 0 {
		rolloutStrategy.Type = RollingUpdateStrategyType
	}
	if rolloutStrategy.Type == RollingUpdateStrategyType {
		if rolloutStrategy.RollingUpdate == nil {
			rolloutStrategy.RollingUpdate = &RollingUpdate{}
		}
		ios1 := intstr.FromInt(1)
		rolloutStrategy.RollingUpdate.MaxSurge = intstr.ValueOrDefault(rolloutStrategy.RollingUpdate.MaxSurge, ios1)
	}
	return rolloutStrategy
}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
// It validates the spec, the server configuration, and the K3s config spec.
func (in *K3sControlPlane) ValidateCreate() error {
	spec := in.Spec
	allErrs := validateK3sControlPlaneSpec(spec, in.Namespace, field.NewPath("spec"))
	allErrs = append(allErrs, validateServerConfiguration(spec.K3sConfigSpec.ServerConfiguration, nil, field.NewPath("spec", "k3sConfigSpec", "serverConfiguration"))...)
	allErrs = append(allErrs, spec.K3sConfigSpec.Validate(field.NewPath("spec", "k3sConfigSpec"))...)
	if len(allErrs) > 0 {
		// Report errors against the K3sControlPlane kind; the previous
		// "KubeadmControlPlane" was a copy-paste leftover from the kubeadm
		// provider (ValidateUpdate already uses the correct kind).
		return apierrors.NewInvalid(GroupVersion.WithKind("K3sControlPlane").GroupKind(), in.Name, allErrs)
	}

	return nil
}
// JSON field-path segments reused when building the allowed-paths list for updates.
const (
	spec            = "spec"
	k3sConfigSpec   = "k3sConfigSpec"
	preK3sCommands  = "preK3sCommands"
	postK3sCommands = "postK3sCommands"
	files           = "files"
)
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
// It computes a JSON merge patch between the old and new objects and rejects any
// changed path outside the explicit allow-list, then re-runs the spec validation
// plus the update-only version and server-configuration checks.
func (in *K3sControlPlane) ValidateUpdate(old runtime.Object) error {
	// add a * to indicate everything beneath is ok.
	// For example, {"spec", "*"} will allow any path under "spec" to change.
	allowedPaths := [][]string{
		{"metadata", "*"},
		{spec, k3sConfigSpec, preK3sCommands},
		{spec, k3sConfigSpec, postK3sCommands},
		{spec, k3sConfigSpec, files},
		{spec, "machineTemplate", "metadata", "*"},
		{spec, "machineTemplate", "infrastructureRef", "apiVersion"},
		{spec, "machineTemplate", "infrastructureRef", "name"},
		{spec, "machineTemplate", "infrastructureRef", "kind"},
		{spec, "machineTemplate", "nodeDrainTimeout"},
		{spec, "machineTemplate", "nodeDeletionTimeout"},
		{spec, "replicas"},
		{spec, "version"},
		{spec, "rolloutAfter"},
		{spec, "rolloutStrategy", "*"},
	}

	allErrs := validateK3sControlPlaneSpec(in.Spec, in.Namespace, field.NewPath("spec"))

	prev, ok := old.(*K3sControlPlane)
	if !ok {
		return apierrors.NewBadRequest(fmt.Sprintf("expecting K3sControlPlane but got a %T", old))
	}

	// Marshal both objects and diff them as a JSON merge patch to discover
	// exactly which fields this update request is changing.
	originalJSON, err := json.Marshal(prev)
	if err != nil {
		return apierrors.NewInternalError(err)
	}
	modifiedJSON, err := json.Marshal(in)
	if err != nil {
		return apierrors.NewInternalError(err)
	}

	diff, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON)
	if err != nil {
		return apierrors.NewInternalError(err)
	}
	jsonPatch := map[string]interface{}{}
	if err := json.Unmarshal(diff, &jsonPatch); err != nil {
		return apierrors.NewInternalError(err)
	}
	// Build a list of all paths that are trying to change
	diffpaths := paths([]string{}, jsonPatch)
	// Every path in the diff must be valid for the update function to work.
	for _, path := range diffpaths {
		// Ignore paths that are empty
		if len(path) == 0 {
			continue
		}
		if !allowed(allowedPaths, path) {
			if len(path) == 1 {
				allErrs = append(allErrs, field.Forbidden(field.NewPath(path[0]), "cannot be modified"))
				continue
			}
			allErrs = append(allErrs, field.Forbidden(field.NewPath(path[0], path[1:]...), "cannot be modified"))
		}
	}

	allErrs = append(allErrs, in.validateVersion(prev.Spec.Version)...)
	allErrs = append(allErrs, validateServerConfiguration(in.Spec.K3sConfigSpec.ServerConfiguration, prev.Spec.K3sConfigSpec.ServerConfiguration, field.NewPath("spec", "k3sConfigSpec", "serverConfiguration"))...)
	// NOTE(review): this path uses "K3sConfigSpec" (upper-case K) while
	// ValidateCreate uses "k3sConfigSpec" — confirm which casing is intended.
	allErrs = append(allErrs, in.Spec.K3sConfigSpec.Validate(field.NewPath("spec", "K3sConfigSpec"))...)

	if len(allErrs) > 0 {
		return apierrors.NewInvalid(GroupVersion.WithKind("K3sControlPlane").GroupKind(), in.Name, allErrs)
	}

	return nil
}
// validateK3sControlPlaneSpec validates the structural requirements of a
// K3sControlPlaneSpec: replicas must be set and positive, the infrastructure
// ref must be fully specified and live in the object's namespace, the version
// must be a valid semantic version, and the rollout strategy must be well formed.
func validateK3sControlPlaneSpec(s K3sControlPlaneSpec, namespace string, pathPrefix *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}

	if s.Replicas == nil {
		allErrs = append(
			allErrs,
			field.Required(
				pathPrefix.Child("replicas"),
				"is required",
			),
		)
	} else if *s.Replicas <= 0 {
		// The use of the scale subresource should provide a guarantee that negative values
		// should not be accepted for this field, but since we have to validate that Replicas != 0
		// it doesn't hurt to also additionally validate for negative numbers here as well.
		allErrs = append(
			allErrs,
			field.Forbidden(
				pathPrefix.Child("replicas"),
				"cannot be less than or equal to 0",
			),
		)
	}

	// NOTE(review): the error paths below use the segment "infrastructure"
	// while the JSON field is "infrastructureRef" — confirm this is intended.
	if s.MachineTemplate.InfrastructureRef.APIVersion == "" {
		allErrs = append(
			allErrs,
			field.Invalid(
				pathPrefix.Child("machineTemplate", "infrastructure", "apiVersion"),
				s.MachineTemplate.InfrastructureRef.APIVersion,
				"cannot be empty",
			),
		)
	}
	if s.MachineTemplate.InfrastructureRef.Kind == "" {
		allErrs = append(
			allErrs,
			field.Invalid(
				pathPrefix.Child("machineTemplate", "infrastructure", "kind"),
				s.MachineTemplate.InfrastructureRef.Kind,
				"cannot be empty",
			),
		)
	}
	if s.MachineTemplate.InfrastructureRef.Name == "" {
		allErrs = append(
			allErrs,
			field.Invalid(
				pathPrefix.Child("machineTemplate", "infrastructure", "name"),
				s.MachineTemplate.InfrastructureRef.Name,
				"cannot be empty",
			),
		)
	}
	if s.MachineTemplate.InfrastructureRef.Namespace != namespace {
		allErrs = append(
			allErrs,
			field.Invalid(
				pathPrefix.Child("machineTemplate", "infrastructure", "namespace"),
				s.MachineTemplate.InfrastructureRef.Namespace,
				"must match metadata.namespace",
			),
		)
	}

	if !version.KubeSemver.MatchString(s.Version) {
		allErrs = append(allErrs, field.Invalid(pathPrefix.Child("version"), s.Version, "must be a valid semantic version"))
	}

	allErrs = append(allErrs, validateRolloutStrategy(s.RolloutStrategy, s.Replicas, pathPrefix.Child("rolloutStrategy"))...)

	return allErrs
}
// validateRolloutStrategy checks that the rollout strategy, when present, is a
// RollingUpdate with a maxSurge of 0 or 1, and that scale-in (maxSurge 0) is
// only used when at least 3 replicas are requested.
func validateRolloutStrategy(rolloutStrategy *RolloutStrategy, replicas *int32, pathPrefix *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}

	if rolloutStrategy == nil {
		return allErrs
	}

	if rolloutStrategy.Type != RollingUpdateStrategyType {
		allErrs = append(
			allErrs,
			field.Required(
				pathPrefix.Child("type"),
				"only RollingUpdateStrategyType is supported",
			),
		)
	}

	// Guard against a nil RollingUpdate/MaxSurge: the defaulting webhook
	// normally fills these in, but validation must not assume it ran — the
	// original code dereferenced MaxSurge unconditionally and could panic.
	if rolloutStrategy.RollingUpdate == nil || rolloutStrategy.RollingUpdate.MaxSurge == nil {
		return allErrs
	}

	ios1 := intstr.FromInt(1)
	ios0 := intstr.FromInt(0)

	if *rolloutStrategy.RollingUpdate.MaxSurge == ios0 && (replicas != nil && *replicas < int32(3)) {
		allErrs = append(
			allErrs,
			field.Required(
				pathPrefix.Child("rollingUpdate"),
				"when K3sControlPlane is configured to scale-in, replica count needs to be at least 3",
			),
		)
	}

	if *rolloutStrategy.RollingUpdate.MaxSurge != ios1 && *rolloutStrategy.RollingUpdate.MaxSurge != ios0 {
		allErrs = append(
			allErrs,
			field.Required(
				pathPrefix.Child("rollingUpdate", "maxSurge"),
				"value must be 1 or 0",
			),
		)
	}

	return allErrs
}
// validateServerConfiguration checks that a k3s server configuration does not
// mix a local (clusterInit) etcd with an external datastore endpoint, and — on
// update, when oldServerConfiguration is non-nil — that the configuration does
// not switch between external and local etcd.
func validateServerConfiguration(newServerConfiguration, oldServerConfiguration *infrabootstrapv1.ServerConfiguration, pathPrefix *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}

	if newServerConfiguration == nil {
		return allErrs
	}

	wantsLocalEtcd := newServerConfiguration.Database.ClusterInit
	wantsExternalStore := newServerConfiguration.Database.DataStoreEndPoint != ""

	if wantsLocalEtcd && wantsExternalStore {
		allErrs = append(
			allErrs,
			field.Forbidden(
				pathPrefix.Child("database", "clusterInit"),
				"cannot have both external and local etcd",
			),
		)
	}

	// Update-only validations: the datastore kind is immutable.
	if oldServerConfiguration != nil {
		if wantsLocalEtcd && oldServerConfiguration.Database.DataStoreEndPoint != "" {
			allErrs = append(
				allErrs,
				field.Forbidden(
					pathPrefix.Child("database", "clusterInit"),
					"cannot change between external and local etcd",
				),
			)
		}
		if wantsExternalStore && oldServerConfiguration.Database.ClusterInit {
			allErrs = append(
				allErrs,
				field.Forbidden(
					pathPrefix.Child("database", "dataStoreEndPoint"),
					"cannot change between external and local etcd",
				),
			)
		}
	}

	return allErrs
}
// allowed reports whether path matches at least one entry in allowList.
func allowed(allowList [][]string, path []string) bool {
	for i := range allowList {
		if pathsMatch(allowList[i], path) {
			return true
		}
	}
	return false
}
// pathsMatch reports whether path matches the allowed path pattern.
// A "*" segment in allowed matches the remainder of path. A match requires
// path to reach (at least) the last segment of allowed; a path that is a
// strict prefix of allowed does not match.
func pathsMatch(allowed, path []string) bool {
	// Neither side may be empty — nothing can be matched.
	if len(allowed) == 0 || len(path) == 0 {
		return false
	}

	last := 0
	for idx, segment := range path {
		last = idx
		// path is longer than allowed and no wildcard was seen: no match.
		if idx >= len(allowed) {
			return false
		}
		// Wildcard consumes everything that follows.
		if allowed[idx] == "*" {
			return true
		}
		if segment != allowed[idx] {
			return false
		}
	}

	// Every path segment matched; succeed only if we consumed allowed
	// up to its final segment (e.g. allowed {"a","b","c"} vs path {"a"} fails).
	return last >= len(allowed)-1
}
// paths builds the list of field paths touched by diff, one entry per leaf
// key, each prefixed with the accumulated path segments.
func paths(path []string, diff map[string]interface{}) [][]string {
	allPaths := [][]string{}
	for key, value := range diff {
		if nested, isMap := value.(map[string]interface{}); isMap {
			// Recurse into nested maps; leaves are copied inside the recursion.
			allPaths = append(allPaths, paths(append(path, key), nested)...)
			continue
		}
		// Leaf value: store a detached copy of path+key so later iterations
		// cannot mutate the slice we just recorded.
		leaf := make([]string, 0, len(path)+1)
		leaf = append(leaf, path...)
		leaf = append(leaf, key)
		allPaths = append(allPaths, leaf)
	}
	return allPaths
}
// validateVersion checks that moving from previousVersion to the version
// currently set in the spec is an allowed Kubernetes upgrade: both versions
// must parse, v1.19.0 is never a valid upgrade target, and skipping more than
// one minor version is forbidden.
func (in *K3sControlPlane) validateVersion(previousVersion string) (allErrs field.ErrorList) {
	versionPath := field.NewPath("spec", "version")

	fromVersion, err := version.ParseMajorMinorPatch(previousVersion)
	if err != nil {
		return append(allErrs,
			field.InternalError(
				versionPath,
				errors.Wrapf(err, "failed to parse current k3scontrolplane version: %s", previousVersion),
			),
		)
	}

	toVersion, err := version.ParseMajorMinorPatch(in.Spec.Version)
	if err != nil {
		return append(allErrs,
			field.InternalError(
				versionPath,
				errors.Wrapf(err, "failed to parse updated k3scontrolplane version: %s", in.Spec.Version),
			),
		)
	}

	// Upgrading to Kubernetes v1.19.0 is explicitly unsupported.
	//
	// See https://github.com/kubernetes-sigs/cluster-api/issues/3564
	if fromVersion.NE(toVersion) && toVersion.Equals(semver.MustParse("1.19.0")) {
		return append(allErrs,
			field.Forbidden(
				versionPath,
				"cannot update Kubernetes version to v1.19.0, for more information see https://github.com/kubernetes-sigs/cluster-api/issues/3564",
			),
		)
	}

	// Upgrades to the next minor version are allowed irrespective of the patch
	// version, so the first forbidden version is two minors above the current one.
	ceilVersion := semver.Version{
		Major: fromVersion.Major,
		Minor: fromVersion.Minor + 2,
		Patch: 0,
	}
	if toVersion.GTE(ceilVersion) {
		allErrs = append(allErrs,
			field.Forbidden(
				versionPath,
				fmt.Sprintf("cannot update Kubernetes version from %s to %s", previousVersion, in.Spec.Version),
			),
		)
	}

	return allErrs
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (in *K3sControlPlane) ValidateDelete() error {
	// Deletion is always allowed; no validation is performed.
	return nil
}

View File

@ -0,0 +1,106 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
)
// K3sControlPlaneTemplateSpec defines the desired state of K3sControlPlaneTemplate.
type K3sControlPlaneTemplateSpec struct {
	// Template is the control plane spec stamped out for every Cluster that uses this template.
	Template K3sControlPlaneTemplateResource `json:"template"`
}
// K3sControlPlaneTemplateResource describes the data needed to create a K3sControlPlane from a template.
type K3sControlPlaneTemplateResource struct {
	// Spec holds the template's control plane spec (without Replicas/Version).
	Spec K3sControlPlaneTemplateResourceSpec `json:"spec"`
}
// K3sControlPlaneTemplateResourceSpec defines the desired state of K3sControlPlane.
// NOTE: K3sControlPlaneTemplateResourceSpec is similar to K3sControlPlaneSpec but
// omits Replicas and Version fields. These fields do not make sense on the K3sControlPlaneTemplate,
// because they are calculated by the Cluster topology reconciler during reconciliation and thus cannot
// be configured on the K3sControlPlaneTemplate.
type K3sControlPlaneTemplateResourceSpec struct {
	// MachineTemplate contains information about how machines
	// should be shaped when creating or updating a control plane.
	// +optional
	MachineTemplate *K3sControlPlaneTemplateMachineTemplate `json:"machineTemplate,omitempty"`

	// K3sConfigSpec is a K3sConfigSpec
	// to use for initializing and joining machines to the control plane.
	K3sConfigSpec infrabootstrapv1.K3sConfigSpec `json:"k3sConfigSpec"`

	// RolloutAfter is a field to indicate a rollout should be performed
	// after the specified time even if no changes have been made to the
	// K3sControlPlane.
	//
	// +optional
	RolloutAfter *metav1.Time `json:"rolloutAfter,omitempty"`

	// The RolloutStrategy to use to replace control plane machines with
	// new ones.
	// +optional
	// +kubebuilder:default={type: "RollingUpdate", rollingUpdate: {maxSurge: 1}}
	RolloutStrategy *RolloutStrategy `json:"rolloutStrategy,omitempty"`
}
// K3sControlPlaneTemplateMachineTemplate defines the template for Machines
// in a K3sControlPlaneTemplate object.
// NOTE: K3sControlPlaneTemplateMachineTemplate is similar to K3sControlPlaneMachineTemplate but
// omits ObjectMeta and InfrastructureRef fields. These fields do not make sense on the K3sControlPlaneTemplate,
// because they are calculated by the Cluster topology reconciler during reconciliation and thus cannot
// be configured on the K3sControlPlaneTemplate.
type K3sControlPlaneTemplateMachineTemplate struct {
	// NodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node
	// The default value is 0, meaning that the node can be drained without any time limitations.
	// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
	// +optional
	NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"`

	// NodeDeletionTimeout defines how long the machine controller will attempt to delete the Node that the Machine
	// hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely.
	// If no value is provided, the default value for this property of the Machine resource will be used.
	// +optional
	NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"`
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// K3sControlPlaneTemplate is the Schema for the k3scontrolplanetemplates API.
type K3sControlPlaneTemplate struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the desired state of the K3sControlPlaneTemplate.
	Spec K3sControlPlaneTemplateSpec `json:"spec,omitempty"`
}
//+kubebuilder:object:root=true

// K3sControlPlaneTemplateList contains a list of K3sControlPlaneTemplate.
type K3sControlPlaneTemplateList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	// Items is the list of K3sControlPlaneTemplates.
	Items []K3sControlPlaneTemplate `json:"items"`
}
// init registers the template types with the package's scheme builder.
func init() {
	SchemeBuilder.Register(&K3sControlPlaneTemplate{}, &K3sControlPlaneTemplateList{})
}

View File

@ -0,0 +1,106 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
"reflect"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"sigs.k8s.io/cluster-api/feature"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
)
// k3sControlPlaneTemplateImmutableMsg is returned by the validating webhook
// when spec.template.spec is mutated on update.
const k3sControlPlaneTemplateImmutableMsg = "K3sControlPlaneTemplate spec.template.spec field is immutable. Please create new resource instead."
// SetupWebhookWithManager registers the K3sControlPlaneTemplate defaulting and
// validating webhooks with mgr.
func (r *K3sControlPlaneTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(r).
		Complete()
}
// +kubebuilder:webhook:verbs=create;update,path=/mutate-controlplane-cluster-x-k8s-io-v1beta1-k3scontrolplanetemplate,mutating=true,failurePolicy=fail,groups=controlplane.cluster.x-k8s.io,resources=k3scontrolplanetemplates,versions=v1beta1,name=default.k3scontrolplanetemplate.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Defaulter = &K3sControlPlaneTemplate{}
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (r *K3sControlPlaneTemplate) Default() {
	// Apply K3s bootstrap-config defaults, then default the rollout strategy
	// (RollingUpdate with maxSurge=1) when it is unset.
	infrabootstrapv1.DefaultK3sConfigSpec(&r.Spec.Template.Spec.K3sConfigSpec)
	r.Spec.Template.Spec.RolloutStrategy = defaultRolloutStrategy(r.Spec.Template.Spec.RolloutStrategy)
}
// +kubebuilder:webhook:verbs=create;update,path=/validate-controlplane-cluster-x-k8s-io-v1beta1-k3scontrolplanetemplate,mutating=false,failurePolicy=fail,groups=controlplane.cluster.x-k8s.io,resources=k3scontrolplanetemplates,versions=v1beta1,name=validation.k3scontrolplanetemplate.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
var _ webhook.Validator = &K3sControlPlaneTemplate{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *K3sControlPlaneTemplate) ValidateCreate() error {
	// NOTE: K3sControlPlaneTemplate is behind the ClusterTopology feature gate flag; the webhook
	// must prevent creating new objects in case the feature flag is disabled.
	if !feature.Gates.Enabled(feature.ClusterTopology) {
		return field.Forbidden(
			field.NewPath("spec"),
			"can be set only if the ClusterTopology feature flag is enabled",
		)
	}

	// Validate the rollout strategy, the server configuration (no old value on
	// create, hence nil), and the embedded K3s bootstrap config spec.
	spec := r.Spec.Template.Spec
	allErrs := validateK3sControlPlaneTemplateResourceSpec(spec, field.NewPath("spec", "template", "spec"))
	allErrs = append(allErrs, validateServerConfiguration(spec.K3sConfigSpec.ServerConfiguration, nil, field.NewPath("spec", "template", "spec", "k3sConfigSpec", "serverConfiguration"))...)
	allErrs = append(allErrs, spec.K3sConfigSpec.Validate(field.NewPath("spec", "template", "spec", "k3sConfigSpec"))...)
	if len(allErrs) > 0 {
		return apierrors.NewInvalid(GroupVersion.WithKind("K3sControlPlaneTemplate").GroupKind(), r.Name, allErrs)
	}
	return nil
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (r *K3sControlPlaneTemplate) ValidateUpdate(oldRaw runtime.Object) error {
	previous, ok := oldRaw.(*K3sControlPlaneTemplate)
	if !ok {
		return apierrors.NewBadRequest(fmt.Sprintf("expected a K3sControlPlaneTemplate but got a %T", oldRaw))
	}

	// The template spec is immutable after creation.
	if reflect.DeepEqual(r.Spec.Template.Spec, previous.Spec.Template.Spec) {
		return nil
	}

	allErrs := field.ErrorList{
		field.Invalid(field.NewPath("spec", "template", "spec"), r, k3sControlPlaneTemplateImmutableMsg),
	}
	return apierrors.NewInvalid(GroupVersion.WithKind("K3sControlPlaneTemplate").GroupKind(), r.Name, allErrs)
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *K3sControlPlaneTemplate) ValidateDelete() error {
	// Deletion is always allowed; no validation is performed.
	return nil
}
// validateK3sControlPlaneTemplateResourceSpec is a copy of validateK3sControlPlaneSpec which
// only validates the fields in K3sControlPlaneTemplateResourceSpec we care about.
// Replicas are unknown at template level, so nil is passed for the replica count.
func validateK3sControlPlaneTemplateResourceSpec(s K3sControlPlaneTemplateResourceSpec, pathPrefix *field.Path) field.ErrorList {
	return validateRolloutStrategy(s.RolloutStrategy, nil, pathPrefix.Child("rolloutStrategy"))
}

View File

@ -0,0 +1,363 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1beta1
import (
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sControlPlane) DeepCopyInto(out *K3sControlPlane) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlane.
func (in *K3sControlPlane) DeepCopy() *K3sControlPlane {
	if in == nil {
		return nil
	}
	out := new(K3sControlPlane)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *K3sControlPlane) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sControlPlaneList) DeepCopyInto(out *K3sControlPlaneList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]K3sControlPlane, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneList.
func (in *K3sControlPlaneList) DeepCopy() *K3sControlPlaneList {
	if in == nil {
		return nil
	}
	out := new(K3sControlPlaneList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *K3sControlPlaneList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sControlPlaneMachineTemplate) DeepCopyInto(out *K3sControlPlaneMachineTemplate) {
	*out = *in
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.InfrastructureRef = in.InfrastructureRef
	if in.NodeDrainTimeout != nil {
		in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout
		*out = new(v1.Duration)
		**out = **in
	}
	if in.NodeDeletionTimeout != nil {
		in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout
		*out = new(v1.Duration)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneMachineTemplate.
func (in *K3sControlPlaneMachineTemplate) DeepCopy() *K3sControlPlaneMachineTemplate {
	if in == nil {
		return nil
	}
	out := new(K3sControlPlaneMachineTemplate)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sControlPlaneSpec) DeepCopyInto(out *K3sControlPlaneSpec) {
	*out = *in
	if in.Replicas != nil {
		in, out := &in.Replicas, &out.Replicas
		*out = new(int32)
		**out = **in
	}
	in.MachineTemplate.DeepCopyInto(&out.MachineTemplate)
	in.K3sConfigSpec.DeepCopyInto(&out.K3sConfigSpec)
	if in.RolloutAfter != nil {
		in, out := &in.RolloutAfter, &out.RolloutAfter
		*out = (*in).DeepCopy()
	}
	if in.RolloutStrategy != nil {
		in, out := &in.RolloutStrategy, &out.RolloutStrategy
		*out = new(RolloutStrategy)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneSpec.
func (in *K3sControlPlaneSpec) DeepCopy() *K3sControlPlaneSpec {
	if in == nil {
		return nil
	}
	out := new(K3sControlPlaneSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sControlPlaneStatus) DeepCopyInto(out *K3sControlPlaneStatus) {
	*out = *in
	if in.Version != nil {
		in, out := &in.Version, &out.Version
		*out = new(string)
		**out = **in
	}
	if in.FailureMessage != nil {
		in, out := &in.FailureMessage, &out.FailureMessage
		*out = new(string)
		**out = **in
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make(apiv1beta1.Conditions, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneStatus.
func (in *K3sControlPlaneStatus) DeepCopy() *K3sControlPlaneStatus {
	if in == nil {
		return nil
	}
	out := new(K3sControlPlaneStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sControlPlaneTemplate) DeepCopyInto(out *K3sControlPlaneTemplate) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneTemplate.
func (in *K3sControlPlaneTemplate) DeepCopy() *K3sControlPlaneTemplate {
	if in == nil {
		return nil
	}
	out := new(K3sControlPlaneTemplate)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *K3sControlPlaneTemplate) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sControlPlaneTemplateList) DeepCopyInto(out *K3sControlPlaneTemplateList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]K3sControlPlaneTemplate, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneTemplateList.
func (in *K3sControlPlaneTemplateList) DeepCopy() *K3sControlPlaneTemplateList {
	if in == nil {
		return nil
	}
	out := new(K3sControlPlaneTemplateList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *K3sControlPlaneTemplateList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sControlPlaneTemplateMachineTemplate) DeepCopyInto(out *K3sControlPlaneTemplateMachineTemplate) {
	*out = *in
	if in.NodeDrainTimeout != nil {
		in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout
		*out = new(v1.Duration)
		**out = **in
	}
	if in.NodeDeletionTimeout != nil {
		in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout
		*out = new(v1.Duration)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneTemplateMachineTemplate.
func (in *K3sControlPlaneTemplateMachineTemplate) DeepCopy() *K3sControlPlaneTemplateMachineTemplate {
	if in == nil {
		return nil
	}
	out := new(K3sControlPlaneTemplateMachineTemplate)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sControlPlaneTemplateResource) DeepCopyInto(out *K3sControlPlaneTemplateResource) {
	*out = *in
	in.Spec.DeepCopyInto(&out.Spec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneTemplateResource.
func (in *K3sControlPlaneTemplateResource) DeepCopy() *K3sControlPlaneTemplateResource {
	if in == nil {
		return nil
	}
	out := new(K3sControlPlaneTemplateResource)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sControlPlaneTemplateResourceSpec) DeepCopyInto(out *K3sControlPlaneTemplateResourceSpec) {
	*out = *in
	if in.MachineTemplate != nil {
		in, out := &in.MachineTemplate, &out.MachineTemplate
		*out = new(K3sControlPlaneTemplateMachineTemplate)
		(*in).DeepCopyInto(*out)
	}
	in.K3sConfigSpec.DeepCopyInto(&out.K3sConfigSpec)
	if in.RolloutAfter != nil {
		in, out := &in.RolloutAfter, &out.RolloutAfter
		*out = (*in).DeepCopy()
	}
	if in.RolloutStrategy != nil {
		in, out := &in.RolloutStrategy, &out.RolloutStrategy
		*out = new(RolloutStrategy)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneTemplateResourceSpec.
func (in *K3sControlPlaneTemplateResourceSpec) DeepCopy() *K3sControlPlaneTemplateResourceSpec {
	if in == nil {
		return nil
	}
	out := new(K3sControlPlaneTemplateResourceSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K3sControlPlaneTemplateSpec) DeepCopyInto(out *K3sControlPlaneTemplateSpec) {
	*out = *in
	in.Template.DeepCopyInto(&out.Template)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K3sControlPlaneTemplateSpec.
func (in *K3sControlPlaneTemplateSpec) DeepCopy() *K3sControlPlaneTemplateSpec {
	if in == nil {
		return nil
	}
	out := new(K3sControlPlaneTemplateSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) {
	*out = *in
	if in.MaxSurge != nil {
		in, out := &in.MaxSurge, &out.MaxSurge
		*out = new(intstr.IntOrString)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate.
func (in *RollingUpdate) DeepCopy() *RollingUpdate {
	if in == nil {
		return nil
	}
	out := new(RollingUpdate)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RolloutStrategy) DeepCopyInto(out *RolloutStrategy) {
	*out = *in
	if in.RollingUpdate != nil {
		in, out := &in.RollingUpdate, &out.RollingUpdate
		*out = new(RollingUpdate)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutStrategy.
func (in *RolloutStrategy) DeepCopy() *RolloutStrategy {
	if in == nil {
		return nil
	}
	out := new(RolloutStrategy)
	in.DeepCopyInto(out)
	return out
}

View File

@ -0,0 +1,25 @@
# The following manifests contain a self-signed issuer CR and a certificate CR.
# More documentation can be found at https://docs.cert-manager.io
# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for breaking changes
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned-issuer
namespace: system
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: serving-cert # this name should match the one that appears in kustomizeconfig.yaml
namespace: system
spec:
# $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
dnsNames:
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
issuerRef:
kind: Issuer
name: selfsigned-issuer
secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize

View File

@ -0,0 +1,5 @@
resources:
- certificate.yaml
configurations:
- kustomizeconfig.yaml

View File

@ -0,0 +1,19 @@
# This configuration is for teaching kustomize how to update name ref and var substitution
nameReference:
- kind: Issuer
group: cert-manager.io
fieldSpecs:
- kind: Certificate
group: cert-manager.io
path: spec/issuerRef/name
varReference:
- kind: Certificate
group: cert-manager.io
path: spec/commonName
- kind: Certificate
group: cert-manager.io
path: spec/dnsNames
- kind: Certificate
group: cert-manager.io
path: spec/secretName

Some files were not shown because too many files have changed in this diff Show More