feat: add k3s provider e2e test

Signed-off-by: 24sama <jacksama@foxmail.com>
24sama 2022-10-31 10:34:52 +08:00
parent 7163d6d24a
commit b492f1c6d3
87 changed files with 8107 additions and 330 deletions

.github/Jenkinsfile vendored
View File

@ -1,34 +0,0 @@
pipeline {
agent {
node {
label 'go18'
}
}
stages {
stage('Test') {
steps {
container('go') {
sh 'go mod tidy'
sh '''
if [ -n "$(git status --porcelain)" ]; then
echo 'To fix this check, run "go mod tidy"'
git status # Show the files that failed to pass the check.
exit 1
fi
'''
}
}
}
stage('Build') {
steps {
container('go') {
sh 'make kk'
}
}
}
}
}

View File

@ -24,5 +24,5 @@ jobs:
- name: golangci-lint
uses: golangci/golangci-lint-action@v3.2.0
with:
version: v1.49.0
version: v1.50.1
working-directory: ${{matrix.working-directory}}

View File

@ -126,14 +126,44 @@ linters-settings:
alias: infrav1
- pkg: github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1
alias: infrabootstrapv1
- pkg: github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1
alias: infracontrolplanev1
nolintlint:
allow-unused: false
allow-leading-space: false
require-specific: true
staticcheck:
go: "1.19"
stylecheck:
go: "1.19"
revive:
rules:
# The following rules are recommended https://github.com/mgechev/revive#recommended-configuration
- name: blank-imports
- name: context-as-argument
- name: context-keys-type
- name: dot-imports
- name: error-return
- name: error-strings
- name: error-naming
- name: exported
#- name: if-return # TODO This is a recommended rule with many findings which may require its own PR.
- name: increment-decrement
- name: var-naming
- name: var-declaration
- name: package-comments
- name: range
- name: receiver-naming
- name: time-naming
- name: unexported-return
- name: indent-error-flow
- name: errorf
- name: empty-block
- name: superfluous-else
#- name: unused-parameter # TODO This is a recommended rule with many findings which may require its own PR.
- name: unreachable-code
- name: redefines-builtin-id
#
# Rules in addition to the recommended configuration above.
#
- name: bool-literal-in-expr
- name: constant-logical-expr
gosec:
excludes:
- G307 # Deferring unsafe method "Close" on type "\*os.File"
@ -157,7 +187,8 @@ linters-settings:
- wrapperFunc
- commentFormatting
- filepathJoin
- commentedOutCode
- rangeValCopy
- hugeParam
issues:
max-same-issues: 0
max-issues-per-linter: 0
@ -243,6 +274,7 @@ issues:
run:
timeout: 10m
go: "1.19"
build-tags:
- tools
- e2e
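
For orientation, the new "e2e" build tag above means the linters now also load files guarded by that tag. A purely illustrative skeleton of such a gated test file (names are hypothetical, not taken from this commit) would look like:

//go:build e2e
// +build e2e

package e2e_test

import "testing"

// TestK3sProviderSmoke is only a placeholder to show the build-tag gating;
// the actual e2e suites added by this commit are not shown in this excerpt.
func TestK3sProviderSmoke(t *testing.T) {
	t.Log("runs only when compiled with -tags e2e")
}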

View File

@ -133,6 +133,7 @@ generate-manifests-capkk: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e
$(MAKE) clean-generated-yaml SRC_DIRS="./config/crd/bases"
$(CONTROLLER_GEN) \
paths=./api/... \
paths=./controllers/... \
crd:crdVersions=v1 \
rbac:roleName=manager-role \
output:crd:dir=./config/crd/bases \
@ -144,6 +145,7 @@ generate-manifests-k3s-bootstrap: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate man
$(MAKE) clean-generated-yaml SRC_DIRS="./bootstrap/k3s/config/crd/bases"
$(CONTROLLER_GEN) \
paths=./bootstrap/k3s/api/... \
paths=./bootstrap/k3s/controllers/... \
crd:crdVersions=v1 \
rbac:roleName=manager-role \
output:crd:dir=./bootstrap/k3s/config/crd/bases \
@ -156,6 +158,7 @@ generate-manifests-k3s-control-plane: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate
$(MAKE) clean-generated-yaml SRC_DIRS="./controlplane/k3s/config/crd/bases"
$(CONTROLLER_GEN) \
paths=./controlplane/k3s/api/... \
paths=./controlplane/k3s/controllers/... \
crd:crdVersions=v1 \
rbac:roleName=manager-role \
output:crd:dir=./controlplane/k3s/config/crd/bases \

View File

@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2020 The KubeSphere Authors.
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -27,6 +27,12 @@ type ServerConfiguration struct {
// Networking is the networking configuration.
Networking Networking `json:"networking,omitempty"`
// KubernetesComponents is the kubernetes components configuration.
KubernetesComponents KubernetesComponents `json:"kubernetesComponents,omitempty"`
// KubernetesProcesses is the kubernetes processes configuration.
KubernetesProcesses KubernetesProcesses `json:"kubernetesProcesses,omitempty"`
// Agent is the agent configuration.
Agent AgentConfiguration `json:"agent,omitempty"`
}
@ -41,6 +47,9 @@ type AgentConfiguration struct {
// Networking defines the k3s agent networking configuration.
Networking AgentNetworking `json:"networking,omitempty"`
// KubernetesAgentProcesses defines the k3s agent kubernetes processes configuration.
KubernetesAgentProcesses KubernetesAgentProcesses `json:"kubernetesAgentProcesses,omitempty"`
}
// Database defines the desired state of k3s database configuration.
@ -58,7 +67,7 @@ type Database struct {
DataStoreKeyFile string `json:"dataStoreKeyFile,omitempty"`
// ClusterInit initialize a new cluster using embedded Etcd.
ClusterInit bool `json:"clusterInit,omitempty"`
ClusterInit *bool `json:"clusterInit,omitempty"`
}
// Cluster is the desired state of k3s cluster configuration.
@ -159,3 +168,45 @@ type AgentNetworking struct {
// ResolvConf Path to Kubelet resolv.conf file.
ResolvConf string `json:"resolvConf,omitempty"`
}
// KubernetesComponents defines the desired state of k3s kubernetes components configuration.
type KubernetesComponents struct {
// Disable do not deploy packaged components and delete any deployed components
// (valid items: coredns, servicelb, traefik,local-storage, metrics-server).
Disable string `json:"disable,omitempty"`
// DisableKubeProxy disable running kube-proxy.
DisableKubeProxy bool `json:"disableKubeProxy,omitempty"`
// DisableNetworkPolicy disable k3s default network policy controller.
DisableNetworkPolicy bool `json:"disableNetworkPolicy,omitempty"`
// DisableHelmController disable Helm controller.
DisableHelmController bool `json:"disableHelmController,omitempty"`
}
// KubernetesProcesses defines the desired state of kubernetes processes configuration.
type KubernetesProcesses struct {
// KubeAPIServerArgs is a customized flag for kube-apiserver process
// +optional
KubeAPIServerArgs []string `json:"kubeAPIServerArg,omitempty"`
// KubeControllerManagerArgs is a customized flag for kube-controller-manager process
// +optional
KubeControllerManagerArgs []string `json:"kubeControllerManagerArgs,omitempty"`
// KubeSchedulerArgs is a customized flag for kube-scheduler process
// +optional
KubeSchedulerArgs []string `json:"kubeSchedulerArgs,omitempty"`
}
// KubernetesAgentProcesses defines the desired state of kubernetes agent processes configuration.
type KubernetesAgentProcesses struct {
// KubeletArgs Customized flag for kubelet process
// +optional
KubeletArgs []string `json:"kubeletArgs,omitempty"`
// KubeProxyArgs Customized flag for kube-proxy process
// +optional
KubeProxyArgs []string `json:"kubeProxyArgs,omitempty"`
}
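
As a rough sketch of how the new fields are meant to be used (field values are illustrative, not defaults from this commit), a K3sConfigSpec could combine them like this:

package example

import (
	infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
)

// exampleSpec composes the new KubernetesComponents, KubernetesProcesses and
// KubernetesAgentProcesses fields inside a K3sConfigSpec; the values are made
// up for illustration.
var exampleSpec = infrabootstrapv1.K3sConfigSpec{
	ServerConfiguration: &infrabootstrapv1.ServerConfiguration{
		KubernetesComponents: infrabootstrapv1.KubernetesComponents{
			Disable: "traefik,servicelb",
		},
		KubernetesProcesses: infrabootstrapv1.KubernetesProcesses{
			KubeAPIServerArgs: []string{"audit-log-maxage=30"},
		},
		Agent: infrabootstrapv1.AgentConfiguration{
			KubernetesAgentProcesses: infrabootstrapv1.KubernetesAgentProcesses{
				KubeletArgs: []string{"max-pods=200"},
			},
		},
	},
}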

View File

@ -33,6 +33,7 @@ func (in *AgentConfiguration) DeepCopyInto(out *AgentConfiguration) {
in.Node.DeepCopyInto(&out.Node)
out.Runtime = in.Runtime
out.Networking = in.Networking
in.KubernetesAgentProcesses.DeepCopyInto(&out.KubernetesAgentProcesses)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentConfiguration.
@ -118,6 +119,11 @@ func (in *Cluster) DeepCopy() *Cluster {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Database) DeepCopyInto(out *Database) {
*out = *in
if in.ClusterInit != nil {
in, out := &in.ClusterInit, &out.ClusterInit
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database.
@ -358,6 +364,76 @@ func (in *K3sConfigTemplateSpec) DeepCopy() *K3sConfigTemplateSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesAgentProcesses) DeepCopyInto(out *KubernetesAgentProcesses) {
*out = *in
if in.KubeletArgs != nil {
in, out := &in.KubeletArgs, &out.KubeletArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.KubeProxyArgs != nil {
in, out := &in.KubeProxyArgs, &out.KubeProxyArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesAgentProcesses.
func (in *KubernetesAgentProcesses) DeepCopy() *KubernetesAgentProcesses {
if in == nil {
return nil
}
out := new(KubernetesAgentProcesses)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesComponents) DeepCopyInto(out *KubernetesComponents) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesComponents.
func (in *KubernetesComponents) DeepCopy() *KubernetesComponents {
if in == nil {
return nil
}
out := new(KubernetesComponents)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesProcesses) DeepCopyInto(out *KubernetesProcesses) {
*out = *in
if in.KubeAPIServerArgs != nil {
in, out := &in.KubeAPIServerArgs, &out.KubeAPIServerArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.KubeControllerManagerArgs != nil {
in, out := &in.KubeControllerManagerArgs, &out.KubeControllerManagerArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.KubeSchedulerArgs != nil {
in, out := &in.KubeSchedulerArgs, &out.KubeSchedulerArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesProcesses.
func (in *KubernetesProcesses) DeepCopy() *KubernetesProcesses {
if in == nil {
return nil
}
out := new(KubernetesProcesses)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Listener) DeepCopyInto(out *Listener) {
*out = *in
@ -391,9 +467,11 @@ func (in *Networking) DeepCopy() *Networking {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerConfiguration) DeepCopyInto(out *ServerConfiguration) {
*out = *in
out.Database = in.Database
in.Database.DeepCopyInto(&out.Database)
out.Listener = in.Listener
out.Networking = in.Networking
out.KubernetesComponents = in.KubernetesComponents
in.KubernetesProcesses.DeepCopyInto(&out.KubernetesProcesses)
in.Agent.DeepCopyInto(&out.Agent)
}

View File

@ -49,6 +49,22 @@ spec:
agentConfiguration:
description: AgentConfiguration defines the k3s agent configuration.
properties:
kubernetesAgentProcesses:
description: KubernetesAgentProcesses defines the k3s agent kubernetes
processes configuration.
properties:
kubeProxyArgs:
description: KubeProxyArgs Customized flag for kube-proxy
process
items:
type: string
type: array
kubeletArgs:
description: KubeletArgs Customized flag for kubelet process
items:
type: string
type: array
type: object
networking:
description: Networking defines the k3s agent networking configuration.
properties:
@ -204,6 +220,22 @@ spec:
agent:
description: Agent is the agent configuration.
properties:
kubernetesAgentProcesses:
description: KubernetesAgentProcesses defines the k3s agent
kubernetes processes configuration.
properties:
kubeProxyArgs:
description: KubeProxyArgs Customized flag for kube-proxy
process
items:
type: string
type: array
kubeletArgs:
description: KubeletArgs Customized flag for kubelet process
items:
type: string
type: array
type: object
networking:
description: Networking defines the k3s agent networking configuration.
properties:
@ -291,6 +323,48 @@ spec:
datastore backend communication.
type: string
type: object
kubernetesComponents:
description: KubernetesComponents is the kubernetes components
configuration.
properties:
disable:
description: 'Disable do not deploy packaged components and
delete any deployed components (valid items: coredns, servicelb,
traefik,local-storage, metrics-server).'
type: string
disableHelmController:
description: DisableHelmController disable Helm controller.
type: boolean
disableKubeProxy:
description: DisableKubeProxy disable running kube-proxy.
type: boolean
disableNetworkPolicy:
description: DisableNetworkPolicy disable k3s default network
policy controller.
type: boolean
type: object
kubernetesProcesses:
description: KubernetesProcesses is the kubernetes processes configuration.
properties:
kubeAPIServerArg:
description: KubeAPIServerArgs is a customized flag for kube-apiserver
process
items:
type: string
type: array
kubeControllerManagerArgs:
description: KubeControllerManagerArgs is a customized flag
for kube-controller-manager process
items:
type: string
type: array
kubeSchedulerArgs:
description: KubeSchedulerArgs is a customized flag for kube-scheduler
process
items:
type: string
type: array
type: object
listener:
description: Listener is the listener configuration.
properties:

View File

@ -51,6 +51,23 @@ spec:
agentConfiguration:
description: AgentConfiguration defines the k3s agent configuration.
properties:
kubernetesAgentProcesses:
description: KubernetesAgentProcesses defines the k3s
agent kubernetes processes configuration.
properties:
kubeProxyArgs:
description: KubeProxyArgs Customized flag for kube-proxy
process
items:
type: string
type: array
kubeletArgs:
description: KubeletArgs Customized flag for kubelet
process
items:
type: string
type: array
type: object
networking:
description: Networking defines the k3s agent networking
configuration.
@ -210,6 +227,23 @@ spec:
agent:
description: Agent is the agent configuration.
properties:
kubernetesAgentProcesses:
description: KubernetesAgentProcesses defines the
k3s agent kubernetes processes configuration.
properties:
kubeProxyArgs:
description: KubeProxyArgs Customized flag for
kube-proxy process
items:
type: string
type: array
kubeletArgs:
description: KubeletArgs Customized flag for kubelet
process
items:
type: string
type: array
type: object
networking:
description: Networking defines the k3s agent networking
configuration.
@ -303,6 +337,49 @@ spec:
secure datastore backend communication.
type: string
type: object
kubernetesComponents:
description: KubernetesComponents is the kubernetes components
configuration.
properties:
disable:
description: 'Disable do not deploy packaged components
and delete any deployed components (valid items:
coredns, servicelb, traefik,local-storage, metrics-server).'
type: string
disableHelmController:
description: DisableHelmController disable Helm controller.
type: boolean
disableKubeProxy:
description: DisableKubeProxy disable running kube-proxy.
type: boolean
disableNetworkPolicy:
description: DisableNetworkPolicy disable k3s default
network policy controller.
type: boolean
type: object
kubernetesProcesses:
description: KubernetesProcesses is the kubernetes processes
configuration.
properties:
kubeAPIServerArg:
description: KubeAPIServerArgs is a customized flag
for kube-apiserver process
items:
type: string
type: array
kubeControllerManagerArgs:
description: KubeControllerManagerArgs is a customized
flag for kube-controller-manager process
items:
type: string
type: array
kubeSchedulerArgs:
description: KubeSchedulerArgs is a customized flag
for kube-scheduler process
items:
type: string
type: array
type: object
listener:
description: Listener is the listener configuration.
properties:

View File

@ -42,6 +42,7 @@ rules:
- machinepools/status
- machines
- machines/status
- machinesets
verbs:
- get
- list

View File

@ -248,10 +248,10 @@ func (r *K3sConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
// Unlock any locks that might have been set during init process
r.K3sInitLock.Unlock(ctx, cluster)
// if the AgentConfiguration is missing, create a default one
if config.Spec.AgentConfiguration == nil {
log.Info("Creating default AgentConfiguration")
config.Spec.AgentConfiguration = &infrabootstrapv1.AgentConfiguration{}
// if the .spec.cluster is missing, create a default one
if config.Spec.Cluster == nil {
log.Info("Creating default .spec.cluster")
config.Spec.Cluster = &infrabootstrapv1.Cluster{}
}
// it's a control plane join
@ -277,7 +277,7 @@ func (r *K3sConfigReconciler) handleClusterNotInitialized(ctx context.Context, s
}
// if the machine has neither a ServerConfiguration nor a Cluster configuration, requeue
if scope.Config.Spec.ServerConfiguration == nil && scope.Config.Spec.AgentConfiguration == nil {
if scope.Config.Spec.ServerConfiguration == nil && scope.Config.Spec.Cluster == nil {
scope.Info("Control plane is not ready, requeing joining control planes until ready.")
return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
}
@ -317,7 +317,7 @@ func (r *K3sConfigReconciler) handleClusterNotInitialized(ctx context.Context, s
ctx,
r.Client,
util.ObjectKey(scope.Cluster),
*metav1.NewControllerRef(scope.Config, bootstrapv1.GroupVersion.WithKind("K3sConfig")),
*metav1.NewControllerRef(scope.Config, infrabootstrapv1.GroupVersion.WithKind("K3sConfig")),
)
if err != nil {
conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
@ -377,6 +377,14 @@ func (r *K3sConfigReconciler) handleClusterNotInitialized(ctx context.Context, s
func (r *K3sConfigReconciler) joinWorker(ctx context.Context, scope *Scope) (ctrl.Result, error) {
scope.Info("Creating BootstrapData for the worker node")
machine := &clusterv1.Machine{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(scope.ConfigOwner.Object, machine); err != nil {
return ctrl.Result{}, errors.Wrapf(err, "cannot convert %s to Machine", scope.ConfigOwner.GetKind())
}
// injects into config.Spec values from top level object
r.reconcileWorkerTopLevelObjectSettings(ctx, scope.Cluster, machine, scope.Config)
// Ensure that agentConfiguration is properly set for joining node on the current cluster.
if res, err := r.reconcileDiscovery(ctx, scope.Cluster, scope.Config); err != nil {
return ctrl.Result{}, err
@ -384,7 +392,11 @@ func (r *K3sConfigReconciler) joinWorker(ctx context.Context, scope *Scope) (ctr
return res, nil
}
joinWorkerData, err := k3stypes.MarshalJoinAgentConfiguration(scope.Config.Spec.AgentConfiguration)
if scope.Config.Spec.AgentConfiguration == nil {
scope.Config.Spec.AgentConfiguration = &infrabootstrapv1.AgentConfiguration{}
}
joinWorkerData, err := k3stypes.MarshalJoinAgentConfiguration(&scope.Config.Spec)
if err != nil {
scope.Error(err, "Failed to marshal join configuration")
return ctrl.Result{}, err
@ -432,10 +444,18 @@ func (r *K3sConfigReconciler) joinControlplane(ctx context.Context, scope *Scope
return ctrl.Result{}, fmt.Errorf("%s is not a valid control plane kind, only Machine is supported", scope.ConfigOwner.GetKind())
}
if scope.Config.Spec.Cluster == nil {
scope.Config.Spec.Cluster = &infrabootstrapv1.Cluster{}
if scope.Config.Spec.ServerConfiguration == nil {
scope.Config.Spec.ServerConfiguration = &infrabootstrapv1.ServerConfiguration{}
}
machine := &clusterv1.Machine{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(scope.ConfigOwner.Object, machine); err != nil {
return ctrl.Result{}, errors.Wrapf(err, "cannot convert %s to Machine", scope.ConfigOwner.GetKind())
}
// injects into config.ClusterConfiguration values from top level object
r.reconcileTopLevelObjectSettings(ctx, scope.Cluster, machine, scope.Config)
// Ensure that joinConfiguration.Discovery is properly set for joining node on the current cluster.
if res, err := r.reconcileDiscovery(ctx, scope.Cluster, scope.Config); err != nil {
return ctrl.Result{}, err
@ -443,7 +463,7 @@ func (r *K3sConfigReconciler) joinControlplane(ctx context.Context, scope *Scope
return res, nil
}
joinData, err := k3stypes.MarshalJoinServerConfiguration(scope.Config.Spec.ServerConfiguration)
joinData, err := k3stypes.MarshalJoinServerConfiguration(&scope.Config.Spec)
if err != nil {
scope.Error(err, "Failed to marshal join configuration")
return ctrl.Result{}, err
@ -499,7 +519,7 @@ func (r *K3sConfigReconciler) generateAndStoreToken(ctx context.Context, scope *
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: bootstrapv1.GroupVersion.String(),
APIVersion: infrabootstrapv1.GroupVersion.String(),
Kind: "K3sConfig",
Name: scope.Config.Name,
UID: scope.Config.UID,
@ -579,7 +599,7 @@ func (r *K3sConfigReconciler) storeBootstrapData(ctx context.Context, scope *Sco
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: bootstrapv1.GroupVersion.String(),
APIVersion: infrabootstrapv1.GroupVersion.String(),
Kind: "K3sConfig",
Name: scope.Config.Name,
UID: scope.Config.UID,
@ -659,7 +679,7 @@ func (r *K3sConfigReconciler) MachineToBootstrapMapFunc(o client.Object) []ctrl.
}
var result []ctrl.Request
if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind() == bootstrapv1.GroupVersion.WithKind("K3sConfig") {
if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind() == infrabootstrapv1.GroupVersion.WithKind("K3sConfig") {
name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}
result = append(result, ctrl.Request{NamespacedName: name})
}
@ -676,7 +696,7 @@ func (r *K3sConfigReconciler) MachinePoolToBootstrapMapFunc(o client.Object) []c
var result []ctrl.Request
configRef := m.Spec.Template.Spec.Bootstrap.ConfigRef
if configRef != nil && configRef.GroupVersionKind().GroupKind() == bootstrapv1.GroupVersion.WithKind("K3sConfig").GroupKind() {
if configRef != nil && configRef.GroupVersionKind().GroupKind() == infrabootstrapv1.GroupVersion.WithKind("K3sConfig").GroupKind() {
name := client.ObjectKey{Namespace: m.Namespace, Name: configRef.Name}
result = append(result, ctrl.Request{NamespacedName: name})
}
@ -707,7 +727,7 @@ func (r *K3sConfigReconciler) ClusterToK3sConfigs(o client.Object) []ctrl.Reques
for _, m := range machineList.Items {
if m.Spec.Bootstrap.ConfigRef != nil &&
m.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == bootstrapv1.GroupVersion.WithKind("K3sConfig").GroupKind() {
m.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == infrabootstrapv1.GroupVersion.WithKind("K3sConfig").GroupKind() {
name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}
result = append(result, ctrl.Request{NamespacedName: name})
}
@ -721,7 +741,7 @@ func (r *K3sConfigReconciler) ClusterToK3sConfigs(o client.Object) []ctrl.Reques
for _, mp := range machinePoolList.Items {
if mp.Spec.Template.Spec.Bootstrap.ConfigRef != nil &&
mp.Spec.Template.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == bootstrapv1.GroupVersion.WithKind("K3sConfig").GroupKind() {
mp.Spec.Template.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == infrabootstrapv1.GroupVersion.WithKind("K3sConfig").GroupKind() {
name := client.ObjectKey{Namespace: mp.Namespace, Name: mp.Spec.Template.Spec.Bootstrap.ConfigRef.Name}
result = append(result, ctrl.Request{NamespacedName: name})
}
@ -762,3 +782,13 @@ func (r *K3sConfigReconciler) reconcileTopLevelObjectSettings(ctx context.Contex
log.V(3).Info("Altering Spec.Version", "Version", config.Spec.Version)
}
}
func (r *K3sConfigReconciler) reconcileWorkerTopLevelObjectSettings(ctx context.Context, _ *clusterv1.Cluster, machine *clusterv1.Machine, config *infrabootstrapv1.K3sConfig) {
log := ctrl.LoggerFrom(ctx)
// If there are no Version settings defined, use Version from machine, if defined
if config.Spec.Version == "" && machine.Spec.Version != nil {
config.Spec.Version = *machine.Spec.Version
log.V(3).Info("Altering Spec.Version", "Version", config.Spec.Version)
}
}

View File

@ -45,16 +45,11 @@ type BaseUserData struct {
SentinelFileCommand string
}
func (input *BaseUserData) prepare() error {
func (input *BaseUserData) prepare() {
input.Header = cloudConfigHeader
input.WriteFiles = append(input.WriteFiles, input.AdditionalFiles...)
k3sScriptFile, err := generateBootstrapScript(input)
if err != nil {
return errors.Wrap(err, "failed to generate user data for machine install k3s")
}
input.WriteFiles = append(input.WriteFiles, *k3sScriptFile)
input.WriteFiles = append(input.WriteFiles, input.ConfigFile)
input.SentinelFileCommand = sentinelFileCommand
return nil
}
func generate(kind string, tpl string, data interface{}) ([]byte, error) {
@ -79,21 +74,3 @@ func generate(kind string, tpl string, data interface{}) ([]byte, error) {
return out.Bytes(), nil
}
var (
//go:embed k3s-install.sh
k3sBootstrapScript string
)
func generateBootstrapScript(input interface{}) (*bootstrapv1.File, error) {
k3sScript, err := generate("K3sInstallScript", k3sBootstrapScript, input)
if err != nil {
return nil, errors.Wrap(err, "failed to bootstrap script for machine joins")
}
return &bootstrapv1.File{
Path: "/usr/local/bin/k3s-install.sh",
Owner: "root",
Permissions: "0755",
Content: string(k3sScript),
}, nil
}

View File

@ -17,8 +17,6 @@
package cloudinit
import (
"github.com/pkg/errors"
"github.com/kubesphere/kubekey/util/secret"
)
@ -31,7 +29,7 @@ const (
content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)"
runcmd:
{{- template "commands" .PreK3sCommands }}
- 'INSTALL_K3S_SKIP_DOWNLOAD=true /usr/local/bin/k3s-install.sh'
- "INSTALL_K3S_SKIP_DOWNLOAD=true INSTALL_K3S_EXEC='server' /usr/local/bin/k3s-install.sh"
{{- template "commands" .PostK3sCommands }}
`
)
@ -49,11 +47,7 @@ func NewInitControlPlane(input *ControlPlaneInput) ([]byte, error) {
input.Header = cloudConfigHeader
input.WriteFiles = input.Certificates.AsFiles()
input.WriteFiles = append(input.WriteFiles, input.AdditionalFiles...)
k3sScriptFile, err := generateBootstrapScript(input)
if err != nil {
return nil, errors.Wrap(err, "failed to generate user data for machine install k3s")
}
input.WriteFiles = append(input.WriteFiles, *k3sScriptFile)
input.WriteFiles = append(input.WriteFiles, input.ConfigFile)
input.SentinelFileCommand = sentinelFileCommand
userData, err := generate("InitControlplane", controlPlaneCloudInit, input)
if err != nil {

View File

@ -29,16 +29,14 @@ const (
content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)"
runcmd:
{{- template "commands" .PreK3sCommands }}
- 'INSTALL_K3S_SKIP_DOWNLOAD=true /usr/local/bin/k3s-install.sh'
- "INSTALL_K3S_SKIP_DOWNLOAD=true INSTALL_K3S_EXEC='server' /usr/local/bin/k3s-install.sh"
{{- template "commands" .PostK3sCommands }}
`
)
// NewJoinControlPlane returns the cloudinit string to be used on joining a control plane instance.
func NewJoinControlPlane(input *ControlPlaneInput) ([]byte, error) {
if err := input.prepare(); err != nil {
return nil, err
}
input.prepare()
userData, err := generate("JoinControlplane", controlPlaneJoinCloudInit, input)
if err != nil {
return nil, errors.Wrapf(err, "failed to generate user data for machine joining control plane")

View File

@ -29,7 +29,7 @@ const (
content: "This placeholder file is used to create the /run/cluster-api sub directory in a way that is compatible with both Linux and Windows (mkdir -p /run/cluster-api does not work with Windows)"
runcmd:
{{- template "commands" .PreK3sCommands }}
- 'INSTALL_K3S_SKIP_DOWNLOAD=true /usr/local/bin/k3s-install.sh'
- "INSTALL_K3S_SKIP_DOWNLOAD=true INSTALL_K3S_EXEC='agent' /usr/local/bin/k3s-install.sh"
{{- template "commands" .PostK3sCommands }}
`
)
@ -41,9 +41,7 @@ type NodeInput struct {
// NewNode returns the cloud-init for joining a node instance.
func NewNode(input *NodeInput) ([]byte, error) {
if err := input.prepare(); err != nil {
return nil, err
}
input.prepare()
userData, err := generate("JoinWorker", workerCloudInit, input)
if err != nil {
return nil, errors.Wrapf(err, "failed to generate user data for machine joining worker node")

View File

@ -28,10 +28,10 @@ type K3sServerConfiguration struct {
DataStoreKeyFile string `json:"datastore-keyfile,omitempty"`
// Cluster
Token string `json:"token,omitempty"`
TokenFile string `json:"token-file,omitempty"`
Server string `json:"server,omitempty"`
CloudInit bool `json:"cloud-init,omitempty"`
Token string `json:"token,omitempty"`
TokenFile string `json:"token-file,omitempty"`
Server string `json:"server,omitempty"`
ClusterInit bool `json:"cluster-init,omitempty"`
// Listener
// BindAddress k3s bind address.
@ -59,6 +59,27 @@ type K3sServerConfiguration struct {
// FlannelBackend One of none, vxlan, ipsec, host-gw, or wireguard. (default: vxlan)
FlannelBackend string `json:"flannel-backend,omitempty"`
// Kubernetes components
// Disable do not deploy packaged components and delete any deployed components
// (valid items: coredns, servicelb, traefik,local-storage, metrics-server).
Disable string `json:"disable,omitempty"`
// DisableKubeProxy disable running kube-proxy.
DisableKubeProxy bool `json:"disable-kube-proxy,omitempty"`
// DisableNetworkPolicy disable k3s default network policy controller.
DisableNetworkPolicy bool `json:"disable-network-policy,omitempty"`
// DisableHelmController disable Helm controller.
DisableHelmController bool `json:"disable-helm-controller,omitempty"`
// Kubernetes processes
// DisableCloudController Disable k3s default cloud controller manager.
DisableCloudController bool `json:"disable-cloud-controller,omitempty"`
// KubeAPIServerArgs Customized flag for kube-apiserver process.
KubeAPIServerArgs []string `json:"kube-apiserver-arg,omitempty"`
// KubeControllerManagerArgs Customized flag for kube-controller-manager process.
KubeControllerManagerArgs []string `json:"kube-controller-manager-arg,omitempty"`
// KubeSchedulerArgs Customized flag for kube-scheduler process.
KubeSchedulerArgs []string `json:"kube-scheduler-arg,omitempty"`
// Agent
K3sAgentConfiguration `json:",inline"`
}
@ -101,4 +122,10 @@ type K3sAgentConfiguration struct {
NodeExternalIP string `json:"node-external-ip,omitempty"`
// ResolvConf Path to Kubelet resolv.conf file.
ResolvConf string `json:"resolv-conf,omitempty"`
// Kubernetes
// KubeletArgs Customized flag for kubelet process.
KubeletArgs []string `json:"kubelet-arg,omitempty"`
// KubeProxyArgs Customized flag for kube-proxy process.
KubeProxyArgs []string `json:"kube-proxy-arg,omitempty"`
}
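
For a sense of what these flag-style json tags produce, here is a sketch (placed in the same types package as the structs above; values illustrative, not code from this commit) that marshals a minimal server configuration:

package types

import (
	"fmt"

	kubeyaml "sigs.k8s.io/yaml"
)

// printExampleServerConfig marshals a minimal K3sServerConfiguration; with the
// tags above, the omitempty fields drop out and the output is roughly:
//
//	cluster-init: true
//	disable-cloud-controller: true
//	kube-apiserver-arg:
//	- anonymous-auth=true
func printExampleServerConfig() {
	cfg := K3sServerConfiguration{
		ClusterInit:            true,
		DisableCloudController: true,
		KubeAPIServerArgs:      []string{"anonymous-auth=true"},
	}
	b, err := kubeyaml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b))
}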

View File

@ -17,6 +17,9 @@
package types
import (
"fmt"
"strings"
"github.com/jinzhu/copier"
kubeyaml "sigs.k8s.io/yaml"
@ -27,13 +30,26 @@ import (
func MarshalInitServerConfiguration(spec *infrabootstrapv1.K3sConfigSpec, token string) (string, error) {
obj := spec.ServerConfiguration
serverConfig := &K3sServerConfiguration{}
if err := copier.Copy(serverConfig, obj); err != nil {
if err := copier.Copy(serverConfig, obj.Database); err != nil {
return "", err
}
if err := copier.Copy(serverConfig, obj.Listener); err != nil {
return "", err
}
if err := copier.Copy(serverConfig, obj.Networking); err != nil {
return "", err
}
if err := copier.Copy(serverConfig, obj.KubernetesComponents); err != nil {
return "", err
}
serverConfig.Token = token
serverConfig.ClusterInit = *obj.Database.ClusterInit
serverConfig.CloudInit = spec.ServerConfiguration.Database.ClusterInit
serverConfig.DisableCloudController = true
serverConfig.KubeAPIServerArgs = append(obj.KubernetesProcesses.KubeAPIServerArgs, "anonymous-auth=true", getTLSCipherSuiteArg())
serverConfig.KubeControllerManagerArgs = append(obj.KubernetesProcesses.KubeControllerManagerArgs, "cloud-provider=external")
serverConfig.KubeSchedulerArgs = obj.KubernetesProcesses.KubeSchedulerArgs
serverConfig.K3sAgentConfiguration = K3sAgentConfiguration{
NodeName: obj.Agent.Node.NodeName,
@ -48,6 +64,8 @@ func MarshalInitServerConfiguration(spec *infrabootstrapv1.K3sConfigSpec, token
NodeIP: obj.Agent.Networking.NodeIP,
NodeExternalIP: obj.Agent.Networking.NodeExternalIP,
ResolvConf: obj.Agent.Networking.ResolvConf,
KubeletArgs: obj.Agent.KubernetesAgentProcesses.KubeletArgs,
KubeProxyArgs: obj.Agent.KubernetesAgentProcesses.KubeProxyArgs,
}
b, err := kubeyaml.Marshal(serverConfig)
@ -58,11 +76,30 @@ func MarshalInitServerConfiguration(spec *infrabootstrapv1.K3sConfigSpec, token
}
// MarshalJoinServerConfiguration marshals the join ServerConfiguration object into a string.
func MarshalJoinServerConfiguration(obj *infrabootstrapv1.ServerConfiguration) (string, error) {
func MarshalJoinServerConfiguration(spec *infrabootstrapv1.K3sConfigSpec) (string, error) {
obj := spec.ServerConfiguration
serverConfig := &K3sServerConfiguration{}
if err := copier.Copy(serverConfig, obj); err != nil {
if err := copier.Copy(serverConfig, obj.Database); err != nil {
return "", err
}
if err := copier.Copy(serverConfig, obj.Listener); err != nil {
return "", err
}
if err := copier.Copy(serverConfig, obj.Networking); err != nil {
return "", err
}
if err := copier.Copy(serverConfig, obj.KubernetesComponents); err != nil {
return "", err
}
serverConfig.TokenFile = spec.Cluster.TokenFile
serverConfig.Token = spec.Cluster.Token
serverConfig.Server = spec.Cluster.Server
serverConfig.DisableCloudController = true
serverConfig.KubeAPIServerArgs = append(obj.KubernetesProcesses.KubeAPIServerArgs, "anonymous-auth=true", getTLSCipherSuiteArg())
serverConfig.KubeControllerManagerArgs = append(obj.KubernetesProcesses.KubeControllerManagerArgs, "cloud-provider=external")
serverConfig.KubeSchedulerArgs = obj.KubernetesProcesses.KubeSchedulerArgs
serverConfig.K3sAgentConfiguration = K3sAgentConfiguration{
NodeName: obj.Agent.Node.NodeName,
@ -77,6 +114,8 @@ func MarshalJoinServerConfiguration(obj *infrabootstrapv1.ServerConfiguration) (
NodeIP: obj.Agent.Networking.NodeIP,
NodeExternalIP: obj.Agent.Networking.NodeExternalIP,
ResolvConf: obj.Agent.Networking.ResolvConf,
KubeletArgs: obj.Agent.KubernetesAgentProcesses.KubeletArgs,
KubeProxyArgs: obj.Agent.KubernetesAgentProcesses.KubeProxyArgs,
}
b, err := kubeyaml.Marshal(serverConfig)
@ -87,15 +126,52 @@ func MarshalJoinServerConfiguration(obj *infrabootstrapv1.ServerConfiguration) (
}
// MarshalJoinAgentConfiguration marshals the join AgentConfiguration object into a string.
func MarshalJoinAgentConfiguration(obj *infrabootstrapv1.AgentConfiguration) (string, error) {
serverConfig := &K3sAgentConfiguration{}
if err := copier.Copy(serverConfig, obj); err != nil {
func MarshalJoinAgentConfiguration(spec *infrabootstrapv1.K3sConfigSpec) (string, error) {
obj := spec.AgentConfiguration
agentConfig := &K3sAgentConfiguration{}
if err := copier.Copy(agentConfig, obj.Node); err != nil {
return "", err
}
if err := copier.Copy(agentConfig, obj.Networking); err != nil {
return "", err
}
if err := copier.Copy(agentConfig, obj.Runtime); err != nil {
return "", err
}
if err := copier.Copy(agentConfig, obj.KubernetesAgentProcesses); err != nil {
return "", err
}
b, err := kubeyaml.Marshal(serverConfig)
agentConfig.TokenFile = spec.Cluster.TokenFile
agentConfig.Token = spec.Cluster.Token
agentConfig.Server = spec.Cluster.Server
b, err := kubeyaml.Marshal(agentConfig)
if err != nil {
return "", err
}
return string(b), nil
}
func getTLSCipherSuiteArg() string {
ciphers := []string{
// Modern Compatibility recommended configuration in
// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
"TLS_RSA_WITH_AES_128_GCM_SHA256",
"TLS_RSA_WITH_AES_256_GCM_SHA384",
}
return fmt.Sprintf("tls-cipher-suites=%s", strings.Join(ciphers, ","))
}
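
For orientation only, a sketch of how the reworked agent helper is driven from a full K3sConfigSpec (endpoint and paths are illustrative, this helper is not part of the commit):

package types

import (
	infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
)

// exampleJoinAgentConfig feeds MarshalJoinAgentConfiguration a spec whose
// Cluster block carries the join endpoint and token file and whose
// AgentConfiguration block carries node-level settings; values are made up.
func exampleJoinAgentConfig() (string, error) {
	spec := &infrabootstrapv1.K3sConfigSpec{
		Cluster: &infrabootstrapv1.Cluster{
			Server:    "https://10.0.0.1:6443", // illustrative join endpoint
			TokenFile: "/etc/k3s/token",        // illustrative token path
		},
		AgentConfiguration: &infrabootstrapv1.AgentConfiguration{},
	}
	// The resulting YAML carries keys such as "server" and "token-file" and is
	// what the bootstrap controller stores as the worker's bootstrap data.
	return MarshalJoinAgentConfiguration(spec)
}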

View File

@ -99,6 +99,9 @@ func (r *KKInstanceReconciler) reconcileDeletingBootstrap(_ context.Context, ssh
if err := svc.DaemonReload(); err != nil {
return err
}
if err := svc.UninstallK3s(); err != nil {
return err
}
return nil
}

View File

@ -20,19 +20,19 @@ import (
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
// Conditions and condition Reasons for the KubeadmControlPlane object.
// Conditions and condition Reasons for the K3sControlPlane object.
const (
// MachinesReadyCondition reports an aggregate of current status of the machines controlled by the KubeadmControlPlane.
// MachinesReadyCondition reports an aggregate of current status of the machines controlled by the K3sControlPlane.
MachinesReadyCondition clusterv1.ConditionType = "MachinesReady"
)
const (
// CertificatesAvailableCondition documents that cluster certificates were generated as part of the
// processing of a a KubeadmControlPlane object.
// processing of a K3sControlPlane object.
CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable"
// CertificatesGenerationFailedReason (Severity=Warning) documents a KubeadmControlPlane controller detecting
// CertificatesGenerationFailedReason (Severity=Warning) documents a K3sControlPlane controller detecting
// an error while generating certificates; these kinds of errors are usually temporary and the controller
// automatically recovers from them.
CertificatesGenerationFailedReason = "CertificatesGenerationFailed"
@ -43,29 +43,29 @@ const (
// and so the control plane is available and an API server instance is ready for processing requests.
AvailableCondition clusterv1.ConditionType = "Available"
// WaitingForKubeadmInitReason (Severity=Info) documents a KubeadmControlPlane object waiting for the first
// WaitingForKubeadmInitReason (Severity=Info) documents a K3sControlPlane object waiting for the first
// control plane instance to complete the kubeadm init operation.
WaitingForKubeadmInitReason = "WaitingForKubeadmInit"
)
const (
// MachinesSpecUpToDateCondition documents that the spec of the machines controlled by the K3sControlPlane
// is up to date. When this condition is false, the KubeadmControlPlane is executing a rolling upgrade.
// is up to date. When this condition is false, the K3sControlPlane is executing a rolling upgrade.
MachinesSpecUpToDateCondition clusterv1.ConditionType = "MachinesSpecUpToDate"
// RollingUpdateInProgressReason (Severity=Warning) documents a KubeadmControlPlane object executing a
// RollingUpdateInProgressReason (Severity=Warning) documents a K3sControlPlane object executing a
// rolling upgrade for aligning the machines spec to the desired state.
RollingUpdateInProgressReason = "RollingUpdateInProgress"
)
const (
// ResizedCondition documents a KubeadmControlPlane that is resizing the set of controlled machines.
// ResizedCondition documents a K3sControlPlane that is resizing the set of controlled machines.
ResizedCondition clusterv1.ConditionType = "Resized"
// ScalingUpReason (Severity=Info) documents a KubeadmControlPlane that is increasing the number of replicas.
// ScalingUpReason (Severity=Info) documents a K3sControlPlane that is increasing the number of replicas.
ScalingUpReason = "ScalingUp"
// ScalingDownReason (Severity=Info) documents a KubeadmControlPlane that is decreasing the number of replicas.
// ScalingDownReason (Severity=Info) documents a K3sControlPlane that is decreasing the number of replicas.
ScalingDownReason = "ScalingDown"
)
@ -121,15 +121,15 @@ const (
// when generating the machine object.
MachinesCreatedCondition clusterv1.ConditionType = "MachinesCreated"
// InfrastructureTemplateCloningFailedReason (Severity=Error) documents a KubeadmControlPlane failing to
// InfrastructureTemplateCloningFailedReason (Severity=Error) documents a K3sControlPlane failing to
// clone the infrastructure template.
InfrastructureTemplateCloningFailedReason = "InfrastructureTemplateCloningFailed"
// BootstrapTemplateCloningFailedReason (Severity=Error) documents a KubeadmControlPlane failing to
// BootstrapTemplateCloningFailedReason (Severity=Error) documents a K3sControlPlane failing to
// clone the bootstrap template.
BootstrapTemplateCloningFailedReason = "BootstrapTemplateCloningFailed"
// MachineGenerationFailedReason (Severity=Error) documents a KubeadmControlPlane failing to
// MachineGenerationFailedReason (Severity=Error) documents a K3sControlPlane failing to
// generate a machine object.
MachineGenerationFailedReason = "MachineGenerationFailed"
)

View File

@ -26,7 +26,7 @@ import (
infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
)
// RolloutStrategyType defines the rollout strategies for a KubeadmControlPlane.
// RolloutStrategyType defines the rollout strategies for a K3sControlPlane.
type RolloutStrategyType string
const (
@ -73,7 +73,7 @@ type K3sControlPlaneSpec struct {
// RolloutAfter is a field to indicate a rollout should be performed
// after the specified time even if no changes have been made to the
// KubeadmControlPlane.
// K3sControlPlane.
//
// +optional
RolloutAfter *metav1.Time `json:"rolloutAfter,omitempty"`
@ -179,7 +179,7 @@ type K3sControlPlaneStatus struct {
// +optional
Initialized bool `json:"initialized"`
// Ready denotes that the KubeadmControlPlane API Server is ready to
// Ready denotes that the K3sControlPlane API Server is ready to
// receive requests.
// +optional
Ready bool `json:"ready"`
@ -199,7 +199,7 @@ type K3sControlPlaneStatus struct {
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions defines current service state of the KubeadmControlPlane.
// Conditions defines current service state of the K3sControlPlane.
// +optional
Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}

View File

@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/utils/pointer"
"sigs.k8s.io/cluster-api/util/version"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
@ -66,8 +67,12 @@ func defaultK3sControlPlaneSpec(s *K3sControlPlaneSpec, namespace string) {
s.Version = "v" + s.Version
}
if s.K3sConfigSpec.ServerConfiguration.Database.DataStoreEndPoint == "" && s.K3sConfigSpec.ServerConfiguration.Database.ClusterInit {
s.K3sConfigSpec.ServerConfiguration.Database.ClusterInit = true
if s.K3sConfigSpec.ServerConfiguration == nil {
s.K3sConfigSpec.ServerConfiguration = &infrabootstrapv1.ServerConfiguration{}
}
if s.K3sConfigSpec.ServerConfiguration.Database.DataStoreEndPoint == "" && s.K3sConfigSpec.ServerConfiguration.Database.ClusterInit == nil {
s.K3sConfigSpec.ServerConfiguration.Database.ClusterInit = pointer.Bool(true)
}
infrabootstrapv1.DefaultK3sConfigSpec(&s.K3sConfigSpec)
@ -105,7 +110,7 @@ func (in *K3sControlPlane) ValidateCreate() error {
allErrs = append(allErrs, validateServerConfiguration(spec.K3sConfigSpec.ServerConfiguration, nil, field.NewPath("spec", "k3sConfigSpec", "serverConfiguration"))...)
allErrs = append(allErrs, spec.K3sConfigSpec.Validate(field.NewPath("spec", "k3sConfigSpec"))...)
if len(allErrs) > 0 {
return apierrors.NewInvalid(GroupVersion.WithKind("KubeadmControlPlane").GroupKind(), in.Name, allErrs)
return apierrors.NewInvalid(GroupVersion.WithKind("K3sControlPlane").GroupKind(), in.Name, allErrs)
}
return nil
}
@ -290,7 +295,7 @@ func validateRolloutStrategy(rolloutStrategy *RolloutStrategy, replicas *int32,
allErrs,
field.Required(
pathPrefix.Child("rollingUpdate"),
"when KubeadmControlPlane is configured to scale-in, replica count needs to be at least 3",
"when K3sControlPlane is configured to scale-in, replica count needs to be at least 3",
),
)
}
@ -315,36 +320,42 @@ func validateServerConfiguration(newServerConfiguration, oldServerConfiguration
return allErrs
}
if newServerConfiguration.Database.ClusterInit && newServerConfiguration.Database.DataStoreEndPoint != "" {
allErrs = append(
allErrs,
field.Forbidden(
pathPrefix.Child("database", "clusterInit"),
"cannot have both external and local etcd",
),
)
}
// update validations
if oldServerConfiguration != nil {
if newServerConfiguration.Database.ClusterInit && oldServerConfiguration.Database.DataStoreEndPoint != "" {
if newServerConfiguration.Database.ClusterInit != nil {
if *newServerConfiguration.Database.ClusterInit && newServerConfiguration.Database.DataStoreEndPoint != "" {
allErrs = append(
allErrs,
field.Forbidden(
pathPrefix.Child("database", "clusterInit"),
"cannot change between external and local etcd",
"cannot have both external and local etcd",
),
)
}
}
if newServerConfiguration.Database.DataStoreEndPoint != "" && oldServerConfiguration.Database.ClusterInit {
allErrs = append(
allErrs,
field.Forbidden(
pathPrefix.Child("database", "dataStoreEndPoint"),
"cannot change between external and local etcd",
),
)
// update validations
if oldServerConfiguration != nil {
if newServerConfiguration.Database.ClusterInit != nil {
if *newServerConfiguration.Database.ClusterInit && oldServerConfiguration.Database.DataStoreEndPoint != "" {
allErrs = append(
allErrs,
field.Forbidden(
pathPrefix.Child("database", "clusterInit"),
"cannot change between external and local etcd",
),
)
}
}
if oldServerConfiguration.Database.ClusterInit != nil {
if newServerConfiguration.Database.DataStoreEndPoint != "" && *oldServerConfiguration.Database.ClusterInit {
allErrs = append(
allErrs,
field.Forbidden(
pathPrefix.Child("database", "dataStoreEndPoint"),
"cannot change between external and local etcd",
),
)
}
}
}
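
To make the new *bool handling concrete, here is a hedged sketch of a configuration the webhook above should now reject (package name and field.ErrorList return type are inferred from the surrounding code; values are illustrative and this helper is not part of the commit):

package v1beta1

import (
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/utils/pointer"

	infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
)

// exampleClusterInitConflict builds a server configuration that sets
// clusterInit=true while also pointing at an external datastore; with the
// checks above this yields a Forbidden error on database.clusterInit.
func exampleClusterInitConflict() field.ErrorList {
	bad := &infrabootstrapv1.ServerConfiguration{
		Database: infrabootstrapv1.Database{
			ClusterInit:       pointer.Bool(true),
			DataStoreEndPoint: "https://10.0.0.5:2379", // illustrative external etcd
		},
	}
	return validateServerConfiguration(bad, nil, field.NewPath("spec", "k3sConfigSpec", "serverConfiguration"))
}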

View File

@ -32,7 +32,7 @@ type K3sControlPlaneTemplateResource struct {
Spec K3sControlPlaneTemplateResourceSpec `json:"spec"`
}
// K3sControlPlaneTemplateResourceSpec defines the desired state of KubeadmControlPlane.
// K3sControlPlaneTemplateResourceSpec defines the desired state of K3sControlPlane.
// NOTE: K3sControlPlaneTemplateResourceSpec is similar to K3sControlPlaneSpec but
// omits Replicas and Version fields. These fields do not make sense on the K3sControlPlaneTemplate,
// because they are calculated by the Cluster topology reconciler during reconciliation and thus cannot
@ -49,7 +49,7 @@ type K3sControlPlaneTemplateResourceSpec struct {
// RolloutAfter is a field to indicate a rollout should be performed
// after the specified time even if no changes have been made to the
// KubeadmControlPlane.
// K3sControlPlane.
//
// +optional
RolloutAfter *metav1.Time `json:"rolloutAfter,omitempty"`
@ -62,11 +62,11 @@ type K3sControlPlaneTemplateResourceSpec struct {
}
// K3sControlPlaneTemplateMachineTemplate defines the template for Machines
// in a KubeadmControlPlaneTemplate object.
// NOTE: KubeadmControlPlaneTemplateMachineTemplate is similar to KubeadmControlPlaneMachineTemplate but
// omits ObjectMeta and InfrastructureRef fields. These fields do not make sense on the KubeadmControlPlaneTemplate,
// in a K3sControlPlaneTemplate object.
// NOTE: K3sControlPlaneTemplateMachineTemplate is similar to K3sControlPlaneMachineTemplate but
// omits ObjectMeta and InfrastructureRef fields. These fields do not make sense on the K3sControlPlaneTemplate,
// because they are calculated by the Cluster topology reconciler during reconciliation and thus cannot
// be configured on the KubeadmControlPlaneTemplate.
// be configured on the K3sControlPlaneTemplate.
type K3sControlPlaneTemplateMachineTemplate struct {
// NodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node
// The default value is 0, meaning that the node can be drained without any time limitations.

View File

@ -55,7 +55,7 @@ var _ webhook.Validator = &K3sControlPlaneTemplate{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *K3sControlPlaneTemplate) ValidateCreate() error {
// NOTE: KubeadmControlPlaneTemplate is behind ClusterTopology feature gate flag; the web hook
// NOTE: K3sControlPlaneTemplate is behind ClusterTopology feature gate flag; the web hook
// must prevent creating new objects in case the feature flag is disabled.
if !feature.Gates.Enabled(feature.ClusterTopology) {
return field.Forbidden(

View File

@ -46,6 +46,22 @@ spec:
agentConfiguration:
description: AgentConfiguration defines the k3s agent configuration.
properties:
kubernetesAgentProcesses:
description: KubernetesAgentProcesses defines the k3s agent
kubernetes processes configuration.
properties:
kubeProxyArgs:
description: KubeProxyArgs Customized flag for kube-proxy
process
items:
type: string
type: array
kubeletArgs:
description: KubeletArgs Customized flag for kubelet process
items:
type: string
type: array
type: object
networking:
description: Networking defines the k3s agent networking configuration.
properties:
@ -203,6 +219,23 @@ spec:
agent:
description: Agent is the agent configuration.
properties:
kubernetesAgentProcesses:
description: KubernetesAgentProcesses defines the k3s
agent kubernetes processes configuration.
properties:
kubeProxyArgs:
description: KubeProxyArgs Customized flag for kube-proxy
process
items:
type: string
type: array
kubeletArgs:
description: KubeletArgs Customized flag for kubelet
process
items:
type: string
type: array
type: object
networking:
description: Networking defines the k3s agent networking
configuration.
@ -292,6 +325,49 @@ spec:
datastore backend communication.
type: string
type: object
kubernetesComponents:
description: KubernetesComponents is the kubernetes components
configuration.
properties:
disable:
description: 'Disable do not deploy packaged components
and delete any deployed components (valid items: coredns,
servicelb, traefik,local-storage, metrics-server).'
type: string
disableHelmController:
description: DisableHelmController disable Helm controller.
type: boolean
disableKubeProxy:
description: DisableKubeProxy disable running kube-proxy.
type: boolean
disableNetworkPolicy:
description: DisableNetworkPolicy disable k3s default
network policy controller.
type: boolean
type: object
kubernetesProcesses:
description: KubernetesProcesses is the kubernetes processes
configuration.
properties:
kubeAPIServerArg:
description: KubeAPIServerArgs is a customized flag for
kube-apiserver process
items:
type: string
type: array
kubeControllerManagerArgs:
description: KubeControllerManagerArgs is a customized
flag for kube-controller-manager process
items:
type: string
type: array
kubeSchedulerArgs:
description: KubeSchedulerArgs is a customized flag for
kube-scheduler process
items:
type: string
type: array
type: object
listener:
description: Listener is the listener configuration.
properties:
@ -435,7 +511,7 @@ spec:
rolloutAfter:
description: RolloutAfter is a field to indicate a rollout should
be performed after the specified time even if no changes have been
made to the KubeadmControlPlane.
made to the K3sControlPlane.
format: date-time
type: string
rolloutStrategy:
@ -477,7 +553,7 @@ spec:
description: K3sControlPlaneStatus defines the observed state of K3sControlPlane
properties:
conditions:
description: Conditions defines current service state of the KubeadmControlPlane.
description: Conditions defines current service state of the K3sControlPlane.
items:
description: Condition defines an observation of a Cluster API resource
operational state.
@ -540,8 +616,8 @@ spec:
format: int64
type: integer
ready:
description: Ready denotes that the KubeadmControlPlane API Server
is ready to receive requests.
description: Ready denotes that the K3sControlPlane API Server is
ready to receive requests.
type: boolean
readyReplicas:
description: Total number of fully running and ready control plane

View File

@ -43,7 +43,7 @@ spec:
properties:
spec:
description: 'K3sControlPlaneTemplateResourceSpec defines the
desired state of KubeadmControlPlane. NOTE: K3sControlPlaneTemplateResourceSpec
desired state of K3sControlPlane. NOTE: K3sControlPlaneTemplateResourceSpec
is similar to K3sControlPlaneSpec but omits Replicas and Version
fields. These fields do not make sense on the K3sControlPlaneTemplate,
because they are calculated by the Cluster topology reconciler
@ -57,6 +57,23 @@ spec:
description: AgentConfiguration defines the k3s agent
configuration.
properties:
kubernetesAgentProcesses:
description: KubernetesAgentProcesses defines the
k3s agent kubernetes processes configuration.
properties:
kubeProxyArgs:
description: KubeProxyArgs Customized flag for
kube-proxy process
items:
type: string
type: array
kubeletArgs:
description: KubeletArgs Customized flag for kubelet
process
items:
type: string
type: array
type: object
networking:
description: Networking defines the k3s agent networking
configuration.
@ -222,6 +239,23 @@ spec:
agent:
description: Agent is the agent configuration.
properties:
kubernetesAgentProcesses:
description: KubernetesAgentProcesses defines
the k3s agent kubernetes processes configuration.
properties:
kubeProxyArgs:
description: KubeProxyArgs Customized flag
for kube-proxy process
items:
type: string
type: array
kubeletArgs:
description: KubeletArgs Customized flag for
kubelet process
items:
type: string
type: array
type: object
networking:
description: Networking defines the k3s agent
networking configuration.
@ -315,6 +349,51 @@ spec:
to secure datastore backend communication.
type: string
type: object
kubernetesComponents:
description: KubernetesComponents is the kubernetes
components configuration.
properties:
disable:
description: 'Disable do not deploy packaged components
and delete any deployed components (valid items:
coredns, servicelb, traefik,local-storage, metrics-server).'
type: string
disableHelmController:
description: DisableHelmController disable Helm
controller.
type: boolean
disableKubeProxy:
description: DisableKubeProxy disable running
kube-proxy.
type: boolean
disableNetworkPolicy:
description: DisableNetworkPolicy disable k3s
default network policy controller.
type: boolean
type: object
kubernetesProcesses:
description: KubernetesProcesses is the kubernetes
processes configuration.
properties:
kubeAPIServerArg:
description: KubeAPIServerArgs is a customized
flag for kube-apiserver process
items:
type: string
type: array
kubeControllerManagerArgs:
description: KubeControllerManagerArgs is a customized
flag for kube-controller-manager process
items:
type: string
type: array
kubeSchedulerArgs:
description: KubeSchedulerArgs is a customized
flag for kube-scheduler process
items:
type: string
type: array
type: object
listener:
description: Listener is the listener configuration.
properties:
@ -397,7 +476,7 @@ spec:
rolloutAfter:
description: RolloutAfter is a field to indicate a rollout
should be performed after the specified time even if no
changes have been made to the KubeadmControlPlane.
changes have been made to the K3sControlPlane.
format: date-time
type: string
rolloutStrategy:

View File

@ -21,6 +21,7 @@ spec:
args:
- "--leader-elect"
- "--metrics-bind-addr=localhost:8080"
- "--v=5"
image: controller:latest
name: manager
env:

View File

@ -35,13 +35,13 @@ import (
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
utilconversion "sigs.k8s.io/cluster-api/util/conversion"
"sigs.k8s.io/cluster-api/util/kubeconfig"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1"
k3sCluster "github.com/kubesphere/kubekey/controlplane/k3s/pkg/cluster"
"github.com/kubesphere/kubekey/controlplane/k3s/pkg/kubeconfig"
"github.com/kubesphere/kubekey/util/secret"
)

View File

@ -180,14 +180,14 @@ func (r *K3sControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Requ
if errors.As(err, &connFailure) {
log.Info("Could not connect to workload cluster to fetch status", "err", err.Error())
} else {
log.Error(err, "Failed to update KubeadmControlPlane Status")
log.Error(err, "Failed to update K3sControlPlane Status")
retErr = kerrors.NewAggregate([]error{retErr, err})
}
}
// Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation.
// Always attempt to Patch the K3sControlPlane object and status after each reconciliation.
if err := patchK3sControlPlane(ctx, patchHelper, kcp); err != nil {
log.Error(err, "Failed to patch KubeadmControlPlane")
log.Error(err, "Failed to patch K3sControlPlane")
retErr = kerrors.NewAggregate([]error{retErr, err})
}
@ -240,10 +240,10 @@ func patchK3sControlPlane(ctx context.Context, patchHelper *patch.Helper, kcp *i
)
}
// reconcile handles KubeadmControlPlane reconciliation.
// reconcile handles K3sControlPlane reconciliation.
func (r *K3sControlPlaneReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *infracontrolplanev1.K3sControlPlane) (res ctrl.Result, retErr error) {
log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
log.Info("Reconcile KubeadmControlPlane")
log.Info("Reconcile K3sControlPlane")
// Make sure to reconcile the external infrastructure reference.
if err := r.reconcileExternalReference(ctx, cluster, &kcp.Spec.MachineTemplate.InfrastructureRef); err != nil {
@ -257,11 +257,6 @@ func (r *K3sControlPlaneReconciler) reconcile(ctx context.Context, cluster *clus
}
// Generate Cluster Certificates if needed
config := kcp.Spec.K3sConfigSpec.DeepCopy()
config.AgentConfiguration = nil
if config.ServerConfiguration == nil {
config.ServerConfiguration = &infrabootstrapv1.ServerConfiguration{}
}
certificates := secret.NewCertificatesForInitialControlPlane()
controllerRef := metav1.NewControllerRef(kcp, infracontrolplanev1.GroupVersion.WithKind("K3sControlPlane"))
if err := certificates.LookupOrGenerate(ctx, r.Client, util.ObjectKey(cluster), *controllerRef); err != nil {
@ -373,7 +368,7 @@ func (r *K3sControlPlaneReconciler) reconcile(ctx context.Context, cluster *clus
// Please see https://github.com/kubernetes-sigs/cluster-api/issues/2064.
func (r *K3sControlPlaneReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, kcp *infracontrolplanev1.K3sControlPlane) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
log.Info("Reconcile KubeadmControlPlane deletion")
log.Info("Reconcile K3sControlPlane deletion")
// Gets all machines, not just control plane machines.
allMachines, err := r.managementCluster.GetMachinesForCluster(ctx, cluster)

View File

@ -103,18 +103,17 @@ func (r *K3sControlPlaneReconciler) updateStatus(ctx context.Context, kcp *infra
if err != nil {
return err
}
log.Info("ClusterStatus", "workload", status)
kcp.Status.ReadyReplicas = status.ReadyNodes
kcp.Status.UnavailableReplicas = replicas - status.ReadyNodes
// This only gets initialized once and does not change if the k3s config map goes away.
if status.HasK3sConfig {
if kcp.Status.ReadyReplicas > 0 {
kcp.Status.Ready = true
kcp.Status.Initialized = true
conditions.MarkTrue(kcp, infracontrolplanev1.AvailableCondition)
}
if kcp.Status.ReadyReplicas > 0 {
kcp.Status.Ready = true
}
return nil
}

View File

@ -31,8 +31,8 @@ import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/controllers/remote"
@ -116,6 +116,7 @@ func InitFlags(fs *pflag.FlagSet) {
}
func main() {
klog.InitFlags(nil)
rand.Seed(time.Now().UnixNano())
InitFlags(pflag.CommandLine)

View File

@ -104,12 +104,12 @@ func (c *ControlPlane) FailureDomains() clusterv1.FailureDomains {
return c.Cluster.Status.FailureDomains
}
// Version returns the KThreesControlPlane's version.
// Version returns the K3sControlPlane's version.
func (c *ControlPlane) Version() *string {
return &c.KCP.Spec.Version
}
// MachineInfrastructureTemplateRef returns the KubeadmControlPlane's infrastructure template for Machines.
// MachineInfrastructureTemplateRef returns the K3sControlPlane's infrastructure template for Machines.
func (c *ControlPlane) MachineInfrastructureTemplateRef() *corev1.ObjectReference {
return &c.KCP.Spec.MachineTemplate.InfrastructureRef
}

View File

@ -17,6 +17,7 @@
package cluster
import (
"encoding/json"
"reflect"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -36,7 +37,7 @@ func MatchesMachineSpec(infraConfigs map[string]*unstructured.Unstructured, mach
return matchMachineTemplateMetadata(kcp, machine)
},
collections.MatchesKubernetesVersion(kcp.Spec.Version),
MatchesK3sBootstrapConfig(machineConfigs, kcp),
//MatchesK3sBootstrapConfig(machineConfigs, kcp),
MatchesTemplateClonedFrom(infraConfigs, kcp),
)
}
@ -83,6 +84,11 @@ func MatchesK3sBootstrapConfig(machineConfigs map[string]*infrabootstrapv1.K3sCo
return false
}
// Check if KCP and machine ClusterConfiguration matches, if not return
if match := matchClusterConfiguration(kcp, machine); !match {
return false
}
bootstrapRef := machine.Spec.Bootstrap.ConfigRef
if bootstrapRef == nil {
// Missing bootstrap reference should not be considered as unmatching.
@ -109,6 +115,40 @@ func MatchesK3sBootstrapConfig(machineConfigs map[string]*infrabootstrapv1.K3sCo
}
}
// matchClusterConfiguration verifies whether the KCP and machine ClusterConfiguration match.
// NOTE: Machines that have the K3sServerConfigurationAnnotation will have to match with KCP ClusterConfiguration.
// If the annotation is not present (machine is either old or adopted), we won't roll out on any possible changes
// made in KCP's ClusterConfiguration given that we don't have enough information to make a decision.
// Users should use KCP.Spec.RolloutAfter field to force a rollout in this case.
func matchClusterConfiguration(kcp *infracontrolplanev1.K3sControlPlane, machine *clusterv1.Machine) bool {
machineClusterConfigStr, ok := machine.GetAnnotations()[infracontrolplanev1.K3sServerConfigurationAnnotation]
if !ok {
// We don't have enough information to make a decision; don't trigger a rollout.
return true
}
machineClusterConfig := &infrabootstrapv1.Cluster{}
// ClusterConfiguration annotation is not correct; the only solution is to trigger a rollout.
// The call to json.Unmarshal has to take a pointer to the pointer struct defined above,
// otherwise we won't be able to handle a nil ClusterConfiguration (that is serialized into "null").
// See https://github.com/kubernetes-sigs/cluster-api/issues/3353.
if err := json.Unmarshal([]byte(machineClusterConfigStr), &machineClusterConfig); err != nil {
return false
}
// If any of the compared values are nil, treat them the same as an empty ClusterConfiguration.
if machineClusterConfig == nil {
machineClusterConfig = &infrabootstrapv1.Cluster{}
}
kcpLocalClusterConfiguration := kcp.Spec.K3sConfigSpec.Cluster
if kcpLocalClusterConfiguration == nil {
kcpLocalClusterConfiguration = &infrabootstrapv1.Cluster{}
}
// Compare and return.
return reflect.DeepEqual(machineClusterConfig, kcpLocalClusterConfiguration)
}
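The nil handling above relies on a subtle encoding/json behavior: unmarshalling the JSON literal "null" into a pointer sets that pointer to nil, which is why the call passes a pointer to the pointer and re-checks for nil afterwards. A minimal standalone sketch of that behavior (illustration only, using a stand-in Cluster type rather than the real API type):

package main

import (
	"encoding/json"
	"fmt"
)

// Cluster is a stand-in for the bootstrap API type used above.
type Cluster struct {
	Server string `json:"server,omitempty"`
}

func main() {
	cfg := &Cluster{}

	// Unmarshalling "null" into a pointer sets the pointer to nil instead of
	// leaving the zero value in place.
	if err := json.Unmarshal([]byte(`null`), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg == nil) // true

	// Callers therefore have to substitute an empty value before comparing.
	if cfg == nil {
		cfg = &Cluster{}
	}
	fmt.Printf("%+v\n", *cfg) // {Server:}
}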
// matchInitOrJoinConfiguration verifies whether the KCP and machine ServerConfiguration or AgentConfiguration match.
// NOTE: By extension this method takes care of detecting changes in other fields of the K3sConfig configuration (e.g. Files, Mounts etc.)
func matchInitOrJoinConfiguration(machineConfig *infrabootstrapv1.K3sConfig, kcp *infracontrolplanev1.K3sControlPlane) bool {
@ -163,28 +203,14 @@ func cleanupConfigFields(kcpConfig *infrabootstrapv1.K3sConfigSpec, machineConfi
kcpConfig.Cluster = nil
machineConfig.Spec.Cluster = nil
kcpConfig.ServerConfiguration = nil
machineConfig.Spec.ServerConfiguration = nil
// If the KCP AgentConfiguration is not present, set the machine AgentConfiguration to nil (nothing can trigger rollout here).
// NOTE: this is required because the bootstrap provider applies an empty AgentConfiguration in case none is provided.
if kcpConfig.AgentConfiguration == nil {
machineConfig.Spec.AgentConfiguration = nil
}
// Cleanup the Cluster (join/discovery) info from kcpConfig and machineConfig, because that info is relevant only for
// the join process and not for comparing the configuration of the machine.
emptyDiscovery := &infrabootstrapv1.Cluster{}
if kcpConfig.Cluster != nil {
kcpConfig.Cluster = emptyDiscovery
}
if machineConfig.Spec.Cluster != nil {
machineConfig.Spec.Cluster = emptyDiscovery
}
// If the KCP Cluster.Server is not set, clear the machine Cluster.Server as well (nothing can trigger rollout here).
// NOTE: this is required because the bootstrap provider may apply an empty Cluster.Server in case none is provided.
if kcpConfig.Cluster != nil && kcpConfig.Cluster.Server == "" &&
machineConfig.Spec.Cluster != nil {
machineConfig.Spec.Cluster.Server = ""
}
}
// matchMachineTemplateMetadata matches the machine template object meta information,

View File

@ -26,8 +26,7 @@ import (
)
const (
labelNodeRoleOldControlPlane = "node-role.kubernetes.io/master" // Deprecated: https://github.com/kubernetes/kubeadm/issues/2200
labelNodeRoleControlPlane = "node-role.kubernetes.io/control-plane"
labelNodeRoleControlPlane = "node-role.kubernetes.io/master"
)
// WorkloadCluster defines all behaviors necessary to upgrade kubernetes on a workload cluster
@ -51,33 +50,30 @@ type Status struct {
Nodes int32
// ReadyNodes are the count of nodes that are reporting ready
ReadyNodes int32
// HasK3sConfig will be true if the kubeadm config map has been uploaded, false otherwise.
HasK3sConfig bool
}
func (w *Workload) getControlPlaneNodes(ctx context.Context) (*corev1.NodeList, error) {
controlPlaneNodes := &corev1.NodeList{}
controlPlaneNodeNames := sets.NewString()
for _, label := range []string{labelNodeRoleOldControlPlane, labelNodeRoleControlPlane} {
nodes := &corev1.NodeList{}
if err := w.Client.List(ctx, nodes, ctrlclient.MatchingLabels(map[string]string{
label: "",
})); err != nil {
return nil, err
nodes := &corev1.NodeList{}
labels := map[string]string{
labelNodeRoleControlPlane: "true",
}
if err := w.Client.List(ctx, nodes, ctrlclient.MatchingLabels(labels)); err != nil {
return nil, err
}
for i := range nodes.Items {
node := nodes.Items[i]
// Continue if we already added that node.
if controlPlaneNodeNames.Has(node.Name) {
continue
}
for i := range nodes.Items {
node := nodes.Items[i]
// Continue if we already added that node.
if controlPlaneNodeNames.Has(node.Name) {
continue
}
controlPlaneNodeNames.Insert(node.Name)
controlPlaneNodes.Items = append(controlPlaneNodes.Items, node)
}
controlPlaneNodeNames.Insert(node.Name)
controlPlaneNodes.Items = append(controlPlaneNodes.Items, node)
}
return controlPlaneNodes, nil

View File

@ -29,6 +29,7 @@ import (
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/cluster-api/util/collections"
"sigs.k8s.io/cluster-api/util/conditions"
ctrl "sigs.k8s.io/controller-runtime"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1"
@ -84,6 +85,9 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane
// components running in a static pod generated by kubeadm. This operation is best effort, in the sense that in case
// of problems in retrieving the pod status, it sets the condition to Unknown state without returning any error.
func (w *Workload) UpdateAgentConditions(ctx context.Context, controlPlane *ControlPlane) {
log := ctrl.LoggerFrom(ctx)
log.Info("Updating Agent conditions")
allMachinePodConditions := []clusterv1.ConditionType{
infracontrolplanev1.MachineAgentHealthyCondition,
}

View File

@ -0,0 +1,189 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package kubeconfig implements the kubeconfig generation logic.
package kubeconfig
import (
"context"
"crypto"
"crypto/x509"
"fmt"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/certs"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/kubesphere/kubekey/util/secret"
)
var (
// ErrDependentCertificateNotFound is returned when the dependent certificate is not found.
ErrDependentCertificateNotFound = errors.New("could not find secret ca")
)
// New creates a new Kubeconfig using the cluster name and specified endpoint.
func New(clusterName, endpoint string, clientCACert *x509.Certificate, clientCAKey crypto.Signer, serverCACert *x509.Certificate) (*api.Config, error) {
cfg := &certs.Config{
CommonName: "kubernetes-admin",
Organization: []string{"system:masters"},
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
}
clientKey, err := certs.NewPrivateKey()
if err != nil {
return nil, errors.Wrap(err, "unable to create private key")
}
clientCert, err := cfg.NewSignedCert(clientKey, clientCACert, clientCAKey)
if err != nil {
return nil, errors.Wrap(err, "unable to sign certificate")
}
userName := fmt.Sprintf("%s-admin", clusterName)
contextName := fmt.Sprintf("%s@%s", userName, clusterName)
return &api.Config{
Clusters: map[string]*api.Cluster{
clusterName: {
Server: endpoint,
CertificateAuthorityData: certs.EncodeCertPEM(serverCACert),
},
},
Contexts: map[string]*api.Context{
contextName: {
Cluster: clusterName,
AuthInfo: userName,
},
},
AuthInfos: map[string]*api.AuthInfo{
userName: {
ClientKeyData: certs.EncodePrivateKeyPEM(clientKey),
ClientCertificateData: certs.EncodeCertPEM(clientCert),
},
},
CurrentContext: contextName,
}, nil
}
// CreateSecret creates the Kubeconfig secret for the given cluster.
func CreateSecret(ctx context.Context, c client.Client, cluster *clusterv1.Cluster) error {
name := util.ObjectKey(cluster)
return CreateSecretWithOwner(ctx, c, name, cluster.Spec.ControlPlaneEndpoint.String(), metav1.OwnerReference{
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Cluster",
Name: cluster.Name,
UID: cluster.UID,
})
}
// CreateSecretWithOwner creates the Kubeconfig secret for the given cluster name, namespace, endpoint, and owner reference.
func CreateSecretWithOwner(ctx context.Context, c client.Client, clusterName client.ObjectKey, endpoint string, owner metav1.OwnerReference) error {
server := fmt.Sprintf("https://%s", endpoint)
out, err := generateKubeconfig(ctx, c, clusterName, server)
if err != nil {
return err
}
return c.Create(ctx, GenerateSecretWithOwner(clusterName, out, owner))
}
// GenerateSecret returns a Kubernetes secret for the given Cluster and kubeconfig data.
func GenerateSecret(cluster *clusterv1.Cluster, data []byte) *corev1.Secret {
name := util.ObjectKey(cluster)
return GenerateSecretWithOwner(name, data, metav1.OwnerReference{
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Cluster",
Name: cluster.Name,
UID: cluster.UID,
})
}
// GenerateSecretWithOwner returns a Kubernetes secret for the given Cluster name, namespace, kubeconfig data, and ownerReference.
func GenerateSecretWithOwner(clusterName client.ObjectKey, data []byte, owner metav1.OwnerReference) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secret.Name(clusterName.Name, secret.Kubeconfig),
Namespace: clusterName.Namespace,
Labels: map[string]string{
clusterv1.ClusterLabelName: clusterName.Name,
},
OwnerReferences: []metav1.OwnerReference{
owner,
},
},
Data: map[string][]byte{
secret.KubeconfigDataName: data,
},
}
}
func generateKubeconfig(ctx context.Context, c client.Client, clusterName client.ObjectKey, endpoint string) ([]byte, error) {
clusterCA, err := secret.GetFromNamespacedName(ctx, c, clusterName, secret.ClusterCA)
if err != nil {
if apierrors.IsNotFound(errors.Cause(err)) {
return nil, ErrDependentCertificateNotFound
}
return nil, err
}
clientClusterCA, err := secret.GetFromNamespacedName(ctx, c, clusterName, secret.ClientClusterCA)
if err != nil {
if apierrors.IsNotFound(errors.Cause(err)) {
return nil, ErrDependentCertificateNotFound
}
return nil, err
}
clientCACert, err := certs.DecodeCertPEM(clientClusterCA.Data[secret.TLSCrtDataName])
if err != nil {
return nil, errors.Wrap(err, "failed to decode CA Cert")
} else if clientCACert == nil {
return nil, errors.New("certificate not found in config")
}
clientCAKey, err := certs.DecodePrivateKeyPEM(clientClusterCA.Data[secret.TLSKeyDataName])
if err != nil {
return nil, errors.Wrap(err, "failed to decode private key")
} else if clientCAKey == nil {
return nil, errors.New("CA private key not found")
}
serverCACert, err := certs.DecodeCertPEM(clusterCA.Data[secret.TLSCrtDataName])
if err != nil {
return nil, errors.Wrap(err, "failed to decode CA Cert")
} else if serverCACert == nil {
return nil, errors.New("certificate not found in config")
}
cfg, err := New(clusterName.Name, endpoint, clientCACert, clientCAKey, serverCACert)
if err != nil {
return nil, errors.Wrap(err, "failed to generate a kubeconfig")
}
out, err := clientcmd.Write(*cfg)
if err != nil {
return nil, errors.Wrap(err, "failed to serialize config to yaml")
}
return out, nil
}
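A hedged sketch of how a caller might use this package: the reconciler-side function name and control flow below are illustrative assumptions, not code from this commit; only kubeconfig.CreateSecret and kubeconfig.ErrDependentCertificateNotFound come from the file above.

package example

import (
	"context"
	"errors"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/kubesphere/kubekey/controlplane/k3s/pkg/kubeconfig"
)

// reconcileKubeconfig ensures the admin kubeconfig secret exists for a cluster,
// tolerating the case where the cluster CA secrets have not been generated yet.
func reconcileKubeconfig(ctx context.Context, c client.Client, cluster *clusterv1.Cluster) error {
	err := kubeconfig.CreateSecret(ctx, c, cluster)
	switch {
	case err == nil:
		return nil
	case errors.Is(err, kubeconfig.ErrDependentCertificateNotFound):
		// CA secrets are not ready yet; a real reconciler would requeue and retry.
		return nil
	case apierrors.IsAlreadyExists(err):
		// Another reconciliation already created the secret.
		return nil
	default:
		return err
	}
}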

10
go.mod
View File

@ -51,6 +51,7 @@ require (
sigs.k8s.io/cluster-api v1.2.4
sigs.k8s.io/cluster-api/test v1.2.4
sigs.k8s.io/controller-runtime v0.12.3
sigs.k8s.io/kind v0.14.0
sigs.k8s.io/yaml v1.3.0
)
@ -228,12 +229,12 @@ require (
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.23.0 // indirect
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
golang.org/x/net v0.1.0 // indirect
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/sys v0.1.0 // indirect
golang.org/x/term v0.1.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 // indirect
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
@ -250,7 +251,6 @@ require (
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
oras.land/oras-go v1.2.0 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/kind v0.14.0 // indirect
sigs.k8s.io/kustomize/api v0.12.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect

14
go.sum
View File

@ -1359,8 +1359,9 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1517,13 +1518,13 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc=
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1532,8 +1533,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

View File

@ -149,6 +149,14 @@ func (i *InstanceScope) KubernetesVersion() string {
return *i.Machine.Spec.Version
}
// IsControlPlane returns whether the KKInstance is a control plane node.
func (i *InstanceScope) IsControlPlane() bool {
if _, ok := i.Machine.GetLabels()[clusterv1.MachineControlPlaneLabelName]; ok {
return true
}
return false
}
// GetRawBootstrapDataWithFormat returns the raw bootstrap data from the corresponding machine.spec.bootstrap.
func (i *InstanceScope) GetRawBootstrapDataWithFormat(ctx context.Context) ([]byte, bootstrapv1.Format, error) {
if i.Machine.Spec.Bootstrap.DataSecretName == nil {

View File

@ -17,7 +17,9 @@
package k3s
import (
"embed"
"path/filepath"
"text/template"
"time"
infrav1 "github.com/kubesphere/kubekey/api/v1beta1"
@ -26,11 +28,17 @@ import (
"github.com/kubesphere/kubekey/pkg/service/util"
)
//go:embed templates
var f embed.FS
// Download downloads binaries.
func (s *Service) Download(timeout time.Duration) error {
if err := s.DownloadAll(timeout); err != nil {
return err
}
if err := s.GenerateK3sInstallScript(); err != nil {
return err
}
return nil
}
@ -76,3 +84,29 @@ func (s *Service) DownloadAll(timeout time.Duration) error {
return nil
}
// GenerateK3sInstallScript generates the k3s install script.
func (s *Service) GenerateK3sInstallScript() error {
temp, err := template.ParseFS(f, "templates/k3s-install.sh")
if err != nil {
return err
}
svc, err := s.getTemplateService(
temp,
nil,
filepath.Join(file.BinDir, temp.Name()))
if err != nil {
return err
}
if err := svc.RenderToLocal(); err != nil {
return err
}
if err := svc.Copy(true); err != nil {
return err
}
if err := svc.Chmod("+x"); err != nil {
return err
}
return nil
}
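The function above leans on the embed + text/template pairing: the install script is compiled into the binary, resolved at runtime with template.ParseFS, and temp.Name() yields the base file name ("k3s-install.sh") used for the destination path. A standalone sketch of just that pairing (it assumes a local templates/k3s-install.sh file and is not the project's rendering code):

package main

import (
	"embed"
	"os"
	"text/template"
)

//go:embed templates
var scripts embed.FS

func main() {
	// ParseFS resolves paths against the embedded filesystem root; the parsed
	// template's Name() is the base name of the file, e.g. "k3s-install.sh".
	tmpl, err := template.ParseFS(scripts, "templates/k3s-install.sh")
	if err != nil {
		panic(err)
	}

	// Render with no data, mirroring the nil data passed to getTemplateService above.
	if err := tmpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}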

View File

@ -17,6 +17,8 @@
package k3s
import (
"text/template"
"github.com/kubesphere/kubekey/pkg/clients/ssh"
"github.com/kubesphere/kubekey/pkg/scope"
"github.com/kubesphere/kubekey/pkg/service/operation"
@ -30,8 +32,9 @@ type Service struct {
scope scope.KKInstanceScope
instanceScope *scope.InstanceScope
k3sFactory func(sshClient ssh.Interface, version, arch string) (operation.Binary, error)
kubecniFactory func(sshClient ssh.Interface, version, arch string) (operation.Binary, error)
templateFactory func(sshClient ssh.Interface, template *template.Template, data file.Data, dst string) (operation.Template, error)
k3sFactory func(sshClient ssh.Interface, version, arch string) (operation.Binary, error)
kubecniFactory func(sshClient ssh.Interface, version, arch string) (operation.Binary, error)
}
// NewService returns a new service given the remote instance.
@ -43,6 +46,13 @@ func NewService(sshClient ssh.Interface, scope scope.KKInstanceScope, instanceSc
}
}
func (s *Service) getTemplateService(template *template.Template, data file.Data, dst string) (operation.Template, error) {
if s.templateFactory != nil {
return s.templateFactory(s.sshClient, template, data, dst)
}
return file.NewTemplate(s.sshClient, s.scope.RootFs(), template, data, dst)
}
func (s *Service) getK3sService(version, arch string) (operation.Binary, error) {
if s.k3sFactory != nil {
return s.k3sFactory(s.sshClient, version, arch)

View File

@ -202,3 +202,13 @@ func (s *Service) DaemonReload() error {
_, _ = s.sshClient.SudoCmd("systemctl restart containerd")
return nil
}
// UninstallK3s uninstalls k3s from the node.
func (s *Service) UninstallK3s() error {
if s.instanceScope.IsControlPlane() {
_, _ = s.sshClient.SudoCmd("/usr/local/bin/k3s-uninstall.sh")
} else {
_, _ = s.sshClient.SudoCmd("/usr/local/bin/k3s-agent-uninstall.sh")
}
return nil
}

View File

@ -33,6 +33,7 @@ type Bootstrap interface {
ResetNetwork() error
RemoveFiles() error
DaemonReload() error
UninstallK3s() error
}
// Repository is the interface for repository provision.

View File

@ -419,14 +419,18 @@ var (
},
k3s: {
amd64: {
"v1.20.2": "ce3055783cf115ee68fc00bb8d25421d068579ece2fafa4ee1d09f3415aaeabf",
"v1.20.4": "1c7b68b0b7d54f21a9c1727545a7db181668115f161a3986bc137261dd817e98",
"v1.21.4": "47e686ad5390670da79a467ba94399d72e472364bc064a20fecd3937a8d928b5",
"v1.21.6": "89eb5f3d12524d0a9d5b56ba3e2707b106e1731dd0e6d2e7b898ac585f4959df",
"v1.20.2": "ce3055783cf115ee68fc00bb8d25421d068579ece2fafa4ee1d09f3415aaeabf",
"v1.20.4": "1c7b68b0b7d54f21a9c1727545a7db181668115f161a3986bc137261dd817e98",
"v1.21.4": "47e686ad5390670da79a467ba94399d72e472364bc064a20fecd3937a8d928b5",
"v1.21.6": "89eb5f3d12524d0a9d5b56ba3e2707b106e1731dd0e6d2e7b898ac585f4959df",
"v1.23.13": "334b42a96a65d7e54555827ca31f80896bf18978952f5aa5b8bb83bfdff3db0b",
"v1.24.7": "ec346c909f23b32f9ab7c3ccf5bfa74c89a0515191701ede83556345b70abdca",
},
arm64: {
"v1.21.4": "b7f8c026c5346b3e894d731f1dc2490cd7281687549f34c28a849f58c62e3e48",
"v1.21.6": "1f06a2da0e1e8596220a5504291ce69237979ebf520e2458c2d72573945a9c1d",
"v1.21.4": "b7f8c026c5346b3e894d731f1dc2490cd7281687549f34c28a849f58c62e3e48",
"v1.21.6": "1f06a2da0e1e8596220a5504291ce69237979ebf520e2458c2d72573945a9c1d",
"v1.23.13": "690a9e278d3e9b4b3884c787770ddda4b4d04490b09732913edaa9e9ede05c88",
"v1.24.7": "191de5ae89cd36a8e3a7582778c9839e90c6685e6ba32d0fb1249039f92a9058",
},
},
docker: {

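The pinned SHA256 sums above are presumably what a downloaded k3s binary gets verified against before installation; the exact verification code is not part of this hunk. A minimal standalone sketch of such a check (hypothetical helper, the binary path is a placeholder):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// verifyChecksum compares the SHA256 of a file on disk against an expected hex digest.
func verifyChecksum(path, expected string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	actual := hex.EncodeToString(h.Sum(nil))
	if actual != expected {
		return fmt.Errorf("checksum mismatch: got %s, want %s", actual, expected)
	}
	return nil
}

func main() {
	// Example: verify a locally downloaded k3s v1.24.7 amd64 binary against the pinned sum above.
	if err := verifyChecksum("/tmp/k3s", "ec346c909f23b32f9ab7c3ccf5bfa74c89a0515191701ede83556345b70abdca"); err != nil {
		fmt.Println(err)
	}
}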
View File

@ -53,13 +53,12 @@ func NewK3s(sshClient ssh.Interface, rootFs rootfs.Interface, version, arch stri
return nil, err
}
if arch == "amd64" {
arch = ""
} else {
arch = "-" + arch
var urlArchPath string
if arch != "amd64" {
urlArchPath = "-" + arch
}
u := parseURL(DefaultDownloadHostGoogle, fmt.Sprintf(K3sURLPathTmpl, version, arch))
u := parseURL(DefaultDownloadHost, fmt.Sprintf(K3sURLPathTmpl, version, urlArchPath))
binary := NewBinary(BinaryParams{
File: file,
ID: K3sID,

View File

@ -70,13 +70,20 @@ $(KIND_BIN): $(KIND) ## Build a local copy of KIND
## --------------------------------------
KUBEKEY_TEMPLATES := $(REPO_ROOT)/test/e2e/data/infrastructure-kubekey
KUBEKEY_K3S_TEMPLATES := $(REPO_ROOT)/test/e2e/data/k3s
.PHONY: cluster-templates
cluster-templates: $(KUSTOMIZE) cluster-templates-v1beta1 ## Generate cluster templates for all versions
.PHONY: cluster-templates-k3s
cluster-templates-k3s: $(KUSTOMIZE) cluster-templates-k3s-v1beta1 ## Generate k3s cluster templates for all versions
cluster-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1beta1
$(KUSTOMIZE) build $(KUBEKEY_TEMPLATES)/v1beta1/cluster-template --load-restrictor LoadRestrictionsNone > $(KUBEKEY_TEMPLATES)/v1beta1/cluster-template.yaml
cluster-templates-k3s-v1beta1: $(KUSTOMIZE) ## Generate k3s cluster templates for v1beta1
$(KUSTOMIZE) build $(KUBEKEY_K3S_TEMPLATES)/v1beta1/cluster-template --load-restrictor LoadRestrictionsNone > $(KUBEKEY_K3S_TEMPLATES)/v1beta1/cluster-template.yaml
## --------------------------------------
## Testing
## --------------------------------------
@ -99,14 +106,14 @@ endif
.PHONY: run
run: $(GINKGO) $(KIND) cluster-templates ## Run the end-to-end tests
$(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) $(GINKGO_ARGS) . -- \
$(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) $(GINKGO_ARGS) ./suites/capkk/... -- \
-e2e.artifacts-folder="$(ARTIFACTS)" \
-e2e.config="$(E2E_CONF_FILE)" \
-e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER)
.PHONY: run-k3s
run-k3s: $(GINKGO) $(KIND) cluster-templates ## Run the end-to-end tests
$(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) $(GINKGO_ARGS) . -- \
run-k3s: $(GINKGO) $(KIND) cluster-templates-k3s ## Run the k3s end-to-end tests
$(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) $(GINKGO_ARGS) ./suites/k3s/... -- \
-e2e.artifacts-folder="$(ARTIFACTS_K3S)" \
-e2e.config="$(E2E_K3S_CONF_FILE)" \
-e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER)

View File

@ -38,7 +38,7 @@ providers:
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- name: kubeadm
- name: k3s
type: BootstrapProvider
versions:
- name: v3.0.0
@ -53,7 +53,7 @@ providers:
new: --metrics-bind-addr=:8080
- old: docker.io/kubespheredev/k3s-bootstrap-controller:main
new: docker.io/kubespheredev/k3s-bootstrap-controller-amd64:e2e
- name: kubeadm
- name: k3s
type: ControlPlaneProvider
versions:
- name: v3.0.0
@ -80,7 +80,7 @@ providers:
- old: docker.io/kubespheredev/capkk-controller:main
new: docker.io/kubespheredev/capkk-controller-amd64:e2e
files:
- sourcePath: "../data/infrastructure-kubekey/v1beta1/cluster-template.yaml"
- sourcePath: "../data/k3s/v1beta1/cluster-template.yaml"
- sourcePath: "../data/shared/v1beta1_provider/metadata.yaml"
variables:
@ -89,18 +89,18 @@ variables:
# The following Kubernetes versions should be the latest versions with already published kindest/node images.
# This avoids building node images in the default case which improves the test duration significantly.
KUBERNETES_VERSION_MANAGEMENT: "v1.24.0"
KUBERNETES_VERSION: "v1.24.0"
CNI: "./data/cni/calico.yaml"
KUBERNETES_VERSION: "v1.24.7"
CNI: "../../data/cni/calico.yaml"
EVENT_BRIDGE_INSTANCE_STATE: "true"
EXP_CLUSTER_RESOURCE_SET: "true"
IP_FAMILY: "IPv4"
SERVICE_CIDRS: "10.233.0.0/18"
POD_CIDRS: "10.233.64.0/18"
SERVICE_DOMAIN: "cluster.local"
KKZONE: "cn"
KKZONE: ""
USER_NAME: "ubuntu"
PASSWORD: "Qcloud@123"
INSTANCES: "[{address: 192.168.100.3}, {address: 192.168.100.4}]"
INSTANCES: "[{address: 192.168.100.3}, {address: 192.168.100.4}, {address: 192.168.100.5}]"
CONTROL_PLANE_ENDPOINT_IP: "192.168.100.100"
intervals:

View File

@ -17,7 +17,7 @@ spec:
host: '${CONTROL_PLANE_ENDPOINT_IP}'
---
# Cluster object with
# - Reference to the KubeadmControlPlane object
# - Reference to the K3sControlPlane object
# - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet.
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
@ -63,72 +63,134 @@ spec:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
name: "${CLUSTER_NAME}-control-plane"
k3sConfigSpec:
serverConfiguration:
listener:
tlsSan: ${CONTROL_PLANE_ENDPOINT_IP}
networking:
flannelBackend: none
kubernetesComponents:
disable: "servicelb,traefik,metrics-server,local-storage"
disableNetworkPolicy: true
files:
- content: |
apiVersion: v1
kind: Pod
kind: ServiceAccount
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
name: system:kube-vip-role
rules:
- apiGroups: [""]
resources: ["services", "services/status", "nodes", "endpoints"]
verbs: ["list","get","watch", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["list", "get", "watch", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-vip-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-vip-role
subjects:
- kind: ServiceAccount
name: kube-vip
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: kube-vip-ds
app.kubernetes.io/version: v0.5.0
name: kube-vip-ds
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: address
value: ${CONTROL_PLANE_ENDPOINT_IP}
- name: vip_interface
value: ${VIP_NETWORK_INTERFACE=""}
- name: vip_arp
value: "true"
- name: port
value: "6443"
- name: vip_cidr
value: "32"
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_ddns
value: "false"
- name: svc_enable
value: "true"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: lb_enable
value: "true"
- name: lb_port
value: "6443"
image: ghcr.io/kube-vip/kube-vip:v0.5.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostNetwork: true
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
volumes:
- hostPath:
path: /etc/kubernetes/admin.conf
type: FileOrCreate
name: kubeconfig
status: {}
selector:
matchLabels:
app.kubernetes.io/name: kube-vip-ds
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: kube-vip-ds
app.kubernetes.io/version: v0.5.0
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- manager
env:
- name: address
value: ${CONTROL_PLANE_ENDPOINT_IP}
- name: vip_interface
value: ${VIP_NETWORK_INTERFACE=""}
- name: vip_arp
value: "true"
- name: port
value: "6443"
- name: vip_cidr
value: "32"
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_ddns
value: "false"
- name: svc_enable
value: "true"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: lb_enable
value: "true"
- name: lb_port
value: "6443"
- name: lb_fwdmethod
value: local
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v0.5.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
hostNetwork: true
serviceAccountName: kube-vip
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
updateStrategy: {}
owner: root:root
path: /etc/kubernetes/manifests/kube-vip.yaml
path: /var/lib/rancher/k3s/server/manifests/kube-vip.yaml
version: "${KUBERNETES_VERSION}"

View File

@ -0,0 +1,184 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"encoding/json"
"fmt"
"os"
"path"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
. "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
)
// GetCAPIResourcesInput is the input for GetCAPIResources.
type GetCAPIResourcesInput struct {
Lister Lister
Namespace string
}
// GetCAPIResources reads all the CAPI resources in a namespace.
// This list includes all the types belonging to CAPI providers.
func GetCAPIResources(ctx context.Context, input GetCAPIResourcesInput) []*unstructured.Unstructured {
Expect(ctx).NotTo(BeNil(), "ctx is required for GetCAPIResources")
Expect(input.Lister).NotTo(BeNil(), "input.Deleter is required for GetCAPIResources")
Expect(input.Namespace).NotTo(BeEmpty(), "input.Namespace is required for GetCAPIResources")
types := getClusterAPITypes(ctx, input.Lister)
objList := []*unstructured.Unstructured{}
for i := range types {
typeMeta := types[i]
typeList := new(unstructured.UnstructuredList)
typeList.SetAPIVersion(typeMeta.APIVersion)
typeList.SetKind(typeMeta.Kind)
if err := input.Lister.List(ctx, typeList, client.InNamespace(input.Namespace)); err != nil {
if apierrors.IsNotFound(err) {
continue
}
Fail(fmt.Sprintf("failed to list %q resources: %v", typeList.GroupVersionKind(), err))
}
for i := range typeList.Items {
obj := typeList.Items[i]
objList = append(objList, &obj)
}
}
return objList
}
// getClusterAPITypes returns the list of TypeMeta to be considered for the move discovery phase.
// This list includes all the types belonging to CAPI providers.
func getClusterAPITypes(ctx context.Context, lister Lister) []metav1.TypeMeta {
discoveredTypes := []metav1.TypeMeta{}
crdList := &apiextensionsv1.CustomResourceDefinitionList{}
Eventually(func() error {
return lister.List(ctx, crdList, capiProviderOptions()...)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "failed to list CRDs for CAPI providers")
for _, crd := range crdList.Items {
for _, version := range crd.Spec.Versions {
if !version.Storage {
continue
}
discoveredTypes = append(discoveredTypes, metav1.TypeMeta{
Kind: crd.Spec.Names.Kind,
APIVersion: metav1.GroupVersion{
Group: crd.Spec.Group,
Version: version.Name,
}.String(),
})
}
}
return discoveredTypes
}
// DumpAllResourcesInput is the input for DumpAllResources.
type DumpAllResourcesInput struct {
Lister Lister
Namespace string
LogPath string
}
// DumpAllResources dumps Cluster API related resources to YAML
// This dump includes all the types belonging to CAPI providers.
func DumpAllResources(ctx context.Context, input DumpAllResourcesInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for DumpAllResources")
Expect(input.Lister).NotTo(BeNil(), "input.Deleter is required for DumpAllResources")
Expect(input.Namespace).NotTo(BeEmpty(), "input.Namespace is required for DumpAllResources")
resources := GetCAPIResources(ctx, GetCAPIResourcesInput{
Lister: input.Lister,
Namespace: input.Namespace,
})
for i := range resources {
r := resources[i]
dumpObject(r, input.LogPath)
}
}
func dumpObject(resource runtime.Object, logPath string) {
resourceYAML, err := yaml.Marshal(resource)
Expect(err).ToNot(HaveOccurred(), "Failed to marshal %s", resource.GetObjectKind().GroupVersionKind().String())
metaObj, err := apimeta.Accessor(resource)
Expect(err).ToNot(HaveOccurred(), "Failed to get accessor for %s", resource.GetObjectKind().GroupVersionKind().String())
kind := resource.GetObjectKind().GroupVersionKind().Kind
namespace := metaObj.GetNamespace()
name := metaObj.GetName()
resourceFilePath := filepath.Clean(path.Join(logPath, namespace, kind, name+".yaml"))
Expect(os.MkdirAll(filepath.Dir(resourceFilePath), 0750)).To(Succeed(), "Failed to create folder %s", filepath.Dir(resourceFilePath))
f, err := os.OpenFile(resourceFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
Expect(err).ToNot(HaveOccurred(), "Failed to open %s", resourceFilePath)
defer f.Close()
Expect(os.WriteFile(f.Name(), resourceYAML, 0600)).To(Succeed(), "Failed to write %s", resourceFilePath)
}
// capiProviderOptions returns a set of ListOptions that allows to identify all the objects belonging to Cluster API providers.
func capiProviderOptions() []client.ListOption {
return []client.ListOption{
client.HasLabels{clusterv1.ProviderLabelName},
}
}
// CreateRelatedResourcesInput is the input type for CreateRelatedResources.
type CreateRelatedResourcesInput struct {
Creator Creator
RelatedResources []client.Object
}
// CreateRelatedResources is used to create runtime.Objects.
func CreateRelatedResources(ctx context.Context, input CreateRelatedResourcesInput, intervals ...interface{}) {
By("creating related resources")
for i := range input.RelatedResources {
obj := input.RelatedResources[i]
Byf("creating a/an %s resource", obj.GetObjectKind().GroupVersionKind())
Eventually(func() error {
return input.Creator.Create(ctx, obj)
}, intervals...).Should(Succeed(), "failed to create %s", obj.GetObjectKind().GroupVersionKind())
}
}
// PrettyPrint returns a formatted JSON version of the object given.
func PrettyPrint(v interface{}) string {
b, err := json.MarshalIndent(v, "", " ")
if err != nil {
return err.Error()
}
return string(b)
}
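A hedged sketch of how these dump helpers are typically wired into a suite teardown; the helper name and folder layout below are illustrative assumptions, while DumpAllResources and DumpAllResourcesInput come from the file above.

package framework

import (
	"context"
	"path/filepath"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// dumpSpecResources is a hypothetical teardown helper (illustrative only) that writes
// every CAPI resource in the test namespace to the artifacts folder.
func dumpSpecResources(ctx context.Context, mgmtClient client.Client, namespace, artifactFolder string) {
	DumpAllResources(ctx, DumpAllResourcesInput{
		Lister:    mgmtClient,
		Namespace: namespace,
		LogPath:   filepath.Join(artifactFolder, "clusters", namespace, "resources"),
	})
}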

View File

@ -0,0 +1,36 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package bootstrap implements bootstrap functionality for e2e testing.
package bootstrap
import "context"
// ClusterProvider defines the behavior of a type that is responsible for provisioning and managing a Kubernetes cluster.
type ClusterProvider interface {
// Create a Kubernetes cluster.
// Generally to be used in the BeforeSuite function to create a Kubernetes cluster to be shared between tests.
Create(context.Context)
// GetKubeconfigPath returns the path to the kubeconfig file to be used to access the Kubernetes cluster.
GetKubeconfigPath() string
// Dispose will completely clean up the provisioned cluster.
// This should be implemented as a synchronous function.
// Generally to be used in the AfterSuite function if a Kubernetes cluster is shared between tests.
// Should try to clean everything up and report any dangling artifacts that need manual intervention.
Dispose(context.Context)
}

View File

@ -0,0 +1,204 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"context"
"fmt"
"os"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
kindv1 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
kind "sigs.k8s.io/kind/pkg/cluster"
"sigs.k8s.io/kind/pkg/cmd"
"sigs.k8s.io/kind/pkg/exec"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
const (
// DefaultNodeImageRepository is the default node image repository to be used for testing.
DefaultNodeImageRepository = "kindest/node"
// DefaultNodeImageVersion is the default Kubernetes version to be used for creating a kind cluster.
DefaultNodeImageVersion = "v1.25.0"
)
// KindClusterOption is a NewKindClusterProvider option.
type KindClusterOption interface {
apply(*KindClusterProvider)
}
type kindClusterOptionAdapter func(*KindClusterProvider)
func (adapter kindClusterOptionAdapter) apply(kindClusterProvider *KindClusterProvider) {
adapter(kindClusterProvider)
}
// WithNodeImage implements a New Option that instructs the kindClusterProvider to use a specific node image / Kubernetes version.
func WithNodeImage(image string) KindClusterOption {
return kindClusterOptionAdapter(func(k *KindClusterProvider) {
k.nodeImage = image
})
}
// WithDockerSockMount implements a New Option that instructs the kindClusterProvider to mount /var/run/docker.sock into
// the new kind cluster.
func WithDockerSockMount() KindClusterOption {
return kindClusterOptionAdapter(func(k *KindClusterProvider) {
k.withDockerSock = true
})
}
// WithIPv6Family implements a New Option that instructs the kindClusterProvider to set the IPFamily to IPv6 in
// the new kind cluster.
func WithIPv6Family() KindClusterOption {
return kindClusterOptionAdapter(func(k *KindClusterProvider) {
k.ipFamily = clusterv1.IPv6IPFamily
})
}
// LogFolder implements a New Option that instructs the kindClusterProvider to dump bootstrap logs in a folder in case of errors.
func LogFolder(path string) KindClusterOption {
return kindClusterOptionAdapter(func(k *KindClusterProvider) {
k.logFolder = path
})
}
// NewKindClusterProvider returns a ClusterProvider that can create a kind cluster.
func NewKindClusterProvider(name string, options ...KindClusterOption) *KindClusterProvider {
Expect(name).ToNot(BeEmpty(), "name is required for NewKindClusterProvider")
clusterProvider := &KindClusterProvider{
name: name,
}
for _, option := range options {
option.apply(clusterProvider)
}
return clusterProvider
}
// KindClusterProvider implements a ClusterProvider that can create a kind cluster.
type KindClusterProvider struct {
name string
withDockerSock bool
kubeconfigPath string
nodeImage string
ipFamily clusterv1.ClusterIPFamily
logFolder string
}
// Create a Kubernetes cluster using kind.
func (k *KindClusterProvider) Create(ctx context.Context) {
Expect(ctx).NotTo(BeNil(), "ctx is required for Create")
// Sets the kubeconfig path to a temp file.
// NB. the ClusterProvider is responsible for the cleanup of this file
f, err := os.CreateTemp("", "e2e-kind")
Expect(err).ToNot(HaveOccurred(), "Failed to create kubeconfig file for the kind cluster %q", k.name)
k.kubeconfigPath = f.Name()
// Creates the kind cluster
k.createKindCluster()
}
// createKindCluster calls the kind library taking care of passing options for:
// - use a dedicated kubeconfig file (test should not alter the user environment)
// - if required, mount /var/run/docker.sock.
func (k *KindClusterProvider) createKindCluster() {
kindCreateOptions := []kind.CreateOption{
kind.CreateWithKubeconfigPath(k.kubeconfigPath),
}
cfg := &kindv1.Cluster{
TypeMeta: kindv1.TypeMeta{
APIVersion: "kind.x-k8s.io/v1alpha4",
Kind: "Cluster",
},
}
if k.ipFamily == clusterv1.IPv6IPFamily {
cfg.Networking.IPFamily = kindv1.IPv6Family
}
kindv1.SetDefaultsCluster(cfg)
if k.withDockerSock {
setDockerSockConfig(cfg)
}
kindCreateOptions = append(kindCreateOptions, kind.CreateWithV1Alpha4Config(cfg))
nodeImage := fmt.Sprintf("%s:%s", DefaultNodeImageRepository, DefaultNodeImageVersion)
if k.nodeImage != "" {
nodeImage = k.nodeImage
}
kindCreateOptions = append(kindCreateOptions, kind.CreateWithNodeImage(nodeImage))
kindCreateOptions = append(kindCreateOptions, kind.CreateWithRetain(true))
provider := kind.NewProvider(kind.ProviderWithLogger(cmd.NewLogger()))
err := provider.Create(k.name, kindCreateOptions...)
if err != nil {
// if requested, dump kind logs
if k.logFolder != "" {
if err := provider.CollectLogs(k.name, k.logFolder); err != nil {
log.Logf("Failed to collect logs from kind: %v", err)
}
}
errStr := fmt.Sprintf("Failed to create kind cluster %q: %v", k.name, err)
// Extract the details of the RunError, if the cluster creation was triggered by a RunError.
var runErr *exec.RunError
if errors.As(err, &runErr) {
errStr += "\n" + string(runErr.Output)
}
Expect(err).ToNot(HaveOccurred(), errStr)
}
}
// setDockerSockConfig returns a kind config for mounting /var/run/docker.sock into the kind node.
func setDockerSockConfig(cfg *kindv1.Cluster) {
cfg.Nodes = []kindv1.Node{
{
Role: kindv1.ControlPlaneRole,
ExtraMounts: []kindv1.Mount{
{
HostPath: "/var/run/docker.sock",
ContainerPath: "/var/run/docker.sock",
},
},
},
}
}
// GetKubeconfigPath returns the path to the kubeconfig file for the cluster.
func (k *KindClusterProvider) GetKubeconfigPath() string {
return k.kubeconfigPath
}
// Dispose the kind cluster and its kubeconfig file.
func (k *KindClusterProvider) Dispose(ctx context.Context) {
Expect(ctx).NotTo(BeNil(), "ctx is required for Dispose")
if err := kind.NewProvider().Delete(k.name, k.kubeconfigPath); err != nil {
log.Logf("Deleting the kind cluster %q failed. You may need to remove this by hand.", k.name)
}
if err := os.Remove(k.kubeconfigPath); err != nil {
log.Logf("Deleting the kubeconfig file %q file. You may need to remove this by hand.", k.kubeconfigPath)
}
}

View File

@ -0,0 +1,197 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"context"
"fmt"
"os"
"path/filepath"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
"sigs.k8s.io/cluster-api/test/infrastructure/container"
kind "sigs.k8s.io/kind/pkg/cluster"
kindnodes "sigs.k8s.io/kind/pkg/cluster/nodes"
kindnodesutils "sigs.k8s.io/kind/pkg/cluster/nodeutils"
"github.com/kubesphere/kubekey/test/e2e/framework/clusterctl"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
// CreateKindBootstrapClusterAndLoadImagesInput is the input for CreateKindBootstrapClusterAndLoadImages.
type CreateKindBootstrapClusterAndLoadImagesInput struct {
// Name of the cluster.
Name string
// KubernetesVersion of the cluster.
KubernetesVersion string
// RequiresDockerSock defines if the cluster requires the docker sock.
RequiresDockerSock bool
// Images to be loaded in the cluster.
Images []clusterctl.ContainerImage
// IPFamily is either ipv4 or ipv6. Default is ipv4.
IPFamily string
// LogFolder where to dump logs in case of errors
LogFolder string
}
// CreateKindBootstrapClusterAndLoadImages returns a new Kubernetes cluster with pre-loaded images.
func CreateKindBootstrapClusterAndLoadImages(ctx context.Context, input CreateKindBootstrapClusterAndLoadImagesInput) ClusterProvider {
Expect(ctx).NotTo(BeNil(), "ctx is required for CreateKindBootstrapClusterAndLoadImages")
Expect(input.Name).ToNot(BeEmpty(), "Invalid argument. Name can't be empty when calling CreateKindBootstrapClusterAndLoadImages")
log.Logf("Creating a kind cluster with name %q", input.Name)
options := []KindClusterOption{}
if input.KubernetesVersion != "" {
options = append(options, WithNodeImage(fmt.Sprintf("%s:%s", DefaultNodeImageRepository, input.KubernetesVersion)))
}
if input.RequiresDockerSock {
options = append(options, WithDockerSockMount())
}
if input.IPFamily == "IPv6" {
options = append(options, WithIPv6Family())
}
if input.LogFolder != "" {
options = append(options, LogFolder(input.LogFolder))
}
clusterProvider := NewKindClusterProvider(input.Name, options...)
Expect(clusterProvider).ToNot(BeNil(), "Failed to create a kind cluster")
clusterProvider.Create(ctx)
Expect(clusterProvider.GetKubeconfigPath()).To(BeAnExistingFile(), "The kubeconfig file for the kind cluster with name %q does not exist at %q as expected", input.Name, clusterProvider.GetKubeconfigPath())
log.Logf("The kubeconfig file for the kind cluster is %s", clusterProvider.kubeconfigPath)
err := LoadImagesToKindCluster(ctx, LoadImagesToKindClusterInput{
Name: input.Name,
Images: input.Images,
})
if err != nil {
clusterProvider.Dispose(ctx)
Expect(err).NotTo(HaveOccurred()) // re-surface the error to fail the test
}
return clusterProvider
}
// LoadImagesToKindClusterInput is the input for LoadImagesToKindCluster.
type LoadImagesToKindClusterInput struct {
// Name of the cluster
Name string
// Images to be loaded in the cluster (this is kind specific)
Images []clusterctl.ContainerImage
}
// LoadImagesToKindCluster provides a utility for loading images into a kind cluster.
func LoadImagesToKindCluster(ctx context.Context, input LoadImagesToKindClusterInput) error {
if ctx == nil {
return errors.New("ctx is required for LoadImagesToKindCluster")
}
if input.Name == "" {
return errors.New("Invalid argument. Name can't be empty when calling LoadImagesToKindCluster")
}
containerRuntime, err := container.NewDockerClient()
if err != nil {
return errors.Wrap(err, "failed to get Docker runtime client")
}
ctx = container.RuntimeInto(ctx, containerRuntime)
for _, image := range input.Images {
log.Logf("Loading image: %q", image.Name)
if err := loadImage(ctx, input.Name, image.Name); err != nil {
switch image.LoadBehavior {
case clusterctl.MustLoadImage:
return errors.Wrapf(err, "Failed to load image %q into the kind cluster %q", image.Name, input.Name)
case clusterctl.TryLoadImage:
log.Logf("[WARNING] Unable to load image %q into the kind cluster %q: %v", image.Name, input.Name, err)
}
}
}
return nil
}
// loadImage will put a local image onto the kind nodes.
// If the image doesn't exist locally, we will attempt to pull it remotely.
func loadImage(ctx context.Context, cluster, image string) error {
// Save the image into a tar
dir, err := os.MkdirTemp("", "image-tar")
if err != nil {
return errors.Wrap(err, "failed to create tempdir")
}
defer os.RemoveAll(dir)
imageTarPath := filepath.Join(dir, "image.tar")
containerRuntime, err := container.RuntimeFrom(ctx)
if err != nil {
return errors.Wrap(err, "failed to access container runtime")
}
// in the nominal E2E scenario images have been locally built and added to cache
exists, err := containerRuntime.ImageExistsLocally(ctx, image)
if err != nil {
return errors.Wrapf(err, "error listing local image %s", image)
}
// in some scenarios we refer to a real reference image which may not have been pre-downloaded
if !exists {
log.Logf("Image %s not present in local container image cache, will pull", image)
err := containerRuntime.PullContainerImage(ctx, image)
if err != nil {
return errors.Wrapf(err, "error pulling image %q", image)
}
} else {
log.Logf("Image %s is present in local container image cache", image)
}
err = containerRuntime.SaveContainerImage(ctx, image, imageTarPath)
if err != nil {
return errors.Wrapf(err, "error saving image %q to %q", image, imageTarPath)
}
// Gets the nodes in the cluster
provider := kind.NewProvider()
nodeList, err := provider.ListInternalNodes(cluster)
if err != nil {
return err
}
// Load the image on the selected nodes
for _, node := range nodeList {
if err := load(imageTarPath, node); err != nil {
return err
}
}
return nil
}
// copied from kind https://github.com/kubernetes-sigs/kind/blob/v0.7.0/pkg/cmd/kind/load/docker-image/docker-image.go#L158
// loads an image tarball onto a node.
func load(imageTarName string, node kindnodes.Node) error {
f, err := os.Open(filepath.Clean(imageTarName))
if err != nil {
return errors.Wrap(err, "failed to open image")
}
defer f.Close()
return kindnodesutils.LoadImageArchive(node, f)
}
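Putting the two entry points above together, a suite's BeforeSuite might create the bootstrap kind cluster and pre-load provider images in one call. The helper below is an illustrative assumption, not a suite from this commit; the image name is taken from the e2e config earlier in this change.

package bootstrap

import (
	"context"
	"path/filepath"

	"github.com/kubesphere/kubekey/test/e2e/framework/clusterctl"
)

// setupBootstrapCluster is a hypothetical BeforeSuite helper (illustrative only) that
// provisions the kind bootstrap cluster and pre-loads the provider image.
func setupBootstrapCluster(ctx context.Context, artifactFolder string) (ClusterProvider, string) {
	provider := CreateKindBootstrapClusterAndLoadImages(ctx, CreateKindBootstrapClusterAndLoadImagesInput{
		Name:               "capkk-e2e",
		KubernetesVersion:  DefaultNodeImageVersion,
		RequiresDockerSock: true,
		Images: []clusterctl.ContainerImage{
			{Name: "docker.io/kubespheredev/capkk-controller-amd64:e2e", LoadBehavior: clusterctl.TryLoadImage},
		},
		LogFolder: filepath.Join(artifactFolder, "kind"),
	})
	return provider, provider.GetKubeconfigPath()
}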

View File

@ -0,0 +1,287 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
. "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
// CreateClusterInput is the input for CreateCluster.
type CreateClusterInput struct {
Creator Creator
Cluster *clusterv1.Cluster
InfraCluster client.Object
}
// CreateCluster will create the Cluster and InfraCluster objects.
func CreateCluster(ctx context.Context, input CreateClusterInput, intervals ...interface{}) {
By("creating an InfrastructureCluster resource")
Eventually(func() error {
return input.Creator.Create(ctx, input.InfraCluster)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to create InfrastructureCluster %s", klog.KObj(input.InfraCluster))
// This call happens in an eventually because of a race condition with the
// webhook server. If the latter isn't fully online then this call will
// fail.
By("creating a Cluster resource linked to the InfrastructureCluster resource")
Eventually(func() error {
if err := input.Creator.Create(ctx, input.Cluster); err != nil {
log.Logf("Failed to create a cluster: %+v", err)
return err
}
return nil
}, intervals...).Should(Succeed(), "Failed to create Cluster %s", klog.KObj(input.Cluster))
}
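// A minimal sketch of calling CreateCluster from a spec (outside this package); the
// management cluster proxy, the Cluster object and the KKCluster infrastructure object
// are assumed to have been built earlier, and the trailing intervals are illustrative
// Eventually timeouts.
//
//	framework.CreateCluster(ctx, framework.CreateClusterInput{
//		Creator:      managementClusterProxy.GetClient(), // a controller-runtime client satisfies Creator
//		Cluster:      cluster,                            // *clusterv1.Cluster
//		InfraCluster: kkCluster,                          // infrastructure cluster object, e.g. a KKCluster
//	}, "3m", "10s")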
// GetAllClustersByNamespaceInput is the input for GetAllClustersByNamespace.
type GetAllClustersByNamespaceInput struct {
Lister Lister
Namespace string
}
// GetAllClustersByNamespace returns the list of Cluster objects in a namespace.
func GetAllClustersByNamespace(ctx context.Context, input GetAllClustersByNamespaceInput) []*clusterv1.Cluster {
clusterList := &clusterv1.ClusterList{}
Eventually(func() error {
return input.Lister.List(ctx, clusterList, client.InNamespace(input.Namespace))
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list clusters in namespace %s", input.Namespace)
clusters := make([]*clusterv1.Cluster, len(clusterList.Items))
for i := range clusterList.Items {
clusters[i] = &clusterList.Items[i]
}
return clusters
}
// GetClusterByNameInput is the input for GetClusterByName.
type GetClusterByNameInput struct {
Getter Getter
Name string
Namespace string
}
// GetClusterByName returns a Cluster object given its name and namespace.
func GetClusterByName(ctx context.Context, input GetClusterByNameInput) *clusterv1.Cluster {
cluster := &clusterv1.Cluster{}
key := client.ObjectKey{
Namespace: input.Namespace,
Name: input.Name,
}
Eventually(func() error {
return input.Getter.Get(ctx, key, cluster)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to get Cluster object %s", klog.KRef(input.Namespace, input.Name))
return cluster
}
// PatchClusterLabelInput is the input for PatchClusterLabel.
type PatchClusterLabelInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
Labels map[string]string
}
// PatchClusterLabel patches labels to a cluster.
func PatchClusterLabel(ctx context.Context, input PatchClusterLabelInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for PatchClusterLabel")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling PatchClusterLabel")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling PatchClusterLabel")
Expect(input.Labels).ToNot(BeEmpty(), "Invalid argument. input.Labels can't be empty when calling PatchClusterLabel")
log.Logf("Patching the label to the cluster")
patchHelper, err := patch.NewHelper(input.Cluster, input.ClusterProxy.GetClient())
Expect(err).ToNot(HaveOccurred())
input.Cluster.SetLabels(input.Labels)
Eventually(func() error {
return patchHelper.Patch(ctx, input.Cluster)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch label to cluster %s", klog.KObj(input.Cluster))
}
// WaitForClusterToProvisionInput is the input for WaitForClusterToProvision.
type WaitForClusterToProvisionInput struct {
Getter Getter
Cluster *clusterv1.Cluster
}
// WaitForClusterToProvision will wait for a cluster to have a phase status of provisioned.
func WaitForClusterToProvision(ctx context.Context, input WaitForClusterToProvisionInput, intervals ...interface{}) *clusterv1.Cluster {
cluster := &clusterv1.Cluster{}
By("Waiting for cluster to enter the provisioned phase")
Eventually(func() (string, error) {
key := client.ObjectKey{
Namespace: input.Cluster.GetNamespace(),
Name: input.Cluster.GetName(),
}
if err := input.Getter.Get(ctx, key, cluster); err != nil {
return "", err
}
return cluster.Status.Phase, nil
}, intervals...).Should(Equal(string(clusterv1.ClusterPhaseProvisioned)), "Timed out waiting for Cluster %s to provision", klog.KObj(input.Cluster))
return cluster
}
// DeleteClusterInput is the input for DeleteCluster.
type DeleteClusterInput struct {
Deleter Deleter
Cluster *clusterv1.Cluster
}
// DeleteCluster deletes the cluster.
func DeleteCluster(ctx context.Context, input DeleteClusterInput) {
Byf("Deleting cluster %s", input.Cluster.GetName())
Expect(input.Deleter.Delete(ctx, input.Cluster)).To(Succeed())
}
// WaitForClusterDeletedInput is the input for WaitForClusterDeleted.
type WaitForClusterDeletedInput struct {
Getter Getter
Cluster *clusterv1.Cluster
}
// WaitForClusterDeleted waits until the cluster object has been deleted.
func WaitForClusterDeleted(ctx context.Context, input WaitForClusterDeletedInput, intervals ...interface{}) {
Byf("Waiting for cluster %s to be deleted", input.Cluster.GetName())
Eventually(func() bool {
cluster := &clusterv1.Cluster{}
key := client.ObjectKey{
Namespace: input.Cluster.GetNamespace(),
Name: input.Cluster.GetName(),
}
return apierrors.IsNotFound(input.Getter.Get(ctx, key, cluster))
}, intervals...).Should(BeTrue())
}
// DiscoveryAndWaitForClusterInput is the input type for DiscoveryAndWaitForCluster.
type DiscoveryAndWaitForClusterInput struct {
Getter Getter
Namespace string
Name string
}
// DiscoveryAndWaitForCluster discovers a cluster object in a namespace and waits for the cluster infrastructure to be provisioned.
func DiscoveryAndWaitForCluster(ctx context.Context, input DiscoveryAndWaitForClusterInput, intervals ...interface{}) *clusterv1.Cluster {
Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForCluster")
Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling DiscoveryAndWaitForCluster")
Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling DiscoveryAndWaitForCluster")
Expect(input.Name).ToNot(BeEmpty(), "Invalid argument. input.Name can't be empty when calling DiscoveryAndWaitForCluster")
var cluster *clusterv1.Cluster
Eventually(func() bool {
cluster = GetClusterByName(ctx, GetClusterByNameInput{
Getter: input.Getter,
Name: input.Name,
Namespace: input.Namespace,
})
return cluster != nil
}, retryableOperationTimeout, retryableOperationInterval).Should(BeTrue(), "Failed to get Cluster object %s", klog.KRef(input.Namespace, input.Name))
// NOTE: We intentionally return the provisioned Cluster because it also contains
// the reconciled ControlPlane ref and InfrastructureCluster ref when using a ClusterClass.
cluster = WaitForClusterToProvision(ctx, WaitForClusterToProvisionInput{
Getter: input.Getter,
Cluster: cluster,
}, intervals...)
return cluster
}
// DeleteClusterAndWaitInput is the input type for DeleteClusterAndWait.
type DeleteClusterAndWaitInput struct {
Client client.Client
Cluster *clusterv1.Cluster
}
// DeleteClusterAndWait deletes a cluster object and waits for it to be gone.
func DeleteClusterAndWait(ctx context.Context, input DeleteClusterAndWaitInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for DeleteClusterAndWait")
Expect(input.Client).ToNot(BeNil(), "Invalid argument. input.Client can't be nil when calling DeleteClusterAndWait")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DeleteClusterAndWait")
DeleteCluster(ctx, DeleteClusterInput{
Deleter: input.Client,
Cluster: input.Cluster,
})
log.Logf("Waiting for the Cluster object to be deleted")
WaitForClusterDeleted(ctx, WaitForClusterDeletedInput{
Getter: input.Client,
Cluster: input.Cluster,
}, intervals...)
// TODO: consider moving this into another func (what if there is more than one cluster?)
log.Logf("Check for all the Cluster API resources being deleted")
Eventually(func() []*unstructured.Unstructured {
return GetCAPIResources(ctx, GetCAPIResourcesInput{
Lister: input.Client,
Namespace: input.Cluster.Namespace,
})
}, retryableOperationTimeout, retryableOperationInterval).Should(BeEmpty(), "There are still Cluster API resources in the %q namespace", input.Cluster.Namespace)
}
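// A minimal sketch of the discover/tear-down flow a spec typically builds from the
// helpers above; the namespace, name and intervals are illustrative.
//
//	cluster := framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{
//		Getter:    managementClusterProxy.GetClient(),
//		Namespace: "quick-start-abc123",
//		Name:      "quick-start-cluster",
//	}, "20m", "30s")
//
//	framework.DeleteClusterAndWait(ctx, framework.DeleteClusterAndWaitInput{
//		Client:  managementClusterProxy.GetClient(),
//		Cluster: cluster,
//	}, "15m", "30s")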
// DeleteAllClustersAndWaitInput is the input type for DeleteAllClustersAndWait.
type DeleteAllClustersAndWaitInput struct {
Client client.Client
Namespace string
}
// DeleteAllClustersAndWait deletes all cluster objects in a namespace and waits for them to be gone.
func DeleteAllClustersAndWait(ctx context.Context, input DeleteAllClustersAndWaitInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for DeleteAllClustersAndWait")
Expect(input.Client).ToNot(BeNil(), "Invalid argument. input.Client can't be nil when calling DeleteAllClustersAndWait")
Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling DeleteAllClustersAndWait")
clusters := GetAllClustersByNamespace(ctx, GetAllClustersByNamespaceInput{
Lister: input.Client,
Namespace: input.Namespace,
})
for _, c := range clusters {
DeleteCluster(ctx, DeleteClusterInput{
Deleter: input.Client,
Cluster: c,
})
}
for _, c := range clusters {
log.Logf("Waiting for the Cluster %s to be deleted", klog.KObj(c))
WaitForClusterDeleted(ctx, WaitForClusterDeletedInput{
Getter: input.Client,
Cluster: c,
}, intervals...)
}
}
// byClusterOptions returns a set of ListOptions that allow identifying all the objects belonging to a Cluster.
func byClusterOptions(name, namespace string) []client.ListOption {
return []client.ListOption{
client.InNamespace(namespace),
client.MatchingLabels{
clusterv1.ClusterLabelName: name,
},
}
}

View File

@ -0,0 +1,377 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path"
goruntime "runtime"
"time"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/test/framework/exec"
"sigs.k8s.io/cluster-api/test/infrastructure/container"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
const (
retryableOperationInterval = 3 * time.Second
// retryableOperationTimeout requires a higher value especially for self-hosted upgrades.
// Short unavailability of the Kube APIServer due to joining etcd members paired with unreachable conversion webhooks due to
// failed leader election (and the resulting controller restarts) can make retries take longer.
// The timeout occurs when listing machines in `GetControlPlaneMachinesByCluster`.
retryableOperationTimeout = 3 * time.Minute
)
// ClusterProxy defines the behavior of a type that acts as an intermediary with an existing Kubernetes cluster.
// It should work with any Kubernetes cluster, no matter if the Cluster was created by a bootstrap.ClusterProvider,
// by Cluster API (a workload cluster or a self-hosted cluster) or else.
type ClusterProxy interface {
// GetName returns the name of the cluster.
GetName() string
// GetKubeconfigPath returns the path to the kubeconfig file to be used to access the Kubernetes cluster.
GetKubeconfigPath() string
// GetScheme returns the scheme defining the types hosted in the Kubernetes cluster.
// It is used when creating a controller-runtime client.
GetScheme() *runtime.Scheme
// GetClient returns a controller-runtime client to the Kubernetes cluster.
GetClient() client.Client
// GetClientSet returns a client-go client to the Kubernetes cluster.
GetClientSet() *kubernetes.Clientset
// GetRESTConfig returns the REST config for direct use with client-go if needed.
GetRESTConfig() *rest.Config
// GetLogCollector returns the machine log collector for the Kubernetes cluster.
GetLogCollector() ClusterLogCollector
// Apply applies YAML to the Kubernetes cluster, similar to `kubectl apply`.
Apply(ctx context.Context, resources []byte, args ...string) error
// GetWorkloadCluster returns a proxy to a workload cluster defined in the Kubernetes cluster.
GetWorkloadCluster(ctx context.Context, namespace, name string) ClusterProxy
// CollectWorkloadClusterLogs collects machines logs from the workload cluster.
CollectWorkloadClusterLogs(ctx context.Context, namespace, name, outputPath string)
// Dispose the proxy's internal resources (the operation does not affect the Kubernetes cluster).
// This should be implemented as a synchronous function.
Dispose(context.Context)
}
// ClusterLogCollector defines an object that can collect logs from a machine.
type ClusterLogCollector interface {
// CollectMachineLog collects log from a machine.
// TODO: describe output folder struct
CollectMachineLog(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine, outputPath string) error
CollectMachinePoolLog(ctx context.Context, managementClusterClient client.Client, m *expv1.MachinePool, outputPath string) error
}
// Option is a configuration option supplied to NewClusterProxy.
type Option func(*clusterProxy)
// WithMachineLogCollector allows defining the machine log collector to be used with this Cluster.
func WithMachineLogCollector(logCollector ClusterLogCollector) Option {
return func(c *clusterProxy) {
c.logCollector = logCollector
}
}
// clusterProxy provides a base implementation of the ClusterProxy interface.
type clusterProxy struct {
name string
kubeconfigPath string
scheme *runtime.Scheme
shouldCleanupKubeconfig bool
logCollector ClusterLogCollector
}
// NewClusterProxy returns a clusterProxy given a KubeconfigPath and the scheme defining the types hosted in the cluster.
// If a kubeconfig file isn't provided, standard kubeconfig locations will be used (kubectl loading rules apply).
func NewClusterProxy(name string, kubeconfigPath string, scheme *runtime.Scheme, options ...Option) ClusterProxy {
Expect(scheme).NotTo(BeNil(), "scheme is required for NewClusterProxy")
if kubeconfigPath == "" {
kubeconfigPath = clientcmd.NewDefaultClientConfigLoadingRules().GetDefaultFilename()
}
proxy := &clusterProxy{
name: name,
kubeconfigPath: kubeconfigPath,
scheme: scheme,
shouldCleanupKubeconfig: false,
}
for _, o := range options {
o(proxy)
}
return proxy
}
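// A minimal sketch of constructing the management cluster proxy from a test suite;
// the scheme registration below is an assumption about which API groups the suite
// needs, and logCollector is a hypothetical ClusterLogCollector implementation.
//
//	scheme := runtime.NewScheme()
//	_ = corev1.AddToScheme(scheme)
//	_ = clusterv1.AddToScheme(scheme)
//	proxy := framework.NewClusterProxy("kk-e2e", os.Getenv("KUBECONFIG"), scheme,
//		framework.WithMachineLogCollector(logCollector))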
// newFromAPIConfig returns a clusterProxy given a api.Config and the scheme defining the types hosted in the cluster.
func newFromAPIConfig(name string, config *api.Config, scheme *runtime.Scheme) ClusterProxy {
// NB. the ClusterProvider is responsible for the cleanup of this file
f, err := os.CreateTemp("", "e2e-kubeconfig")
Expect(err).ToNot(HaveOccurred(), "Failed to create kubeconfig file for the kind cluster %q")
kubeconfigPath := f.Name()
err = clientcmd.WriteToFile(*config, kubeconfigPath)
Expect(err).ToNot(HaveOccurred(), "Failed to write kubeconfig for the kind cluster to a file %q")
return &clusterProxy{
name: name,
kubeconfigPath: kubeconfigPath,
scheme: scheme,
shouldCleanupKubeconfig: true,
}
}
// GetName returns the name of the cluster.
func (p *clusterProxy) GetName() string {
return p.name
}
// GetKubeconfigPath returns the path to the kubeconfig file for the cluster.
func (p *clusterProxy) GetKubeconfigPath() string {
return p.kubeconfigPath
}
// GetScheme returns the scheme defining the types hosted in the cluster.
func (p *clusterProxy) GetScheme() *runtime.Scheme {
return p.scheme
}
// GetClient returns a controller-runtime client for the cluster.
func (p *clusterProxy) GetClient() client.Client {
config := p.GetRESTConfig()
var c client.Client
var newClientErr error
err := wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
c, newClientErr = client.New(config, client.Options{Scheme: p.scheme})
if newClientErr != nil {
return false, nil //nolint:nilerr
}
return true, nil
})
errorString := "Failed to get controller-runtime client"
Expect(newClientErr).ToNot(HaveOccurred(), errorString)
Expect(err).ToNot(HaveOccurred(), errorString)
return c
}
// GetClientSet returns a client-go client for the cluster.
func (p *clusterProxy) GetClientSet() *kubernetes.Clientset {
restConfig := p.GetRESTConfig()
cs, err := kubernetes.NewForConfig(restConfig)
Expect(err).ToNot(HaveOccurred(), "Failed to get client-go client")
return cs
}
// Apply wraps `kubectl apply ...` and prints the output so we can see what gets applied to the cluster.
func (p *clusterProxy) Apply(ctx context.Context, resources []byte, args ...string) error {
Expect(ctx).NotTo(BeNil(), "ctx is required for Apply")
Expect(resources).NotTo(BeNil(), "resources is required for Apply")
return exec.KubectlApply(ctx, p.kubeconfigPath, resources, args...)
}
func (p *clusterProxy) GetRESTConfig() *rest.Config {
config, err := clientcmd.LoadFromFile(p.kubeconfigPath)
Expect(err).ToNot(HaveOccurred(), "Failed to load Kubeconfig file from %q", p.kubeconfigPath)
restConfig, err := clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig()
Expect(err).ToNot(HaveOccurred(), "Failed to get ClientConfig from %q", p.kubeconfigPath)
restConfig.UserAgent = "cluster-api-e2e"
return restConfig
}
func (p *clusterProxy) GetLogCollector() ClusterLogCollector {
return p.logCollector
}
// GetWorkloadCluster returns ClusterProxy for the workload cluster.
func (p *clusterProxy) GetWorkloadCluster(ctx context.Context, namespace, name string) ClusterProxy {
Expect(ctx).NotTo(BeNil(), "ctx is required for GetWorkloadCluster")
Expect(namespace).NotTo(BeEmpty(), "namespace is required for GetWorkloadCluster")
Expect(name).NotTo(BeEmpty(), "name is required for GetWorkloadCluster")
// gets the kubeconfig from the cluster
config := p.getKubeconfig(ctx, namespace, name)
// if we are on mac and the cluster is a DockerCluster, it is required to fix the control plane address
// by using localhost:load-balancer-host-port instead of the address used in the docker network.
if goruntime.GOOS == "darwin" && p.isDockerCluster(ctx, namespace, name) {
p.fixConfig(ctx, name, config)
}
return newFromAPIConfig(name, config, p.scheme)
}
// CollectWorkloadClusterLogs collects machines logs from the workload cluster.
func (p *clusterProxy) CollectWorkloadClusterLogs(ctx context.Context, namespace, name, outputPath string) {
if p.logCollector == nil {
return
}
var machines *clusterv1.MachineList
Eventually(func() error {
var err error
machines, err = getMachinesInCluster(ctx, p.GetClient(), namespace, name)
return err
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to get Machines for the Cluster %s", klog.KRef(namespace, name))
for i := range machines.Items {
m := &machines.Items[i]
err := p.logCollector.CollectMachineLog(ctx, p.GetClient(), m, path.Join(outputPath, "machines", m.GetName()))
if err != nil {
// NB. we are treating failures in collecting logs as a non blocking operation (best effort)
fmt.Printf("Failed to get logs for Machine %s, Cluster %s: %v\n", m.GetName(), klog.KRef(namespace, name), err)
}
}
var machinePools *expv1.MachinePoolList
Eventually(func() error {
var err error
machinePools, err = getMachinePoolsInCluster(ctx, p.GetClient(), namespace, name)
return err
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to get MachinePools for Cluster %s", klog.KRef(namespace, name))
for i := range machinePools.Items {
mp := &machinePools.Items[i]
err := p.logCollector.CollectMachinePoolLog(ctx, p.GetClient(), mp, path.Join(outputPath, "machine-pools", mp.GetName()))
if err != nil {
// NB. we are treating failures in collecting logs as a non blocking operation (best effort)
fmt.Printf("Failed to get logs for MachinePool %s, Cluster %s: %v\n", mp.GetName(), klog.KRef(namespace, name), err)
}
}
}
func getMachinesInCluster(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.MachineList, error) {
if name == "" {
return nil, errors.New("cluster name should not be empty")
}
machineList := &clusterv1.MachineList{}
labels := map[string]string{clusterv1.ClusterLabelName: name}
if err := c.List(ctx, machineList, client.InNamespace(namespace), client.MatchingLabels(labels)); err != nil {
return nil, err
}
return machineList, nil
}
func getMachinePoolsInCluster(ctx context.Context, c client.Client, namespace, name string) (*expv1.MachinePoolList, error) {
if name == "" {
return nil, errors.New("cluster name should not be empty")
}
machinePoolList := &expv1.MachinePoolList{}
labels := map[string]string{clusterv1.ClusterLabelName: name}
if err := c.List(ctx, machinePoolList, client.InNamespace(namespace), client.MatchingLabels(labels)); err != nil {
return nil, err
}
return machinePoolList, nil
}
func (p *clusterProxy) getKubeconfig(ctx context.Context, namespace string, name string) *api.Config {
cl := p.GetClient()
secret := &corev1.Secret{}
key := client.ObjectKey{
Name: fmt.Sprintf("%s-kubeconfig", name),
Namespace: namespace,
}
Eventually(func() error {
return cl.Get(ctx, key, secret)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to get %s", key)
Expect(secret.Data).To(HaveKey("value"), "Invalid secret %s", key)
config, err := clientcmd.Load(secret.Data["value"])
Expect(err).ToNot(HaveOccurred(), "Failed to convert %s into a kubeconfig file", key)
return config
}
func (p *clusterProxy) isDockerCluster(ctx context.Context, namespace string, name string) bool {
cl := p.GetClient()
cluster := &clusterv1.Cluster{}
key := client.ObjectKey{
Name: name,
Namespace: namespace,
}
Eventually(func() error {
return cl.Get(ctx, key, cluster)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to get %s", key)
return cluster.Spec.InfrastructureRef.Kind == "DockerCluster"
}
func (p *clusterProxy) fixConfig(ctx context.Context, name string, config *api.Config) {
containerRuntime, err := container.NewDockerClient()
Expect(err).ToNot(HaveOccurred(), "Failed to get Docker runtime client")
ctx = container.RuntimeInto(ctx, containerRuntime)
lbContainerName := name + "-lb"
port, err := containerRuntime.GetHostPort(ctx, lbContainerName, "6443/tcp")
Expect(err).ToNot(HaveOccurred(), "Failed to get load balancer port")
controlPlaneURL := &url.URL{
Scheme: "https",
Host: "127.0.0.1:" + port,
}
currentCluster := config.Contexts[config.CurrentContext].Cluster
config.Clusters[currentCluster].Server = controlPlaneURL.String()
}
// Dispose clusterProxy internal resources (the operation does not affect the Kubernetes cluster).
func (p *clusterProxy) Dispose(ctx context.Context) {
Expect(ctx).NotTo(BeNil(), "ctx is required for Dispose")
if p.shouldCleanupKubeconfig {
if err := os.Remove(p.kubeconfigPath); err != nil {
log.Logf("Deleting the kubeconfig file %q file. You may need to remove this by hand.", p.kubeconfigPath)
}
}
}

View File

@ -0,0 +1,174 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"strconv"
. "github.com/onsi/gomega"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
// GetClusterClassByNameInput is the input for GetClusterClassByName.
type GetClusterClassByNameInput struct {
Getter Getter
Name string
Namespace string
}
// GetClusterClassByName returns a ClusterClass object given its name and namespace.
func GetClusterClassByName(ctx context.Context, input GetClusterClassByNameInput) *clusterv1.ClusterClass {
Expect(ctx).NotTo(BeNil(), "ctx is required for GetClusterClassByName")
Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling GetClusterClassByName")
Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling GetClusterClassByName")
Expect(input.Name).ToNot(BeEmpty(), "Invalid argument. input.Name can't be empty when calling GetClusterClassByName")
clusterClass := &clusterv1.ClusterClass{}
key := client.ObjectKey{
Namespace: input.Namespace,
Name: input.Name,
}
Eventually(func() error {
return input.Getter.Get(ctx, key, clusterClass)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to get ClusterClass object %s", klog.KRef(input.Namespace, input.Name))
return clusterClass
}
// UpgradeClusterTopologyAndWaitForUpgradeInput is the input type for UpgradeClusterTopologyAndWaitForUpgrade.
type UpgradeClusterTopologyAndWaitForUpgradeInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
ControlPlane *infracontrolplanev1.K3sControlPlane
EtcdImageTag string
DNSImageTag string
MachineDeployments []*clusterv1.MachineDeployment
KubernetesUpgradeVersion string
WaitForMachinesToBeUpgraded []interface{}
WaitForKubeProxyUpgrade []interface{}
WaitForDNSUpgrade []interface{}
WaitForEtcdUpgrade []interface{}
PreWaitForControlPlaneToBeUpgraded func()
PreWaitForMachineDeploymentToBeUpgraded func()
}
// UpgradeClusterTopologyAndWaitForUpgrade upgrades a Cluster topology and waits for it to be upgraded.
// NOTE: This func only works with K3sControlPlane.
func UpgradeClusterTopologyAndWaitForUpgrade(ctx context.Context, input UpgradeClusterTopologyAndWaitForUpgradeInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeClusterTopologyAndWaitForUpgrade")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeClusterTopologyAndWaitForUpgrade")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeClusterTopologyAndWaitForUpgrade")
Expect(input.ControlPlane).ToNot(BeNil(), "Invalid argument. input.ControlPlane can't be nil when calling UpgradeClusterTopologyAndWaitForUpgrade")
Expect(input.MachineDeployments).ToNot(BeEmpty(), "Invalid argument. input.MachineDeployments can't be empty when calling UpgradeClusterTopologyAndWaitForUpgrade")
Expect(input.KubernetesUpgradeVersion).ToNot(BeEmpty(), "Invalid argument. input.KubernetesUpgradeVersion can't be empty when calling UpgradeClusterTopologyAndWaitForUpgrade")
mgmtClient := input.ClusterProxy.GetClient()
log.Logf("Patching the new Kubernetes version to Cluster topology")
patchHelper, err := patch.NewHelper(input.Cluster, mgmtClient)
Expect(err).ToNot(HaveOccurred())
input.Cluster.Spec.Topology.Version = input.KubernetesUpgradeVersion
for i, variable := range input.Cluster.Spec.Topology.Variables {
if variable.Name == "etcdImageTag" {
// NOTE: strconv.Quote is used to produce a valid JSON string.
input.Cluster.Spec.Topology.Variables[i].Value = apiextensionsv1.JSON{Raw: []byte(strconv.Quote(input.EtcdImageTag))}
}
if variable.Name == "coreDNSImageTag" {
// NOTE: strconv.Quote is used to produce a valid JSON string.
input.Cluster.Spec.Topology.Variables[i].Value = apiextensionsv1.JSON{Raw: []byte(strconv.Quote(input.DNSImageTag))}
}
}
Eventually(func() error {
return patchHelper.Patch(ctx, input.Cluster)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch Cluster topology %s with version %s", klog.KObj(input.Cluster), input.KubernetesUpgradeVersion)
// Once we have patched the Kubernetes Cluster we can run PreWaitForControlPlaneToBeUpgraded.
// Note: This can e.g. be used to verify the BeforeClusterUpgrade lifecycle hook is executed
// and blocking correctly.
if input.PreWaitForControlPlaneToBeUpgraded != nil {
log.Logf("Calling PreWaitForControlPlaneToBeUpgraded")
input.PreWaitForControlPlaneToBeUpgraded()
}
log.Logf("Waiting for control-plane machines to have the upgraded Kubernetes version")
WaitForControlPlaneMachinesToBeUpgraded(ctx, WaitForControlPlaneMachinesToBeUpgradedInput{
Lister: mgmtClient,
Cluster: input.Cluster,
MachineCount: int(*input.ControlPlane.Spec.Replicas),
KubernetesUpgradeVersion: input.KubernetesUpgradeVersion,
}, input.WaitForMachinesToBeUpgraded...)
log.Logf("Waiting for kube-proxy to have the upgraded Kubernetes version")
workloadCluster := input.ClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Namespace, input.Cluster.Name)
workloadClient := workloadCluster.GetClient()
WaitForKubeProxyUpgrade(ctx, WaitForKubeProxyUpgradeInput{
Getter: workloadClient,
KubernetesVersion: input.KubernetesUpgradeVersion,
}, input.WaitForKubeProxyUpgrade...)
// Wait for the CoreDNS upgrade if the DNSImageTag is set.
if input.DNSImageTag != "" {
log.Logf("Waiting for CoreDNS to have the upgraded image tag")
WaitForDNSUpgrade(ctx, WaitForDNSUpgradeInput{
Getter: workloadClient,
DNSVersion: input.DNSImageTag,
}, input.WaitForDNSUpgrade...)
}
// Wait for the etcd upgrade if the EtcdImageTag is set.
if input.EtcdImageTag != "" {
log.Logf("Waiting for etcd to have the upgraded image tag")
lblSelector, err := labels.Parse("component=etcd")
Expect(err).ToNot(HaveOccurred())
WaitForPodListCondition(ctx, WaitForPodListConditionInput{
Lister: workloadClient,
ListOptions: &client.ListOptions{LabelSelector: lblSelector},
Condition: EtcdImageTagCondition(input.EtcdImageTag, int(*input.ControlPlane.Spec.Replicas)),
}, input.WaitForEtcdUpgrade...)
}
// Once the ControlPlane is upgraded we can run PreWaitForMachineDeploymentToBeUpgraded.
// Note: This can e.g. be used to verify the AfterControlPlaneUpgrade lifecycle hook is executed
// and blocking correctly.
if input.PreWaitForMachineDeploymentToBeUpgraded != nil {
log.Logf("Calling PreWaitForMachineDeploymentToBeUpgraded")
input.PreWaitForMachineDeploymentToBeUpgraded()
}
for _, deployment := range input.MachineDeployments {
if *deployment.Spec.Replicas > 0 {
log.Logf("Waiting for Kubernetes versions of machines in MachineDeployment %s to be upgraded to %s",
klog.KObj(deployment), input.KubernetesUpgradeVersion)
WaitForMachineDeploymentMachinesToBeUpgraded(ctx, WaitForMachineDeploymentMachinesToBeUpgradedInput{
Lister: mgmtClient,
Cluster: input.Cluster,
MachineCount: int(*deployment.Spec.Replicas),
KubernetesUpgradeVersion: input.KubernetesUpgradeVersion,
MachineDeployment: *deployment,
}, input.WaitForMachinesToBeUpgraded...)
}
}
}
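// A minimal sketch of driving a topology upgrade from a spec; the version string and
// intervals are illustrative, and result is assumed to be a previously populated
// ApplyClusterTemplateAndWaitResult.
//
//	framework.UpgradeClusterTopologyAndWaitForUpgrade(ctx, framework.UpgradeClusterTopologyAndWaitForUpgradeInput{
//		ClusterProxy:                managementClusterProxy,
//		Cluster:                     result.Cluster,
//		ControlPlane:                result.ControlPlane,
//		MachineDeployments:          result.MachineDeployments,
//		KubernetesUpgradeVersion:    "v1.24.4+k3s1",
//		WaitForMachinesToBeUpgraded: []interface{}{"30m", "10s"},
//		WaitForKubeProxyUpgrade:     []interface{}{"30m", "10s"},
//	})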

View File

@ -0,0 +1,418 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterctl
import (
"context"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
clusterctlclient "sigs.k8s.io/cluster-api/cmd/clusterctl/client"
clusterctllog "sigs.k8s.io/cluster-api/cmd/clusterctl/log"
"github.com/kubesphere/kubekey/test/e2e/framework/clusterctl/logger"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
// Provide E2E friendly wrappers for the clusterctl client library.
const (
// DefaultFlavor for ConfigClusterInput; use it for getting the cluster-template.yaml file.
DefaultFlavor = ""
)
const (
// DefaultInfrastructureProvider for ConfigClusterInput; use it for using the only infrastructure provider installed in a cluster.
DefaultInfrastructureProvider = ""
)
// InitInput is the input for Init.
type InitInput struct {
LogFolder string
ClusterctlConfigPath string
KubeconfigPath string
CoreProvider string
BootstrapProviders []string
ControlPlaneProviders []string
InfrastructureProviders []string
}
// Init calls clusterctl init with the list of providers defined in the local repository.
func Init(_ context.Context, input InitInput) {
log.Logf("clusterctl init --core %s --bootstrap %s --control-plane %s --infrastructure %s --config %s --kubeconfig %s",
input.CoreProvider,
strings.Join(input.BootstrapProviders, ","),
strings.Join(input.ControlPlaneProviders, ","),
strings.Join(input.InfrastructureProviders, ","),
input.ClusterctlConfigPath,
input.KubeconfigPath,
)
initOpt := clusterctlclient.InitOptions{
Kubeconfig: clusterctlclient.Kubeconfig{
Path: input.KubeconfigPath,
Context: "",
},
CoreProvider: input.CoreProvider,
BootstrapProviders: input.BootstrapProviders,
ControlPlaneProviders: input.ControlPlaneProviders,
InfrastructureProviders: input.InfrastructureProviders,
LogUsageInstructions: true,
WaitProviders: true,
}
clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-init.log", input.LogFolder)
defer log.Close()
_, err := clusterctlClient.Init(initOpt)
Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl init")
}
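// A minimal sketch of initializing the management cluster via the library call above;
// the provider names, config path and folder layout are illustrative assumptions about
// how a suite wires this up.
//
//	clusterctl.Init(ctx, clusterctl.InitInput{
//		KubeconfigPath:          managementClusterProxy.GetKubeconfigPath(),
//		ClusterctlConfigPath:    clusterctlConfigPath, // points at the local test repository
//		CoreProvider:            "cluster-api",
//		BootstrapProviders:      []string{"k3s"},
//		ControlPlaneProviders:   []string{"k3s"},
//		InfrastructureProviders: []string{"kubekey"},
//		LogFolder:               filepath.Join(artifactFolder, "clusterctl-init"),
//	})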
// InitWithBinary uses clusterctl binary to run init with the list of providers defined in the local repository.
func InitWithBinary(_ context.Context, binary string, input InitInput) {
log.Logf("clusterctl init --core %s --bootstrap %s --control-plane %s --infrastructure %s --config %s --kubeconfig %s",
input.CoreProvider,
strings.Join(input.BootstrapProviders, ","),
strings.Join(input.ControlPlaneProviders, ","),
strings.Join(input.InfrastructureProviders, ","),
input.ClusterctlConfigPath,
input.KubeconfigPath,
)
args := []string{"init", "--config", input.ClusterctlConfigPath, "--kubeconfig", input.KubeconfigPath}
if input.CoreProvider != "" {
args = append(args, "--core", input.CoreProvider)
}
if len(input.BootstrapProviders) > 0 {
args = append(args, "--bootstrap", strings.Join(input.BootstrapProviders, ","))
}
if len(input.ControlPlaneProviders) > 0 {
args = append(args, "--control-plane", strings.Join(input.ControlPlaneProviders, ","))
}
if len(input.InfrastructureProviders) > 0 {
args = append(args, "--infrastructure", strings.Join(input.InfrastructureProviders, ","))
}
cmd := exec.Command(binary, args...) //nolint:gosec // We don't care about command injection here.
out, err := cmd.CombinedOutput()
_ = os.WriteFile(filepath.Join(input.LogFolder, "clusterctl-init.log"), out, 0644) //nolint:gosec // this is a log file to be shared via prow artifacts
var stdErr string
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
stdErr = string(exitErr.Stderr)
}
}
Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl init:\nstdout:\n%s\nstderr:\n%s", string(out), stdErr)
}
// UpgradeInput is the input for Upgrade.
type UpgradeInput struct {
LogFolder string
ClusterctlConfigPath string
ClusterctlVariables map[string]string
ClusterName string
KubeconfigPath string
Contract string
CoreProvider string
BootstrapProviders []string
ControlPlaneProviders []string
InfrastructureProviders []string
IPAMProviders []string
RuntimeExtensionProviders []string
}
// Upgrade calls clusterctl upgrade apply with the list of providers defined in the local repository.
func Upgrade(ctx context.Context, input UpgradeInput) {
if len(input.ClusterctlVariables) > 0 {
outputPath := filepath.Join(filepath.Dir(input.ClusterctlConfigPath), fmt.Sprintf("clusterctl-upgrade-config-%s.yaml", input.ClusterName))
copyAndAmendClusterctlConfig(ctx, copyAndAmendClusterctlConfigInput{
ClusterctlConfigPath: input.ClusterctlConfigPath,
OutputPath: outputPath,
Variables: input.ClusterctlVariables,
})
input.ClusterctlConfigPath = outputPath
}
// Check if the user wants a custom upgrade
isCustomUpgrade := input.CoreProvider != "" ||
len(input.BootstrapProviders) > 0 ||
len(input.ControlPlaneProviders) > 0 ||
len(input.InfrastructureProviders) > 0 ||
len(input.IPAMProviders) > 0 ||
len(input.RuntimeExtensionProviders) > 0
Expect((input.Contract != "" && !isCustomUpgrade) || (input.Contract == "" && isCustomUpgrade)).To(BeTrue(), `Invalid arguments. Either the input.Contract parameter or at least one of the following providers has to be set:
input.CoreProvider, input.BootstrapProviders, input.ControlPlaneProviders, input.InfrastructureProviders, input.IPAMProviders, input.RuntimeExtensionProviders`)
if isCustomUpgrade {
log.Logf("clusterctl upgrade apply --core %s --bootstrap %s --control-plane %s --infrastructure %s --ipam %s --runtime-extension %s --config %s --kubeconfig %s",
input.CoreProvider,
strings.Join(input.BootstrapProviders, ","),
strings.Join(input.ControlPlaneProviders, ","),
strings.Join(input.InfrastructureProviders, ","),
strings.Join(input.IPAMProviders, ","),
strings.Join(input.RuntimeExtensionProviders, ","),
input.ClusterctlConfigPath,
input.KubeconfigPath,
)
} else {
log.Logf("clusterctl upgrade apply --contract %s --config %s --kubeconfig %s",
input.Contract,
input.ClusterctlConfigPath,
input.KubeconfigPath,
)
}
upgradeOpt := clusterctlclient.ApplyUpgradeOptions{
Kubeconfig: clusterctlclient.Kubeconfig{
Path: input.KubeconfigPath,
Context: "",
},
Contract: input.Contract,
CoreProvider: input.CoreProvider,
BootstrapProviders: input.BootstrapProviders,
ControlPlaneProviders: input.ControlPlaneProviders,
InfrastructureProviders: input.InfrastructureProviders,
WaitProviders: true,
}
clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-upgrade.log", input.LogFolder)
defer log.Close()
err := clusterctlClient.ApplyUpgrade(upgradeOpt)
Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl upgrade")
}
// DeleteInput is the input for Delete.
type DeleteInput struct {
LogFolder string
ClusterctlConfigPath string
KubeconfigPath string
}
// Delete calls clusterctl delete --all.
func Delete(_ context.Context, input DeleteInput) {
log.Logf("clusterctl delete --all")
deleteOpts := clusterctlclient.DeleteOptions{
Kubeconfig: clusterctlclient.Kubeconfig{
Path: input.KubeconfigPath,
Context: "",
},
DeleteAll: true,
}
clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-delete.log", input.LogFolder)
defer log.Close()
err := clusterctlClient.Delete(deleteOpts)
Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl upgrade")
}
// ConfigClusterInput is the input for ConfigCluster.
type ConfigClusterInput struct {
LogFolder string
ClusterctlConfigPath string
KubeconfigPath string
InfrastructureProvider string
Namespace string
ClusterName string
KubernetesVersion string
ControlPlaneMachineCount *int64
WorkerMachineCount *int64
Flavor string
ClusterctlVariables map[string]string
}
// ConfigCluster gets a workload cluster based on a template.
func ConfigCluster(ctx context.Context, input ConfigClusterInput) []byte {
log.Logf("clusterctl config cluster %s --infrastructure %s --kubernetes-version %s --control-plane-machine-count %d --worker-machine-count %d --flavor %s",
input.ClusterName,
valueOrDefault(input.InfrastructureProvider),
input.KubernetesVersion,
*input.ControlPlaneMachineCount,
*input.WorkerMachineCount,
valueOrDefault(input.Flavor),
)
templateOptions := clusterctlclient.GetClusterTemplateOptions{
Kubeconfig: clusterctlclient.Kubeconfig{
Path: input.KubeconfigPath,
Context: "",
},
ProviderRepositorySource: &clusterctlclient.ProviderRepositorySourceOptions{
InfrastructureProvider: input.InfrastructureProvider,
Flavor: input.Flavor,
},
ClusterName: input.ClusterName,
KubernetesVersion: input.KubernetesVersion,
ControlPlaneMachineCount: input.ControlPlaneMachineCount,
WorkerMachineCount: input.WorkerMachineCount,
TargetNamespace: input.Namespace,
}
if len(input.ClusterctlVariables) > 0 {
outputPath := filepath.Join(filepath.Dir(input.ClusterctlConfigPath), fmt.Sprintf("clusterctl-upgrade-config-%s.yaml", input.ClusterName))
copyAndAmendClusterctlConfig(ctx, copyAndAmendClusterctlConfigInput{
ClusterctlConfigPath: input.ClusterctlConfigPath,
OutputPath: outputPath,
Variables: input.ClusterctlVariables,
})
input.ClusterctlConfigPath = outputPath
}
clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, fmt.Sprintf("%s-cluster-template.yaml", input.ClusterName), input.LogFolder)
defer log.Close()
template, err := clusterctlClient.GetClusterTemplate(templateOptions)
Expect(err).ToNot(HaveOccurred(), "Failed to run clusterctl config cluster")
yaml, err := template.Yaml()
Expect(err).ToNot(HaveOccurred(), "Failed to generate yaml for the workload cluster template")
_, _ = log.WriteString(string(yaml))
return yaml
}
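// A minimal sketch of rendering a workload cluster template and applying it through a
// ClusterProxy; every concrete value below is illustrative, and pointer.Int64 is the
// helper from k8s.io/utils/pointer.
//
//	template := clusterctl.ConfigCluster(ctx, clusterctl.ConfigClusterInput{
//		LogFolder:                filepath.Join(artifactFolder, "clusters", "quick-start"),
//		ClusterctlConfigPath:     clusterctlConfigPath,
//		KubeconfigPath:           managementClusterProxy.GetKubeconfigPath(),
//		InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
//		Flavor:                   clusterctl.DefaultFlavor,
//		Namespace:                "quick-start-abc123",
//		ClusterName:              "quick-start-cluster",
//		KubernetesVersion:        "v1.24.4+k3s1",
//		ControlPlaneMachineCount: pointer.Int64(1),
//		WorkerMachineCount:       pointer.Int64(1),
//	})
//	Expect(managementClusterProxy.Apply(ctx, template)).To(Succeed())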
// ConfigClusterWithBinary uses clusterctl binary to run config cluster or generate cluster.
// NOTE: This func detects the clusterctl version and uses config cluster or generate cluster
// accordingly. We can drop the detection when we don't have to support clusterctl v0.3.x anymore.
func ConfigClusterWithBinary(_ context.Context, clusterctlBinaryPath string, input ConfigClusterInput) []byte {
log.Logf("Detect clusterctl version via: clusterctl version")
out, err := exec.Command(clusterctlBinaryPath, "version").Output()
Expect(err).ToNot(HaveOccurred(), "error running clusterctl version")
var clusterctlSupportsGenerateCluster bool
if strings.Contains(string(out), "Major:\"1\"") {
log.Logf("Detected clusterctl v1.x")
clusterctlSupportsGenerateCluster = true
}
var cmd *exec.Cmd
if clusterctlSupportsGenerateCluster {
log.Logf("clusterctl generate cluster %s --infrastructure %s --kubernetes-version %s --control-plane-machine-count %d --worker-machine-count %d --flavor %s",
input.ClusterName,
valueOrDefault(input.InfrastructureProvider),
input.KubernetesVersion,
*input.ControlPlaneMachineCount,
*input.WorkerMachineCount,
valueOrDefault(input.Flavor),
)
cmd = exec.Command(clusterctlBinaryPath, "generate", "cluster", //nolint:gosec // We don't care about command injection here.
input.ClusterName,
"--infrastructure", input.InfrastructureProvider,
"--kubernetes-version", input.KubernetesVersion,
"--control-plane-machine-count", fmt.Sprint(*input.ControlPlaneMachineCount),
"--worker-machine-count", fmt.Sprint(*input.WorkerMachineCount),
"--flavor", input.Flavor,
"--target-namespace", input.Namespace,
"--config", input.ClusterctlConfigPath,
"--kubeconfig", input.KubeconfigPath,
)
} else {
log.Logf("clusterctl config cluster %s --infrastructure %s --kubernetes-version %s --control-plane-machine-count %d --worker-machine-count %d --flavor %s",
input.ClusterName,
valueOrDefault(input.InfrastructureProvider),
input.KubernetesVersion,
*input.ControlPlaneMachineCount,
*input.WorkerMachineCount,
valueOrDefault(input.Flavor),
)
cmd = exec.Command(clusterctlBinaryPath, "config", "cluster", //nolint:gosec // We don't care about command injection here.
input.ClusterName,
"--infrastructure", input.InfrastructureProvider,
"--kubernetes-version", input.KubernetesVersion,
"--control-plane-machine-count", fmt.Sprint(*input.ControlPlaneMachineCount),
"--worker-machine-count", fmt.Sprint(*input.WorkerMachineCount),
"--flavor", input.Flavor,
"--target-namespace", input.Namespace,
"--config", input.ClusterctlConfigPath,
"--kubeconfig", input.KubeconfigPath,
)
}
out, err = cmd.Output()
_ = os.WriteFile(filepath.Join(input.LogFolder, fmt.Sprintf("%s-cluster-template.yaml", input.ClusterName)), out, 0644) //nolint:gosec // this is a log file to be shared via prow artifacts
var stdErr string
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
stdErr = string(exitErr.Stderr)
}
}
Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl config cluster:\nstdout:\n%s\nstderr:\n%s", string(out), stdErr)
return out
}
// MoveInput is the input for ClusterctlMove.
type MoveInput struct {
LogFolder string
ClusterctlConfigPath string
FromKubeconfigPath string
ToKubeconfigPath string
Namespace string
}
// Move moves workload clusters.
func Move(ctx context.Context, input MoveInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for Move")
Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling Move")
Expect(input.FromKubeconfigPath).To(BeAnExistingFile(), "Invalid argument. input.FromKubeconfigPath must be an existing file when calling Move")
Expect(input.ToKubeconfigPath).To(BeAnExistingFile(), "Invalid argument. input.ToKubeconfigPath must be an existing file when calling Move")
logDir := path.Join(input.LogFolder, "logs", input.Namespace)
Expect(os.MkdirAll(logDir, 0750)).To(Succeed(), "Invalid argument. input.LogFolder can't be created for Move")
By("Moving workload clusters")
log.Logf("clusterctl move --from-kubeconfig %s --to-kubeconfig %s --namespace %s",
input.FromKubeconfigPath,
input.ToKubeconfigPath,
input.Namespace,
)
clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-move.log", logDir)
defer log.Close()
options := clusterctlclient.MoveOptions{
FromKubeconfig: clusterctlclient.Kubeconfig{Path: input.FromKubeconfigPath, Context: ""},
ToKubeconfig: clusterctlclient.Kubeconfig{Path: input.ToKubeconfigPath, Context: ""},
Namespace: input.Namespace,
}
Expect(clusterctlClient.Move(options)).To(Succeed(), "Failed to run clusterctl move")
}
func getClusterctlClientWithLogger(configPath, logName, logFolder string) (clusterctlclient.Client, *logger.LogFile) {
log := logger.OpenLogFile(logger.OpenLogFileInput{
LogFolder: logFolder,
Name: logName,
})
clusterctllog.SetLogger(log.Logger())
c, err := clusterctlclient.New(configPath)
Expect(err).ToNot(HaveOccurred(), "Failed to create the clusterctl client library")
return c, log
}
func valueOrDefault(v string) string {
if v != "" {
return v
}
return "(default)"
}

View File

@ -0,0 +1,57 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterctl
import (
"os"
. "github.com/onsi/gomega"
"sigs.k8s.io/yaml"
)
// Provide helpers for working with the clusterctl config file.
// clusterctlConfig defines the content of the clusterctl config file.
// The main responsibility for this structure is to point clusterctl to the local repository that should be used for E2E tests.
type clusterctlConfig struct {
Path string
Values map[string]interface{}
}
// providerConfig mirrors the clusterctl config.Provider interface and allows serialization of the corresponding info into a clusterctl config file.
type providerConfig struct {
Name string `json:"name,omitempty"`
URL string `json:"url,omitempty"`
Type string `json:"type,omitempty"`
}
// write writes a clusterctl config file to disk.
func (c *clusterctlConfig) write() {
data, err := yaml.Marshal(c.Values)
Expect(err).ToNot(HaveOccurred(), "Failed to marshal the clusterctl config file")
Expect(os.WriteFile(c.Path, data, 0600)).To(Succeed(), "Failed to write the clusterctl config file")
}
// read reads a clusterctl config file from disk.
func (c *clusterctlConfig) read() {
data, err := os.ReadFile(c.Path)
Expect(err).ToNot(HaveOccurred())
err = yaml.Unmarshal(data, &c.Values)
Expect(err).ToNot(HaveOccurred(), "Failed to unmarshal the clusterctl config file")
}

View File

@ -0,0 +1,385 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterctl
import (
"context"
"os"
"path/filepath"
"time"
. "github.com/onsi/gomega"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1"
"github.com/kubesphere/kubekey/test/e2e/framework"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
// InitManagementClusterAndWatchControllerLogsInput is the input type for InitManagementClusterAndWatchControllerLogs.
type InitManagementClusterAndWatchControllerLogsInput struct {
ClusterProxy framework.ClusterProxy
ClusterctlConfigPath string
CoreProvider string
BootstrapProviders []string
ControlPlaneProviders []string
InfrastructureProviders []string
LogFolder string
DisableMetricsCollection bool
ClusterctlBinaryPath string
}
// InitManagementClusterAndWatchControllerLogs initializes a management cluster using clusterctl and sets up watches for controller logs.
// Important: to support test suites that run against existing clusters, clusterctl init is executed only if
// there are no provider controllers in the cluster; controller log watchers are created regardless of any pre-existing providers.
func InitManagementClusterAndWatchControllerLogs(ctx context.Context, input InitManagementClusterAndWatchControllerLogsInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for InitManagementClusterAndWatchControllerLogs")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling InitManagementClusterAndWatchControllerLogs")
Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling InitManagementClusterAndWatchControllerLogs")
Expect(input.InfrastructureProviders).ToNot(BeEmpty(), "Invalid argument. input.InfrastructureProviders can't be empty when calling InitManagementClusterAndWatchControllerLogs")
Expect(os.MkdirAll(input.LogFolder, 0750)).To(Succeed(), "Invalid argument. input.LogFolder can't be created for InitManagementClusterAndWatchControllerLogs")
if input.CoreProvider == "" {
input.CoreProvider = config.ClusterAPIProviderName
}
if len(input.BootstrapProviders) == 0 {
input.BootstrapProviders = []string{K3sBootstrapProviderName}
}
if len(input.ControlPlaneProviders) == 0 {
input.ControlPlaneProviders = []string{K3sControlPlaneProviderName}
}
client := input.ClusterProxy.GetClient()
controllersDeployments := framework.GetControllerDeployments(ctx, framework.GetControllerDeploymentsInput{
Lister: client,
})
if len(controllersDeployments) == 0 {
initInput := InitInput{
// pass reference to the management cluster hosting this test
KubeconfigPath: input.ClusterProxy.GetKubeconfigPath(),
// pass the clusterctl config file that points to the local provider repository created for this test
ClusterctlConfigPath: input.ClusterctlConfigPath,
// setup the desired list of providers for a single-tenant management cluster
CoreProvider: input.CoreProvider,
BootstrapProviders: input.BootstrapProviders,
ControlPlaneProviders: input.ControlPlaneProviders,
InfrastructureProviders: input.InfrastructureProviders,
// setup clusterctl logs folder
LogFolder: input.LogFolder,
}
if input.ClusterctlBinaryPath != "" {
InitWithBinary(ctx, input.ClusterctlBinaryPath, initInput)
} else {
Init(ctx, initInput)
}
}
log.Logf("Waiting for provider controllers to be running")
controllersDeployments = framework.GetControllerDeployments(ctx, framework.GetControllerDeploymentsInput{
Lister: client,
})
Expect(controllersDeployments).ToNot(BeEmpty(), "The list of controller deployments should not be empty")
for _, deployment := range controllersDeployments {
framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{
Getter: client,
Deployment: deployment,
}, intervals...)
// Start streaming logs from all controller providers
framework.WatchDeploymentLogs(ctx, framework.WatchDeploymentLogsInput{
GetLister: client,
ClientSet: input.ClusterProxy.GetClientSet(),
Deployment: deployment,
LogPath: filepath.Join(input.LogFolder, "logs", deployment.GetNamespace()),
})
if !input.DisableMetricsCollection {
framework.WatchPodMetrics(ctx, framework.WatchPodMetricsInput{
GetLister: client,
ClientSet: input.ClusterProxy.GetClientSet(),
Deployment: deployment,
MetricsPath: filepath.Join(input.LogFolder, "metrics", deployment.GetNamespace()),
})
}
}
}
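// A minimal sketch of how a suite's setup step might call the helper above; the
// bootstrap and control plane providers default to the k3s providers when left empty,
// and the provider name, paths and intervals are illustrative.
//
//	clusterctl.InitManagementClusterAndWatchControllerLogs(ctx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{
//		ClusterProxy:            managementClusterProxy,
//		ClusterctlConfigPath:    clusterctlConfigPath,
//		InfrastructureProviders: []string{"kubekey"},
//		LogFolder:               filepath.Join(artifactFolder, "clusters", managementClusterProxy.GetName()),
//	}, "5m", "10s")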
// UpgradeManagementClusterAndWaitInput is the input type for UpgradeManagementClusterAndWait.
type UpgradeManagementClusterAndWaitInput struct {
ClusterProxy framework.ClusterProxy
ClusterctlConfigPath string
ClusterctlVariables map[string]string
Contract string
CoreProvider string
BootstrapProviders []string
ControlPlaneProviders []string
InfrastructureProviders []string
IPAMProviders []string
RuntimeExtensionProviders []string
LogFolder string
}
// UpgradeManagementClusterAndWait upgrades the providers in a management cluster using clusterctl, and waits for the cluster to be ready.
func UpgradeManagementClusterAndWait(ctx context.Context, input UpgradeManagementClusterAndWaitInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeManagementClusterAndWait")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeManagementClusterAndWait")
Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling UpgradeManagementClusterAndWait")
// Check if the user wants a custom upgrade
isCustomUpgrade := input.CoreProvider != "" ||
len(input.BootstrapProviders) > 0 ||
len(input.ControlPlaneProviders) > 0 ||
len(input.InfrastructureProviders) > 0 ||
len(input.IPAMProviders) > 0 ||
len(input.RuntimeExtensionProviders) > 0
Expect((input.Contract != "" && !isCustomUpgrade) || (input.Contract == "" && isCustomUpgrade)).To(BeTrue(), `Invalid argument. Either the input.Contract parameter or at least one of the following providers has to be set:
input.CoreProvider, input.BootstrapProviders, input.ControlPlaneProviders, input.InfrastructureProviders, input.IPAMProviders, input.RuntimeExtensionProviders`)
Expect(os.MkdirAll(input.LogFolder, 0750)).To(Succeed(), "Invalid argument. input.LogFolder can't be created for UpgradeManagementClusterAndWait")
Upgrade(ctx, UpgradeInput{
ClusterctlConfigPath: input.ClusterctlConfigPath,
ClusterctlVariables: input.ClusterctlVariables,
ClusterName: input.ClusterProxy.GetName(),
KubeconfigPath: input.ClusterProxy.GetKubeconfigPath(),
Contract: input.Contract,
CoreProvider: input.CoreProvider,
BootstrapProviders: input.BootstrapProviders,
ControlPlaneProviders: input.ControlPlaneProviders,
InfrastructureProviders: input.InfrastructureProviders,
IPAMProviders: input.IPAMProviders,
RuntimeExtensionProviders: input.RuntimeExtensionProviders,
LogFolder: input.LogFolder,
})
client := input.ClusterProxy.GetClient()
log.Logf("Waiting for provider controllers to be running")
controllersDeployments := framework.GetControllerDeployments(ctx, framework.GetControllerDeploymentsInput{
Lister: client,
ExcludeNamespaces: []string{"capi-webhook-system"}, // this namespace has been dropped in v1alpha4; this ensures we are not waiting for deployments being deleted as part of the upgrade process
})
Expect(controllersDeployments).ToNot(BeEmpty(), "The list of controller deployments should not be empty")
for _, deployment := range controllersDeployments {
framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{
Getter: client,
Deployment: deployment,
}, intervals...)
// Start streaming logs from all controller providers
framework.WatchDeploymentLogs(ctx, framework.WatchDeploymentLogsInput{
GetLister: client,
ClientSet: input.ClusterProxy.GetClientSet(),
Deployment: deployment,
LogPath: filepath.Join(input.LogFolder, "logs", deployment.GetNamespace()),
})
framework.WatchPodMetrics(ctx, framework.WatchPodMetricsInput{
GetLister: client,
ClientSet: input.ClusterProxy.GetClientSet(),
Deployment: deployment,
MetricsPath: filepath.Join(input.LogFolder, "metrics", deployment.GetNamespace()),
})
}
}
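// An illustrative, contract-based invocation of the helper above. The variable names,
// interval key and folder layout below are assumptions for this sketch and are not defined
// in this commit:
//
//	UpgradeManagementClusterAndWait(ctx, UpgradeManagementClusterAndWaitInput{
//		ClusterProxy:         managementClusterProxy,
//		ClusterctlConfigPath: clusterctlConfigPath,
//		Contract:             clusterv1.GroupVersion.Version, // mutually exclusive with the per-provider fields
//		LogFolder:            filepath.Join(artifactFolder, "clusters", managementClusterProxy.GetName()),
//	}, e2eConfig.GetIntervals("default", "wait-controllers")...)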
// ApplyClusterTemplateAndWaitInput is the input type for ApplyClusterTemplateAndWait.
type ApplyClusterTemplateAndWaitInput struct {
ClusterProxy framework.ClusterProxy
ConfigCluster ConfigClusterInput
CNIManifestPath string
WaitForClusterIntervals []interface{}
WaitForControlPlaneIntervals []interface{}
WaitForMachineDeployments []interface{}
WaitForMachinePools []interface{}
Args []string // extra args to be used during `kubectl apply`
PreWaitForCluster func()
PostMachinesProvisioned func()
ControlPlaneWaiters
}
// Waiter is a function that runs and waits for a long-running operation to finish and updates the result.
type Waiter func(ctx context.Context, input ApplyClusterTemplateAndWaitInput, result *ApplyClusterTemplateAndWaitResult)
// ControlPlaneWaiters are Waiter functions for the control plane.
type ControlPlaneWaiters struct {
WaitForControlPlaneInitialized Waiter
WaitForControlPlaneMachinesReady Waiter
}
// ApplyClusterTemplateAndWaitResult is the output type for ApplyClusterTemplateAndWait.
type ApplyClusterTemplateAndWaitResult struct {
ClusterClass *clusterv1.ClusterClass
Cluster *clusterv1.Cluster
ControlPlane *infracontrolplanev1.K3sControlPlane
MachineDeployments []*clusterv1.MachineDeployment
MachinePools []*expv1.MachinePool
}
// ExpectedWorkerNodes returns the expected number of worker nodes that will
// be provisioned by the given cluster template.
func (r *ApplyClusterTemplateAndWaitResult) ExpectedWorkerNodes() int32 {
expectedWorkerNodes := int32(0)
for _, md := range r.MachineDeployments {
if md.Spec.Replicas != nil {
expectedWorkerNodes += *md.Spec.Replicas
}
}
for _, mp := range r.MachinePools {
if mp.Spec.Replicas != nil {
expectedWorkerNodes += *mp.Spec.Replicas
}
}
return expectedWorkerNodes
}
// ExpectedTotalNodes returns the expected number of nodes that will
// be provisioned by the given cluster template.
func (r *ApplyClusterTemplateAndWaitResult) ExpectedTotalNodes() int32 {
expectedNodes := r.ExpectedWorkerNodes()
if r.ControlPlane != nil && r.ControlPlane.Spec.Replicas != nil {
expectedNodes += *r.ControlPlane.Spec.Replicas
}
return expectedNodes
}
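// For example (replica counts illustrative): a template that renders a K3sControlPlane with
// 3 replicas and a single MachineDeployment with 2 replicas yields ExpectedWorkerNodes() == 2
// and ExpectedTotalNodes() == 5.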
// ApplyClusterTemplateAndWait gets a cluster template using clusterctl, and waits for the cluster to be ready.
// Important! This method assumes the cluster uses a K3sControlPlane and MachineDeployments.
func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplateAndWaitInput, result *ApplyClusterTemplateAndWaitResult) {
setDefaults(&input)
Expect(ctx).NotTo(BeNil(), "ctx is required for ApplyClusterTemplateAndWait")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyClusterTemplateAndWait")
Expect(result).ToNot(BeNil(), "Invalid argument. result can't be nil when calling ApplyClusterTemplateAndWait")
Expect(input.ConfigCluster.ControlPlaneMachineCount).ToNot(BeNil())
Expect(input.ConfigCluster.WorkerMachineCount).ToNot(BeNil())
log.Logf("Creating the workload cluster with name %q using the %q template (Kubernetes %s, %d control-plane machines, %d worker machines)",
input.ConfigCluster.ClusterName, valueOrDefault(input.ConfigCluster.Flavor), input.ConfigCluster.KubernetesVersion, *input.ConfigCluster.ControlPlaneMachineCount, *input.ConfigCluster.WorkerMachineCount)
log.Logf("Getting the cluster template yaml")
workloadClusterTemplate := ConfigCluster(ctx, ConfigClusterInput{
// pass reference to the management cluster hosting this test
KubeconfigPath: input.ConfigCluster.KubeconfigPath,
// pass the clusterctl config file that points to the local provider repository created for this test,
ClusterctlConfigPath: input.ConfigCluster.ClusterctlConfigPath,
// select template
Flavor: input.ConfigCluster.Flavor,
// define template variables
Namespace: input.ConfigCluster.Namespace,
ClusterName: input.ConfigCluster.ClusterName,
KubernetesVersion: input.ConfigCluster.KubernetesVersion,
ControlPlaneMachineCount: input.ConfigCluster.ControlPlaneMachineCount,
WorkerMachineCount: input.ConfigCluster.WorkerMachineCount,
InfrastructureProvider: input.ConfigCluster.InfrastructureProvider,
// setup clusterctl logs folder
LogFolder: input.ConfigCluster.LogFolder,
ClusterctlVariables: input.ConfigCluster.ClusterctlVariables,
})
Expect(workloadClusterTemplate).ToNot(BeNil(), "Failed to get the cluster template")
log.Logf("Applying the cluster template yaml to the cluster")
Eventually(func() error {
return input.ClusterProxy.Apply(ctx, workloadClusterTemplate, input.Args...)
}, 10*time.Second).Should(Succeed(), "Failed to apply the cluster template")
// Once we applied the cluster template we can run PreWaitForCluster.
// Note: This can e.g. be used to verify the BeforeClusterCreate lifecycle hook is executed
// and blocking correctly.
if input.PreWaitForCluster != nil {
log.Logf("Calling PreWaitForCluster")
input.PreWaitForCluster()
}
log.Logf("Waiting for the cluster infrastructure to be provisioned")
result.Cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{
Getter: input.ClusterProxy.GetClient(),
Namespace: input.ConfigCluster.Namespace,
Name: input.ConfigCluster.ClusterName,
}, input.WaitForClusterIntervals...)
if result.Cluster.Spec.Topology != nil {
result.ClusterClass = framework.GetClusterClassByName(ctx, framework.GetClusterClassByNameInput{
Getter: input.ClusterProxy.GetClient(),
Namespace: input.ConfigCluster.Namespace,
Name: result.Cluster.Spec.Topology.Class,
})
}
log.Logf("Waiting for control plane to be initialized")
input.WaitForControlPlaneInitialized(ctx, input, result)
if input.CNIManifestPath != "" {
log.Logf("Installing a CNI plugin to the workload cluster")
workloadCluster := input.ClusterProxy.GetWorkloadCluster(ctx, result.Cluster.Namespace, result.Cluster.Name)
cniYaml, err := os.ReadFile(input.CNIManifestPath)
Expect(err).ShouldNot(HaveOccurred())
Expect(workloadCluster.Apply(ctx, cniYaml)).ShouldNot(HaveOccurred())
}
log.Logf("Waiting for control plane to be ready")
input.WaitForControlPlaneMachinesReady(ctx, input, result)
log.Logf("Waiting for the machine deployments to be provisioned")
result.MachineDeployments = framework.DiscoveryAndWaitForMachineDeployments(ctx, framework.DiscoveryAndWaitForMachineDeploymentsInput{
Lister: input.ClusterProxy.GetClient(),
Cluster: result.Cluster,
}, input.WaitForMachineDeployments...)
log.Logf("Waiting for the machine pools to be provisioned")
result.MachinePools = framework.DiscoveryAndWaitForMachinePools(ctx, framework.DiscoveryAndWaitForMachinePoolsInput{
Getter: input.ClusterProxy.GetClient(),
Lister: input.ClusterProxy.GetClient(),
Cluster: result.Cluster,
}, input.WaitForMachinePools...)
if input.PostMachinesProvisioned != nil {
log.Logf("Calling PostMachinesProvisioned")
input.PostMachinesProvisioned()
}
}
// setDefaults sets the default values for ApplyClusterTemplateAndWaitInput if not set.
// Currently, we set the default ControlPlaneWaiters here, which are implemented for K3sControlPlane.
func setDefaults(input *ApplyClusterTemplateAndWaitInput) {
if input.WaitForControlPlaneInitialized == nil {
input.WaitForControlPlaneInitialized = func(ctx context.Context, input ApplyClusterTemplateAndWaitInput, result *ApplyClusterTemplateAndWaitResult) {
result.ControlPlane = framework.DiscoveryAndWaitForControlPlaneInitialized(ctx, framework.DiscoveryAndWaitForControlPlaneInitializedInput{
Lister: input.ClusterProxy.GetClient(),
Cluster: result.Cluster,
}, input.WaitForControlPlaneIntervals...)
}
}
if input.WaitForControlPlaneMachinesReady == nil {
input.WaitForControlPlaneMachinesReady = func(ctx context.Context, input ApplyClusterTemplateAndWaitInput, result *ApplyClusterTemplateAndWaitResult) {
framework.WaitForControlPlaneAndMachinesReady(ctx, framework.WaitForControlPlaneAndMachinesReadyInput{
GetLister: input.ClusterProxy.GetClient(),
Cluster: result.Cluster,
ControlPlane: result.ControlPlane,
}, input.WaitForControlPlaneIntervals...)
}
}
}
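For orientation, a minimal sketch of how a test spec could drive ApplyClusterTemplateAndWait. The variable names (bootstrapClusterProxy, e2eConfig, clusterctlConfigPath, namespace, artifactFolder), the flavor, and the variable/interval keys are assumptions for this sketch and are not defined in this commit:

result := new(ApplyClusterTemplateAndWaitResult)
ApplyClusterTemplateAndWait(ctx, ApplyClusterTemplateAndWaitInput{
	ClusterProxy: bootstrapClusterProxy,
	ConfigCluster: ConfigClusterInput{
		KubeconfigPath:           bootstrapClusterProxy.GetKubeconfigPath(),
		ClusterctlConfigPath:     clusterctlConfigPath,
		Flavor:                   "quick-start",
		Namespace:                namespace.Name,
		ClusterName:              fmt.Sprintf("cluster-%s", util.RandomString(6)),
		KubernetesVersion:        e2eConfig.GetVariable("KUBERNETES_VERSION"),
		ControlPlaneMachineCount: pointer.Int64(1),
		WorkerMachineCount:       pointer.Int64(1),
		LogFolder:                filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
	},
	WaitForClusterIntervals:      e2eConfig.GetIntervals("default", "wait-cluster"),
	WaitForControlPlaneIntervals: e2eConfig.GetIntervals("default", "wait-control-plane"),
	WaitForMachineDeployments:    e2eConfig.GetIntervals("default", "wait-worker-nodes"),
}, result)
// The default ControlPlaneWaiters set by setDefaults then populate result.ControlPlane
// with the discovered K3sControlPlane.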

View File

@ -0,0 +1,18 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package clusterctl implements clusterctl interaction.
package clusterctl

View File

@ -0,0 +1,646 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterctl
import (
"context"
"fmt"
"net/url"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/utils/pointer"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
clusterctlconfig "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/yaml"
)
// Names of the k3s bootstrap and control-plane providers expected in the e2e config.
const (
K3sBootstrapProviderName = "k3s"
K3sControlPlaneProviderName = "k3s"
)
// Provides access to the configuration for an e2e test.
// LoadE2EConfigInput is the input for LoadE2EConfig.
type LoadE2EConfigInput struct {
// ConfigPath for the e2e test.
ConfigPath string
}
// LoadE2EConfig loads the configuration for the e2e test environment.
func LoadE2EConfig(ctx context.Context, input LoadE2EConfigInput) *E2EConfig {
configData, err := os.ReadFile(input.ConfigPath)
Expect(err).ToNot(HaveOccurred(), "Failed to read the e2e test config file")
Expect(configData).ToNot(BeEmpty(), "The e2e test config file should not be empty")
config := &E2EConfig{}
Expect(yaml.Unmarshal(configData, config)).To(Succeed(), "Failed to convert the e2e test config file to yaml")
config.Defaults()
config.AbsPaths(filepath.Dir(input.ConfigPath))
Expect(config.Validate()).To(Succeed(), "The e2e test config file is not valid")
return config
}
// E2EConfig defines the configuration of an e2e test environment.
type E2EConfig struct {
// Name is the name of the Kind management cluster.
// Defaults to test-[random generated suffix].
ManagementClusterName string `json:"managementClusterName,omitempty"`
// Images is a list of container images to load into the Kind cluster.
Images []ContainerImage `json:"images,omitempty"`
// Providers is a list of providers to be configured in the local repository that will be created for the e2e test.
// It is required to provide the following providers:
// - cluster-api
// - bootstrap k3s
// - control-plane k3s
// - one infrastructure provider
// The test will adapt to the selected infrastructure provider.
Providers []ProviderConfig `json:"providers,omitempty"`
// Variables to be added to the clusterctl config file
// Please note that clusterctl reads variables from OS environment variables as well, so you can avoid hard-coding
// sensitive data in the config file.
Variables map[string]string `json:"variables,omitempty"`
// Intervals to be used for long operations during tests
Intervals map[string][]string `json:"intervals,omitempty"`
}
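// An illustrative, minimal shape of such a config file (image name, versions and URLs are
// hypothetical, not taken from this commit; a complete config also declares the k3s
// bootstrap/control-plane providers and an infrastructure provider, see Validate below):
//
//	managementClusterName: capkk-e2e
//	images:
//	  - name: docker.io/kubespheredev/capkk-controller:e2e
//	    loadBehavior: tryLoad
//	providers:
//	  - name: cluster-api
//	    type: CoreProvider
//	    versions:
//	      - name: v1.2.4
//	        type: url
//	        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.4/core-components.yaml
//	variables:
//	  KUBERNETES_VERSION: v1.24.0
//	intervals:
//	  default/wait-cluster: ["20m", "10s"]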
// ProviderConfig describes a provider to be configured in the local repository that will be created for the e2e test.
type ProviderConfig struct {
// Name is the name of the provider.
Name string `json:"name"`
// Type is the type of the provider.
Type string `json:"type"`
// Versions is a list of component YAML to be added to the local repository, one for each release.
// Please note that the first source will be used as the default release for this provider.
Versions []ProviderVersionSource `json:"versions,omitempty"`
// Files is a list of files to be copied into the local repository for all the releases.
Files []Files `json:"files,omitempty"`
}
// LoadImageBehavior indicates the behavior when loading an image.
type LoadImageBehavior string
const (
// MustLoadImage causes a load operation to fail if the image cannot be
// loaded.
MustLoadImage LoadImageBehavior = "mustLoad"
// TryLoadImage causes any errors that occur when loading an image to be
// ignored.
TryLoadImage LoadImageBehavior = "tryLoad"
)
// ContainerImage describes an image to load into a cluster and the behavior
// when loading the image.
type ContainerImage struct {
// Name is the fully qualified name of the image.
Name string
// LoadBehavior may be used to dictate whether a failed load operation
// should fail the test run. This is useful when wanting to load images
// *if* they exist locally, but not wanting to fail if they don't.
//
// Defaults to MustLoadImage.
LoadBehavior LoadImageBehavior
}
// ComponentSourceType indicates how a component's source should be obtained.
type ComponentSourceType string
const (
// URLSource is component YAML available directly via a URL.
// The URL may begin with http://, https:// or file:// (the scheme can be omitted; relative paths are supported).
URLSource ComponentSourceType = "url"
// KustomizeSource is a valid kustomization root that can be used to produce
// the component YAML.
KustomizeSource ComponentSourceType = "kustomize"
)
// ProviderVersionSource describes how to obtain a component's YAML.
type ProviderVersionSource struct {
// Name is used for logging when a component has multiple sources.
Name string `json:"name,omitempty"`
// Value is the source of the component's YAML.
// May be a URL or a kustomization root (specified by Type).
// If a Type=url then Value may begin with file://, http://, or https://.
// If a Type=kustomize then Value may be any valid go-getter URL. For
// more information please see https://github.com/hashicorp/go-getter#url-format.
Value string `json:"value"`
// Contract defines the Cluster API contract version a specific version of the provider abides to.
Contract string `json:"contract,omitempty"`
// Type describes how to process the source of the component's YAML.
//
// Defaults to "kustomize".
Type ComponentSourceType `json:"type,omitempty"`
// Replacements is a list of patterns to replace in the component YAML
// prior to application.
Replacements []ComponentReplacement `json:"replacements,omitempty"`
// Files is a list of files to be copied into the local repository for this release.
Files []Files `json:"files,omitempty"`
}
// ComponentWaiterType indicates the type of check to use to determine if the
// installed components are ready.
type ComponentWaiterType string
const (
// ServiceWaiter indicates to wait until a service's condition is Available.
// When ComponentWaiter.Type is set to "service", the ComponentWaiter.Value
// should be set to the name of a Service resource.
ServiceWaiter ComponentWaiterType = "service"
// PodsWaiter indicates to wait until all the pods in a namespace have a
// condition of Ready.
// When ComponentWaiter.Type is set to "pods", the ComponentWaiter.Value
// should be set to the name of a Namespace resource.
PodsWaiter ComponentWaiterType = "pods"
)
// ComponentWaiter contains information to help determine whether installed
// components are ready.
type ComponentWaiter struct {
// Value varies depending on the specified Type.
// Please see the documentation for the different WaiterType constants to
// understand the valid values for this field.
Value string `json:"value"`
// Type describes the type of check to perform.
//
// Defaults to "pods".
Type ComponentWaiterType `json:"type,omitempty"`
}
// ComponentReplacement is used to replace some of the generated YAML prior
// to application.
type ComponentReplacement struct {
// Old is the pattern to replace.
// A regular expression may be used.
Old string `json:"old"`
// New is the string used to replace the old pattern.
// An empty string is valid.
New string `json:"new,omitempty"`
}
// ComponentConfig describes a component required by the e2e test environment.
type ComponentConfig struct {
// Name is the name of the component.
// This field is primarily used for logging.
Name string `json:"name"`
// Sources is an optional list of component YAML to apply to the management
// cluster.
// This field may be omitted when wanting only to block progress via one or
// more Waiters.
Sources []ProviderVersionSource `json:"sources,omitempty"`
// Waiters is an optional list of checks to perform in order to determine
// whether or not the installed components are ready.
Waiters []ComponentWaiter `json:"waiters,omitempty"`
}
// Files contains information about files to be copied into the local repository.
type Files struct {
// SourcePath is the path of the source file.
SourcePath string `json:"sourcePath"`
// TargetName is the name of the file copied into the local repository. If empty, the source name
// will be preserved.
TargetName string `json:"targetName,omitempty"`
}
// Defaults assigns default values to the object. More specifically:
// - ManagementClusterName gets a default name if empty.
// - Providers version gets type KustomizeSource if not otherwise specified.
// - Providers file gets targetName = sourceName if not otherwise specified.
// - Images gets LoadBehavior = MustLoadImage if not otherwise specified.
func (c *E2EConfig) Defaults() {
if c.ManagementClusterName == "" {
c.ManagementClusterName = fmt.Sprintf("test-%s", util.RandomString(6))
}
for i := range c.Providers {
provider := &c.Providers[i]
for j := range provider.Versions {
version := &provider.Versions[j]
if version.Type == "" {
version.Type = KustomizeSource
}
for j := range version.Files {
file := &version.Files[j]
if file.SourcePath != "" && file.TargetName == "" {
file.TargetName = filepath.Base(file.SourcePath)
}
}
}
for j := range provider.Files {
file := &provider.Files[j]
if file.SourcePath != "" && file.TargetName == "" {
file.TargetName = filepath.Base(file.SourcePath)
}
}
}
imageReplacer := strings.NewReplacer("{OS}", runtime.GOOS, "{ARCH}", runtime.GOARCH)
for i := range c.Images {
containerImage := &c.Images[i]
containerImage.Name = imageReplacer.Replace(containerImage.Name)
if containerImage.LoadBehavior == "" {
containerImage.LoadBehavior = MustLoadImage
}
}
}
// AbsPaths makes relative paths absolute using the given base path.
func (c *E2EConfig) AbsPaths(basePath string) {
for i := range c.Providers {
provider := &c.Providers[i]
for j := range provider.Versions {
version := &provider.Versions[j]
if version.Type != URLSource && version.Value != "" {
if !filepath.IsAbs(version.Value) {
version.Value = filepath.Join(basePath, version.Value)
}
} else if version.Type == URLSource && version.Value != "" {
// Skip error, will be checked later when loading contents from URL
u, _ := url.Parse(version.Value)
if u != nil {
switch u.Scheme {
case "", fileURIScheme:
fp := strings.TrimPrefix(version.Value, fmt.Sprintf("%s://", fileURIScheme))
if !filepath.IsAbs(fp) {
version.Value = filepath.Join(basePath, fp)
}
}
}
}
for j := range version.Files {
file := &version.Files[j]
if file.SourcePath != "" {
if !filepath.IsAbs(file.SourcePath) {
file.SourcePath = filepath.Join(basePath, file.SourcePath)
}
}
}
}
for j := range provider.Files {
file := &provider.Files[j]
if file.SourcePath != "" {
if !filepath.IsAbs(file.SourcePath) {
file.SourcePath = filepath.Join(basePath, file.SourcePath)
}
}
}
}
}
func errInvalidArg(format string, args ...interface{}) error {
msg := fmt.Sprintf(format, args...)
return errors.Errorf("invalid argument: %s", msg)
}
func errEmptyArg(argName string) error {
return errInvalidArg("%s is empty", argName)
}
// Validate validates the configuration. More specifically:
// - ManagementClusterName should not be empty.
// - There should be one CoreProvider (cluster-api), one BootstrapProvider (k3s), and one ControlPlaneProvider (k3s).
// - There should be at least one InfraProvider (pick your own).
// - Each image should have a name, and its loadBehavior should be one of [mustLoad, tryLoad].
// - Intervals should be valid ginkgo intervals.
func (c *E2EConfig) Validate() error {
// ManagementClusterName should not be empty.
if c.ManagementClusterName == "" {
return errEmptyArg("ManagementClusterName")
}
if err := c.validateProviders(); err != nil {
return err
}
// Each image should have a name, and its loadBehavior should be one of [mustLoad, tryLoad].
for i, containerImage := range c.Images {
if containerImage.Name == "" {
return errEmptyArg(fmt.Sprintf("Images[%d].Name=%q", i, containerImage.Name))
}
switch containerImage.LoadBehavior {
case MustLoadImage, TryLoadImage:
// Valid
default:
return errInvalidArg("Images[%d].LoadBehavior=%q", i, containerImage.LoadBehavior)
}
}
// Intervals should be valid ginkgo intervals.
for k, intervals := range c.Intervals {
switch len(intervals) {
case 0:
return errInvalidArg("Intervals[%s]=%q", k, intervals)
case 1, 2:
default:
return errInvalidArg("Intervals[%s]=%q", k, intervals)
}
for _, i := range intervals {
if _, err := time.ParseDuration(i); err != nil {
return errInvalidArg("Intervals[%s]=%q", k, intervals)
}
}
}
return nil
}
// validateProviders validates the provider configuration. More specifically:
// - Providers name should not be empty.
// - Providers type should be one of [CoreProvider, BootstrapProvider, ControlPlaneProvider, InfrastructureProvider].
// - Providers version should have a name.
// - Providers version.type should be one of [url, kustomize].
// - Providers version.replacements.old should be a valid regex.
// - Providers files should be an existing file and have a target name.
func (c *E2EConfig) validateProviders() error {
providersByType := map[clusterctlv1.ProviderType][]string{
clusterctlv1.CoreProviderType: nil,
clusterctlv1.BootstrapProviderType: nil,
clusterctlv1.ControlPlaneProviderType: nil,
clusterctlv1.InfrastructureProviderType: nil,
}
for i, providerConfig := range c.Providers {
// Providers name should not be empty.
if providerConfig.Name == "" {
return errEmptyArg(fmt.Sprintf("Providers[%d].Name", i))
}
// Providers type should be one of the known types.
providerType := clusterctlv1.ProviderType(providerConfig.Type)
switch providerType {
case clusterctlv1.CoreProviderType, clusterctlv1.BootstrapProviderType, clusterctlv1.ControlPlaneProviderType, clusterctlv1.InfrastructureProviderType:
providersByType[providerType] = append(providersByType[providerType], providerConfig.Name)
default:
return errInvalidArg("Providers[%d].Type=%q", i, providerConfig.Type)
}
// Providers providerVersion should have a name.
// Providers providerVersion.type should be one of [url, kustomize].
// Providers providerVersion.replacements.old should be a valid regex.
for j, providerVersion := range providerConfig.Versions {
if providerVersion.Name == "" {
return errEmptyArg(fmt.Sprintf("Providers[%d].Sources[%d].Name", i, j))
}
if _, err := version.ParseSemantic(providerVersion.Name); err != nil {
return errInvalidArg("Providers[%d].Sources[%d].Name=%q", i, j, providerVersion.Name)
}
switch providerVersion.Type {
case URLSource, KustomizeSource:
if providerVersion.Value == "" {
return errEmptyArg(fmt.Sprintf("Providers[%d].Sources[%d].Value", i, j))
}
default:
return errInvalidArg("Providers[%d].Sources[%d].Type=%q", i, j, providerVersion.Type)
}
for k, replacement := range providerVersion.Replacements {
if _, err := regexp.Compile(replacement.Old); err != nil {
return errInvalidArg("Providers[%d].Sources[%d].Replacements[%d].Old=%q: %v", i, j, k, replacement.Old, err)
}
}
// Providers files should be an existing file and have a target name.
for k, file := range providerVersion.Files {
if file.SourcePath == "" {
return errInvalidArg("Providers[%d].Sources[%d].Files[%d].SourcePath=%q", i, j, k, file.SourcePath)
}
if !fileExists(file.SourcePath) {
return errInvalidArg("Providers[%d].Sources[%d].Files[%d].SourcePath=%q", i, j, k, file.SourcePath)
}
if file.TargetName == "" {
return errInvalidArg("Providers[%d].Sources[%d].Files[%d].TargetName=%q", i, j, k, file.TargetName)
}
}
}
// Providers files should be an existing file and have a target name.
for j, file := range providerConfig.Files {
if file.SourcePath == "" {
return errInvalidArg("Providers[%d].Files[%d].SourcePath=%q", i, j, file.SourcePath)
}
if !fileExists(file.SourcePath) {
return errInvalidArg("Providers[%d].Files[%d].SourcePath=%q", i, j, file.SourcePath)
}
if file.TargetName == "" {
return errInvalidArg("Providers[%d].Files[%d].TargetName=%q", i, j, file.TargetName)
}
}
}
// There should be one CoreProvider (cluster-api), one BootstrapProvider (k3s), one ControlPlaneProvider (k3s).
if len(providersByType[clusterctlv1.CoreProviderType]) != 1 {
return errInvalidArg("invalid config: it is required to have exactly one core-provider")
}
if providersByType[clusterctlv1.CoreProviderType][0] != clusterctlconfig.ClusterAPIProviderName {
return errInvalidArg("invalid config: core-provider should be named %s", clusterctlconfig.ClusterAPIProviderName)
}
if len(providersByType[clusterctlv1.BootstrapProviderType]) != 1 {
return errInvalidArg("invalid config: it is required to have exactly one bootstrap-provider")
}
if providersByType[clusterctlv1.BootstrapProviderType][0] != K3sBootstrapProviderName {
return errInvalidArg("invalid config: bootstrap-provider should be named %s", K3sBootstrapProviderName)
}
if len(providersByType[clusterctlv1.ControlPlaneProviderType]) != 1 {
return errInvalidArg("invalid config: it is required to have exactly one control-plane-provider")
}
if providersByType[clusterctlv1.ControlPlaneProviderType][0] != K3sControlPlaneProviderName {
return errInvalidArg("invalid config: control-plane-provider should be named %s", K3sControlPlaneProviderName)
}
// There should be one InfraProvider (pick your own).
if len(providersByType[clusterctlv1.InfrastructureProviderType]) < 1 {
return errInvalidArg("invalid config: it is required to have at least one infrastructure-provider")
}
return nil
}
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if err != nil {
// Covers both "not exist" and any other stat error; in either case the file can't be used.
return false
}
return !info.IsDir()
}
// InfrastructureProviders returns the infrastructure provider selected for running this E2E test.
func (c *E2EConfig) InfrastructureProviders() []string {
return c.getProviders(clusterctlv1.InfrastructureProviderType)
}
func (c *E2EConfig) getProviders(t clusterctlv1.ProviderType) []string {
providers := []string{}
for _, provider := range c.Providers {
if provider.Type == string(t) {
providers = append(providers, provider.Name)
}
}
return providers
}
// HasDockerProvider returns true if the Docker provider is configured for running this E2E test.
func (c *E2EConfig) HasDockerProvider() bool {
for _, i := range c.InfrastructureProviders() {
if i == "docker" {
return true
}
}
return false
}
// GetIntervals returns the intervals to be applied to an Eventually operation.
// It searches for [spec]/[key] intervals first, and if they are not found, it searches
// for default/[key]. If the default/[key] intervals are also not found,
// ginkgo DefaultEventuallyTimeout and DefaultEventuallyPollingInterval are used.
func (c *E2EConfig) GetIntervals(spec, key string) []interface{} {
intervals, ok := c.Intervals[fmt.Sprintf("%s/%s", spec, key)]
if !ok {
if intervals, ok = c.Intervals[fmt.Sprintf("default/%s", key)]; !ok {
return nil
}
}
intervalsInterfaces := make([]interface{}, len(intervals))
for i := range intervals {
intervalsInterfaces[i] = intervals[i]
}
return intervalsInterfaces
}
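// For example, given the (illustrative) config
//
//	intervals:
//	  default/wait-cluster: ["20m", "10s"]
//	  quick-start/wait-cluster: ["30m", "10s"]
//
// GetIntervals("quick-start", "wait-cluster") returns ["30m", "10s"], while
// GetIntervals("some-other-spec", "wait-cluster") falls back to ["20m", "10s"].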
// HasVariable returns true if the variable exists in the config.
func (c *E2EConfig) HasVariable(varName string) bool {
if _, ok := os.LookupEnv(varName); ok {
return true
}
_, ok := c.Variables[varName]
return ok
}
// GetVariable returns a variable from environment variables or from the e2e config file.
func (c *E2EConfig) GetVariable(varName string) string {
if value, ok := os.LookupEnv(varName); ok {
return value
}
value, ok := c.Variables[varName]
Expect(ok).NotTo(BeFalse())
return value
}
// GetInt64PtrVariable returns an Int64Ptr variable from the e2e config file.
func (c *E2EConfig) GetInt64PtrVariable(varName string) *int64 {
wCountStr := c.GetVariable(varName)
if wCountStr == "" {
return nil
}
wCount, err := strconv.ParseInt(wCountStr, 10, 64)
Expect(err).NotTo(HaveOccurred())
return pointer.Int64(wCount)
}
// GetInt32PtrVariable returns an Int32Ptr variable from the e2e config file.
func (c *E2EConfig) GetInt32PtrVariable(varName string) *int32 {
wCountStr := c.GetVariable(varName)
if wCountStr == "" {
return nil
}
wCount, err := strconv.ParseUint(wCountStr, 10, 32)
Expect(err).NotTo(HaveOccurred())
return pointer.Int32(int32(wCount))
}
// GetProviderVersions returns the sorted list of versions defined for a provider.
func (c *E2EConfig) GetProviderVersions(provider string) []string {
return c.getVersions(provider, "*")
}
// GetProvidersWithOldestVersion returns the given providers, each paired with its oldest configured version.
func (c *E2EConfig) GetProvidersWithOldestVersion(providers ...string) []string {
ret := make([]string, 0, len(providers))
for _, p := range providers {
versions := c.getVersions(p, "*")
if len(versions) > 0 {
ret = append(ret, fmt.Sprintf("%s:%s", p, versions[0]))
}
}
return ret
}
// GetProviderLatestVersionsByContract returns, for each given provider, the latest version that abides to the given contract.
func (c *E2EConfig) GetProviderLatestVersionsByContract(contract string, providers ...string) []string {
ret := make([]string, 0, len(providers))
for _, p := range providers {
versions := c.getVersions(p, contract)
if len(versions) > 0 {
ret = append(ret, fmt.Sprintf("%s:%s", p, versions[len(versions)-1]))
}
}
return ret
}
func (c *E2EConfig) getVersions(provider string, contract string) []string {
versions := []string{}
for _, p := range c.Providers {
if p.Name == provider {
for _, v := range p.Versions {
if contract == "*" || v.Contract == contract {
versions = append(versions, v.Name)
}
}
}
}
sort.Slice(versions, func(i, j int) bool {
// NOTE: Ignoring errors because the validity of the format is ensured by Validation.
vI, _ := version.ParseSemantic(versions[i])
vJ, _ := version.ParseSemantic(versions[j])
return vI.LessThan(vJ)
})
return versions
}
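A minimal usage sketch for the loader and accessors above; the config path, variable name and surrounding context are assumptions for illustration, not values defined in this commit:

e2eConfig := LoadE2EConfig(ctx, LoadE2EConfigInput{ConfigPath: "test/e2e/config/k3s.yaml"})
fmt.Printf("testing Kubernetes %s with infrastructure providers %v\n",
	e2eConfig.GetVariable("KUBERNETES_VERSION"), e2eConfig.InfrastructureProviders())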

View File

@ -0,0 +1,77 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logger
import (
"bufio"
"os"
"path/filepath"
"github.com/go-logr/logr"
. "github.com/onsi/gomega"
)
// Provides a log file that can be used to store the logs generated by clusterctl actions.
// OpenLogFileInput is the input for OpenLogFile.
type OpenLogFileInput struct {
LogFolder string
Name string
}
// OpenLogFile opens a new log file.
func OpenLogFile(input OpenLogFileInput) *LogFile {
filePath := filepath.Join(input.LogFolder, input.Name)
Expect(os.MkdirAll(filepath.Dir(filePath), 0750)).To(Succeed(), "Failed to create log folder %s", filepath.Dir(filePath))
f, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) //nolint:gosec // No security issue: filepath is safe.
Expect(err).ToNot(HaveOccurred(), "Failed to create log file %s", filePath)
return &LogFile{
name: input.Name,
file: f,
Writer: bufio.NewWriter(f),
}
}
// LogFile is a log file.
type LogFile struct {
name string
file *os.File
*bufio.Writer
}
// Name returns the name of the log file.
func (f *LogFile) Name() string {
return f.name
}
// Flush flushes the log file.
func (f *LogFile) Flush() {
Expect(f.Writer.Flush()).To(Succeed(), "Failed to flush log %s", f.name)
}
// Close closes the log file.
func (f *LogFile) Close() {
f.Flush()
Expect(f.file.Close()).To(Succeed(), "Failed to close log %s", f.name)
}
// Logger returns a logger that writes to the log file.
func (f *LogFile) Logger() logr.Logger {
return logr.New(&logger{writer: f})
}
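A short usage sketch for the log file helper above, as seen from a caller package; the folder layout, log name and key/value pairs are illustrative assumptions:

logFile := logger.OpenLogFile(logger.OpenLogFileInput{
	LogFolder: filepath.Join(artifactFolder, "clusters", clusterName),
	Name:      "clusterctl-init.log",
})
defer logFile.Close()
logFile.Logger().Info("running clusterctl init", "providers", "k3s")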

View File

@ -0,0 +1,148 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package logger implements clusterctl logging functionality.
package logger
import (
"encoding/json"
"fmt"
"io"
"github.com/go-logr/logr"
"github.com/pkg/errors"
)
// Provides a logr.Logger to use during e2e tests.
type logger struct {
writer io.Writer
values []interface{}
}
var _ logr.LogSink = &logger{}
func (l *logger) Init(info logr.RuntimeInfo) {
}
func (l *logger) Enabled(level int) bool {
return true
}
func (l *logger) Info(level int, msg string, kvs ...interface{}) {
values := copySlice(l.values)
values = append(values, kvs...)
values = append(values, "msg", msg)
f, err := flatten(values)
if err != nil {
panic(err)
}
fmt.Fprintln(l.writer, f)
}
func (l *logger) Error(err error, msg string, kvs ...interface{}) {
panic("using log.Error is deprecated in clusterctl")
}
func (l *logger) V(level int) logr.LogSink {
nl := l.clone()
return nl
}
func (l *logger) WithName(name string) logr.LogSink {
panic("using log.WithName is deprecated in clusterctl")
}
func (l *logger) WithValues(kvList ...interface{}) logr.LogSink {
nl := l.clone()
nl.values = append(nl.values, kvList...)
return nl
}
func (l *logger) clone() *logger {
return &logger{
writer: l.writer,
values: copySlice(l.values),
}
}
func copySlice(in []interface{}) []interface{} {
out := make([]interface{}, len(in))
copy(out, in)
return out
}
func flatten(values []interface{}) (string, error) {
var msgValue string
var errorValue error
if len(values)%2 == 1 {
return "", errors.New("log entry cannot have odd number off keyAndValues")
}
keys := make([]string, 0, len(values)/2)
val := make(map[string]interface{}, len(values)/2)
for i := 0; i < len(values); i += 2 {
k, ok := values[i].(string)
if !ok {
panic(fmt.Sprintf("key is not a string: %s", values[i]))
}
var v interface{}
if i+1 < len(values) {
v = values[i+1]
}
switch k {
case "msg":
msgValue, ok = v.(string)
if !ok {
panic(fmt.Sprintf("the msg value is not of type string: %s", v))
}
case "error":
errorValue, ok = v.(error)
if !ok {
panic(fmt.Sprintf("the error value is not of type error: %s", v))
}
default:
if _, ok := val[k]; !ok {
keys = append(keys, k)
}
val[k] = v
}
}
str := ""
str += msgValue
if errorValue != nil {
if msgValue != "" {
str += ": "
}
str += errorValue.Error()
}
for _, k := range keys {
prettyValue, err := pretty(val[k])
if err != nil {
return "", err
}
str += fmt.Sprintf(" %s=%s", k, prettyValue)
}
return str, nil
}
func pretty(value interface{}) (string, error) {
jb, err := json.Marshal(value)
if err != nil {
return "", errors.Wrapf(err, "Failed to marshal %s", value)
}
return string(jb), nil
}
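For reference, the flatten format above turns a structured call into a single plain-text line; the key/value names below are illustrative:

log := logFile.Logger() // from log_file.go above
log.Info("applying cluster template", "cluster", "quick-start", "namespace", "default")
// Written to the log file as:
//   applying cluster template cluster="quick-start" namespace="default"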

View File

@ -0,0 +1,284 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterctl
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/blang/semver"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
"sigs.k8s.io/cluster-api/test/framework/exec"
. "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions"
)
const (
fileURIScheme = "file"
httpURIScheme = "http"
httpsURIScheme = "https"
)
// RepositoryFileTransformation is a transformation applied to the files copied into the clusterctl
// local repository used for running e2e tests in isolation.
type RepositoryFileTransformation func([]byte) ([]byte, error)
// CreateRepositoryInput is the input for CreateRepository.
type CreateRepositoryInput struct {
RepositoryFolder string
E2EConfig *E2EConfig
FileTransformations []RepositoryFileTransformation
}
// RegisterClusterResourceSetConfigMapTransformation registers a FileTransformation that injects a manifest file into
// a ConfigMap that defines a ClusterResourceSet resource.
//
// NOTE: this transformation is specifically designed for replacing "data: ${envSubstVar}".
func (i *CreateRepositoryInput) RegisterClusterResourceSetConfigMapTransformation(manifestPath, envSubstVar string) {
Byf("Reading the ClusterResourceSet manifest %s", manifestPath)
manifestData, err := os.ReadFile(manifestPath) //nolint:gosec
Expect(err).ToNot(HaveOccurred(), "Failed to read the ClusterResourceSet manifest file")
Expect(manifestData).ToNot(BeEmpty(), "ClusterResourceSet manifest file should not be empty")
i.FileTransformations = append(i.FileTransformations, func(template []byte) ([]byte, error) {
oldData := fmt.Sprintf("data: ${%s}", envSubstVar)
newData := "data:\n"
newData += " resources: |\n"
for _, l := range strings.Split(string(manifestData), "\n") {
newData += strings.Repeat(" ", 4) + l + "\n"
}
return bytes.ReplaceAll(template, []byte(oldData), []byte(newData)), nil
})
}
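// For example (variable name illustrative), a template line such as
//
//	data: ${CNI_RESOURCES}
//
// is rewritten to
//
//	data:
//	  resources: |
//	    <manifest content, indented under the resources key>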
const clusterctlConfigFileName = "clusterctl-config.yaml"
const clusterctlConfigV1_2FileName = "clusterctl-config.v1.2.yaml"
// CreateRepository creates a clusterctl local repository based on the e2e test config, and then returns the path
// to a clusterctl config file to be used for working with such a repository.
func CreateRepository(ctx context.Context, input CreateRepositoryInput) string {
Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling CreateRepository")
Expect(os.MkdirAll(input.RepositoryFolder, 0750)).To(Succeed(), "Failed to create the clusterctl local repository folder %s", input.RepositoryFolder)
providers := []providerConfig{}
providersV1_2 := []providerConfig{}
for _, provider := range input.E2EConfig.Providers {
providerLabel := clusterctlv1.ManifestLabel(provider.Name, clusterctlv1.ProviderType(provider.Type))
providerURL := filepath.Join(input.RepositoryFolder, providerLabel, "latest", "components.yaml")
for _, version := range provider.Versions {
manifest, err := YAMLForComponentSource(ctx, version)
Expect(err).ToNot(HaveOccurred(), "Failed to generate the manifest for %q / %q", providerLabel, version.Name)
sourcePath := filepath.Join(input.RepositoryFolder, providerLabel, version.Name)
Expect(os.MkdirAll(sourcePath, 0750)).To(Succeed(), "Failed to create the clusterctl local repository folder for %q / %q", providerLabel, version.Name)
filePath := filepath.Join(sourcePath, "components.yaml")
Expect(os.WriteFile(filePath, manifest, 0600)).To(Succeed(), "Failed to write manifest in the clusterctl local repository for %q / %q", providerLabel, version.Name)
destinationPath := filepath.Join(input.RepositoryFolder, providerLabel, version.Name, "components.yaml")
allFiles := append(provider.Files, version.Files...)
for _, file := range allFiles {
data, err := os.ReadFile(file.SourcePath)
Expect(err).ToNot(HaveOccurred(), "Failed to read file %q / %q", provider.Name, file.SourcePath)
// Applies FileTransformations if defined
for _, t := range input.FileTransformations {
data, err = t(data)
Expect(err).ToNot(HaveOccurred(), "Failed to apply transformation func template %q", file)
}
destinationFile := filepath.Join(filepath.Dir(destinationPath), file.TargetName)
Expect(os.WriteFile(destinationFile, data, 0600)).To(Succeed(), "Failed to write clusterctl local repository file %q / %q", provider.Name, file.TargetName)
}
}
p := providerConfig{
Name: provider.Name,
URL: providerURL,
Type: provider.Type,
}
providers = append(providers, p)
}
// Point the overrides folder to an empty directory under the repository path, so tests can run in isolation without the user's overrides kicking in.
overridePath := filepath.Join(input.RepositoryFolder, "overrides")
Expect(os.MkdirAll(overridePath, 0750)).To(Succeed(), "Failed to create the clusterctl overrides folder %q", overridePath)
// creates a clusterctl config file to be used for working with such a repository
clusterctlConfigFile := &clusterctlConfig{
Path: filepath.Join(input.RepositoryFolder, clusterctlConfigFileName),
Values: map[string]interface{}{
"providers": providers,
"overridesFolder": overridePath,
},
}
for key := range input.E2EConfig.Variables {
clusterctlConfigFile.Values[key] = input.E2EConfig.GetVariable(key)
}
clusterctlConfigFile.write()
// creates a clusterctl config file to be used for working with such a repository, containing only the providers supported in clusterctl < v1.3
clusterctlConfigFileV1_2 := &clusterctlConfig{
Path: filepath.Join(input.RepositoryFolder, clusterctlConfigV1_2FileName),
Values: map[string]interface{}{
"providers": providersV1_2,
"overridesFolder": overridePath,
},
}
for key := range input.E2EConfig.Variables {
clusterctlConfigFileV1_2.Values[key] = input.E2EConfig.GetVariable(key)
}
clusterctlConfigFileV1_2.write()
return clusterctlConfigFile.Path
}
// copyAndAmendClusterctlConfigInput is the input for copyAndAmendClusterctlConfig.
type copyAndAmendClusterctlConfigInput struct {
ClusterctlConfigPath string
OutputPath string
Variables map[string]string
}
// copyAndAmendClusterctlConfig copies the clusterctl-config from ClusterctlConfigPath to
// OutputPath and adds the given Variables.
func copyAndAmendClusterctlConfig(_ context.Context, input copyAndAmendClusterctlConfigInput) {
// Read clusterctl config from ClusterctlConfigPath.
clusterctlConfigFile := &clusterctlConfig{
Path: input.ClusterctlConfigPath,
}
clusterctlConfigFile.read()
// Overwrite variables.
if clusterctlConfigFile.Values == nil {
clusterctlConfigFile.Values = map[string]interface{}{}
}
for key, value := range input.Variables {
clusterctlConfigFile.Values[key] = value
}
// Write clusterctl config to OutputPath.
clusterctlConfigFile.Path = input.OutputPath
clusterctlConfigFile.write()
}
// AdjustConfigPathForBinary adjusts the clusterctlConfigPath in case the clusterctl binary version is < v1.3.
func AdjustConfigPathForBinary(clusterctlPath, clusterctlConfigPath string) string {
clusterctl := exec.NewCommand(
exec.WithCommand(clusterctlPath),
exec.WithArgs("version", "--output", "short"),
)
stdout, stderr, err := clusterctl.Run(context.Background())
Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl version:\nstdout:\n%s\nstderr:\n%s", string(stdout), string(stderr))
data := stdout
version, err := semver.ParseTolerant(string(data))
Expect(err).ToNot(HaveOccurred(), "clusterctl version returned an invalid version: %s", string(data))
if version.LT(semver.MustParse("1.3.0")) {
return strings.Replace(clusterctlConfigPath, clusterctlConfigFileName, clusterctlConfigV1_2FileName, -1)
}
return clusterctlConfigPath
}
// YAMLForComponentSource returns the YAML for the provided component source.
func YAMLForComponentSource(ctx context.Context, source ProviderVersionSource) ([]byte, error) {
var data []byte
switch source.Type {
case URLSource:
buf, err := getComponentSourceFromURL(ctx, source)
if err != nil {
return nil, errors.Wrap(err, "failed to get component source YAML from URL")
}
data = buf
case KustomizeSource:
// Set Path of kustomize binary using CAPI_KUSTOMIZE_PATH env
kustomizePath, ok := os.LookupEnv("CAPI_KUSTOMIZE_PATH")
if !ok {
kustomizePath = "kustomize"
}
kustomize := exec.NewCommand(
exec.WithCommand(kustomizePath),
exec.WithArgs("build", source.Value))
stdout, stderr, err := kustomize.Run(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to execute kustomize: %s", stderr)
}
data = stdout
default:
return nil, errors.Errorf("invalid type: %q", source.Type)
}
for _, replacement := range source.Replacements {
rx, err := regexp.Compile(replacement.Old)
if err != nil {
return nil, err
}
data = rx.ReplaceAll(data, []byte(replacement.New))
}
return data, nil
}
// getComponentSourceFromURL fetches contents of component source YAML file from provided URL source.
func getComponentSourceFromURL(ctx context.Context, source ProviderVersionSource) ([]byte, error) {
var buf []byte
u, err := url.Parse(source.Value)
if err != nil {
return nil, err
}
// url.Parse always lower cases scheme
switch u.Scheme {
case "", fileURIScheme:
buf, err = os.ReadFile(u.Path)
if err != nil {
return nil, errors.Wrap(err, "failed to read file")
}
case httpURIScheme, httpsURIScheme:
req, err := http.NewRequestWithContext(ctx, http.MethodGet, source.Value, http.NoBody)
if err != nil {
return nil, errors.Wrapf(err, "failed to get %s: failed to create request", source.Value)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, errors.Wrapf(err, "failed to get %s", source.Value)
}
defer resp.Body.Close()
buf, err = io.ReadAll(resp.Body)
if err != nil {
return nil, errors.Wrapf(err, "failed to get %s: failed to read body", source.Value)
}
default:
return nil, errors.Errorf("unknown scheme for component source %q: allowed values are file, http, https", u.Scheme)
}
return buf, nil
}
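A typical flow for the repository helpers above, sketched with assumed variable names (e2eConfig, artifactFolder) and an assumed CNI manifest path and variable name:

createRepositoryInput := clusterctl.CreateRepositoryInput{
	RepositoryFolder: filepath.Join(artifactFolder, "repository"),
	E2EConfig:        e2eConfig,
}
// Optionally inject a CNI manifest into a ClusterResourceSet ConfigMap template.
createRepositoryInput.RegisterClusterResourceSetConfigMapTransformation("data/cni/calico.yaml", "CNI_RESOURCES")
clusterctlConfigPath := clusterctl.CreateRepository(ctx, createRepositoryInput)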

View File

@ -0,0 +1,59 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
)
// GetControllerDeploymentsInput is the input for GetControllerDeployments.
type GetControllerDeploymentsInput struct {
Lister Lister
ExcludeNamespaces []string
}
// GetControllerDeployments returns all the deployments for the Cluster API controllers existing in a management cluster.
func GetControllerDeployments(ctx context.Context, input GetControllerDeploymentsInput) []*appsv1.Deployment {
deploymentList := &appsv1.DeploymentList{}
Eventually(func() error {
return input.Lister.List(ctx, deploymentList, capiProviderOptions()...)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list deployments for the cluster API controllers")
deployments := make([]*appsv1.Deployment, 0, len(deploymentList.Items))
for i := range deploymentList.Items {
d := &deploymentList.Items[i]
if !skipDeployment(d, input.ExcludeNamespaces) {
deployments = append(deployments, d)
}
}
return deployments
}
func skipDeployment(d *appsv1.Deployment, excludeNamespaces []string) bool {
if !d.DeletionTimestamp.IsZero() {
return true
}
for _, n := range excludeNamespaces {
if d.Namespace == n {
return true
}
}
return false
}
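A short sketch of how a caller package might use the helper above; the cluster proxy variable is an assumption for the sketch:

deployments := framework.GetControllerDeployments(ctx, framework.GetControllerDeploymentsInput{
	Lister:            managementClusterProxy.GetClient(),
	ExcludeNamespaces: []string{"capi-webhook-system"},
})
for _, d := range deployments {
	fmt.Printf("found controller deployment %s/%s\n", d.Namespace, d.Name)
}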

View File

@ -0,0 +1,431 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gstruct"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
// CreateK3sControlPlaneInput is the input for CreateK3sControlPlane.
type CreateK3sControlPlaneInput struct {
Creator Creator
ControlPlane *infracontrolplanev1.K3sControlPlane
MachineTemplate client.Object
}
// CreateK3sControlPlane creates the control plane object and necessary dependencies.
func CreateK3sControlPlane(ctx context.Context, input CreateK3sControlPlaneInput, intervals ...interface{}) {
By("creating the machine template")
Eventually(func() error {
return input.Creator.Create(ctx, input.MachineTemplate)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to create MachineTemplate %s", input.MachineTemplate.GetName())
By("creating a K3sControlPlane")
Eventually(func() error {
err := input.Creator.Create(ctx, input.ControlPlane)
if err != nil {
log.Logf("Failed to create the K3sControlPlane: %+v", err)
}
return err
}, intervals...).Should(Succeed(), "Failed to create the K3sControlPlane %s", klog.KObj(input.ControlPlane))
}
// GetK3sControlPlaneByClusterInput is the input for GetK3sControlPlaneByCluster.
type GetK3sControlPlaneByClusterInput struct {
Lister Lister
ClusterName string
Namespace string
}
// GetK3sControlPlaneByCluster returns the K3sControlPlane object for a cluster, if any.
// Important! This method relies on labels that are created by the CAPI controllers during the first reconciliation, so
// it is necessary to ensure this has already happened before calling it.
func GetK3sControlPlaneByCluster(ctx context.Context, input GetK3sControlPlaneByClusterInput) *infracontrolplanev1.K3sControlPlane {
controlPlaneList := &infracontrolplanev1.K3sControlPlaneList{}
Eventually(func() error {
return input.Lister.List(ctx, controlPlaneList, byClusterOptions(input.ClusterName, input.Namespace)...)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list K3sControlPlane object for Cluster %s", klog.KRef(input.Namespace, input.ClusterName))
Expect(len(controlPlaneList.Items)).ToNot(BeNumerically(">", 1), "Cluster %s should not have more than 1 K3sControlPlane object", klog.KRef(input.Namespace, input.ClusterName))
if len(controlPlaneList.Items) == 1 {
return &controlPlaneList.Items[0]
}
return nil
}
// WaitForK3sControlPlaneMachinesToExistInput is the input for WaitForK3sControlPlaneMachinesToExist.
type WaitForK3sControlPlaneMachinesToExistInput struct {
Lister Lister
Cluster *clusterv1.Cluster
ControlPlane *infracontrolplanev1.K3sControlPlane
}
// WaitForK3sControlPlaneMachinesToExist will wait until all control plane machines have node refs.
func WaitForK3sControlPlaneMachinesToExist(ctx context.Context, input WaitForK3sControlPlaneMachinesToExistInput, intervals ...interface{}) {
By("Waiting for all control plane nodes to exist")
inClustersNamespaceListOption := client.InNamespace(input.Cluster.Namespace)
// ControlPlane labels
matchClusterListOption := client.MatchingLabels{
clusterv1.MachineControlPlaneLabelName: "",
clusterv1.ClusterLabelName: input.Cluster.Name,
}
Eventually(func() (int, error) {
machineList := &clusterv1.MachineList{}
if err := input.Lister.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption); err != nil {
log.Logf("Failed to list the machines: %+v", err)
return 0, err
}
count := 0
for _, machine := range machineList.Items {
if machine.Status.NodeRef != nil {
count++
}
}
return count, nil
}, intervals...).Should(Equal(int(*input.ControlPlane.Spec.Replicas)), "Timed out waiting for %d control plane machines to exist", int(*input.ControlPlane.Spec.Replicas))
}
// WaitForOneK3sControlPlaneMachineToExistInput is the input for WaitForOneK3sControlPlaneMachineToExist.
type WaitForOneK3sControlPlaneMachineToExistInput struct {
Lister Lister
Cluster *clusterv1.Cluster
ControlPlane *infracontrolplanev1.K3sControlPlane
}
// WaitForOneK3sControlPlaneMachineToExist will wait until at least one control plane machine has a node ref.
func WaitForOneK3sControlPlaneMachineToExist(ctx context.Context, input WaitForOneK3sControlPlaneMachineToExistInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForOneK3sControlPlaneMachineToExist")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling WaitForOneK3sControlPlaneMachineToExist")
Expect(input.ControlPlane).ToNot(BeNil(), "Invalid argument. input.ControlPlane can't be nil when calling WaitForOneK3sControlPlaneMachineToExist")
By("Waiting for one control plane node to exist")
inClustersNamespaceListOption := client.InNamespace(input.Cluster.Namespace)
// ControlPlane labels
matchClusterListOption := client.MatchingLabels{
clusterv1.MachineControlPlaneLabelName: "",
clusterv1.ClusterLabelName: input.Cluster.Name,
}
Eventually(func() (bool, error) {
machineList := &clusterv1.MachineList{}
if err := input.Lister.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption); err != nil {
log.Logf("Failed to list the machines: %+v", err)
return false, err
}
count := 0
for _, machine := range machineList.Items {
if machine.Status.NodeRef != nil {
count++
}
}
return count > 0, nil
}, intervals...).Should(BeTrue(), "No Control Plane machines came into existence. ")
}
// WaitForControlPlaneToBeReadyInput is the input for WaitForControlPlaneToBeReady.
type WaitForControlPlaneToBeReadyInput struct {
Getter Getter
ControlPlane *infracontrolplanev1.K3sControlPlane
}
// WaitForControlPlaneToBeReady will wait for a control plane to be ready.
func WaitForControlPlaneToBeReady(ctx context.Context, input WaitForControlPlaneToBeReadyInput, intervals ...interface{}) {
By("Waiting for the control plane to be ready")
controlplane := &infracontrolplanev1.K3sControlPlane{}
Eventually(func() (infracontrolplanev1.K3sControlPlane, error) {
key := client.ObjectKey{
Namespace: input.ControlPlane.GetNamespace(),
Name: input.ControlPlane.GetName(),
}
if err := input.Getter.Get(ctx, key, controlplane); err != nil {
return *controlplane, errors.Wrapf(err, "failed to get KCP")
}
return *controlplane, nil
}, intervals...).Should(MatchFields(IgnoreExtras, Fields{
"Status": MatchFields(IgnoreExtras, Fields{
"Ready": BeTrue(),
}),
}), PrettyPrint(controlplane)+"\n")
}
// AssertControlPlaneFailureDomainsInput is the input for AssertControlPlaneFailureDomains.
type AssertControlPlaneFailureDomainsInput struct {
Lister Lister
Cluster *clusterv1.Cluster
}
// AssertControlPlaneFailureDomains will look at all control plane machines and see what failure domains they were
// placed in. If machines were placed in unexpected or wrong failure domains the expectation will fail.
func AssertControlPlaneFailureDomains(ctx context.Context, input AssertControlPlaneFailureDomainsInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for AssertControlPlaneFailureDomains")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling AssertControlPlaneFailureDomains")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling AssertControlPlaneFailureDomains")
By("Checking all the control plane machines are in the expected failure domains")
controlPlaneFailureDomains := sets.NewString()
for fd, fdSettings := range input.Cluster.Status.FailureDomains {
if fdSettings.ControlPlane {
controlPlaneFailureDomains.Insert(fd)
}
}
// Look up all the control plane machines.
inClustersNamespaceListOption := client.InNamespace(input.Cluster.Namespace)
matchClusterListOption := client.MatchingLabels{
clusterv1.ClusterLabelName: input.Cluster.Name,
clusterv1.MachineControlPlaneLabelName: "",
}
machineList := &clusterv1.MachineList{}
Eventually(func() error {
return input.Lister.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Couldn't list control-plane machines for the cluster %q", input.Cluster.Name)
for _, machine := range machineList.Items {
if machine.Spec.FailureDomain != nil {
machineFD := *machine.Spec.FailureDomain
if !controlPlaneFailureDomains.Has(machineFD) {
Fail(fmt.Sprintf("Machine %s is in the %q failure domain, expecting one of the failure domain defined at cluster level", machine.Name, machineFD))
}
}
}
}
// DiscoveryAndWaitForControlPlaneInitializedInput is the input type for DiscoveryAndWaitForControlPlaneInitialized.
type DiscoveryAndWaitForControlPlaneInitializedInput struct {
Lister Lister
Cluster *clusterv1.Cluster
}
// DiscoveryAndWaitForControlPlaneInitialized discovers the K3sControlPlane object attached to a cluster and waits for it to be initialized.
func DiscoveryAndWaitForControlPlaneInitialized(ctx context.Context, input DiscoveryAndWaitForControlPlaneInitializedInput, intervals ...interface{}) *infracontrolplanev1.K3sControlPlane {
Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForControlPlaneInitialized")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForControlPlaneInitialized")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoveryAndWaitForControlPlaneInitialized")
var controlPlane *infracontrolplanev1.K3sControlPlane
Eventually(func(g Gomega) {
controlPlane = GetK3sControlPlaneByCluster(ctx, GetK3sControlPlaneByClusterInput{
Lister: input.Lister,
ClusterName: input.Cluster.Name,
Namespace: input.Cluster.Namespace,
})
g.Expect(controlPlane).ToNot(BeNil())
}, "10s", "1s").Should(Succeed(), "Couldn't get the control plane for the cluster %s", klog.KObj(input.Cluster))
log.Logf("Waiting for the first control plane machine managed by %s to be provisioned", klog.KObj(controlPlane))
WaitForOneK3sControlPlaneMachineToExist(ctx, WaitForOneK3sControlPlaneMachineToExistInput{
Lister: input.Lister,
Cluster: input.Cluster,
ControlPlane: controlPlane,
}, intervals...)
return controlPlane
}
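// Illustrative sketch of chaining the discovery helper above with the readiness
// helper defined below; mgmtClusterProxy, cluster, and the intervals are assumed fixtures:
//
//	controlPlane := DiscoveryAndWaitForControlPlaneInitialized(ctx, DiscoveryAndWaitForControlPlaneInitializedInput{
//		Lister:  mgmtClusterProxy.GetClient(),
//		Cluster: cluster,
//	}, "15m", "10s")
//	WaitForControlPlaneAndMachinesReady(ctx, WaitForControlPlaneAndMachinesReadyInput{
//		GetLister:    mgmtClusterProxy.GetClient(),
//		Cluster:      cluster,
//		ControlPlane: controlPlane,
//	}, "15m", "10s")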
// WaitForControlPlaneAndMachinesReadyInput is the input type for WaitForControlPlaneAndMachinesReady.
type WaitForControlPlaneAndMachinesReadyInput struct {
GetLister GetLister
Cluster *clusterv1.Cluster
ControlPlane *infracontrolplanev1.K3sControlPlane
}
// WaitForControlPlaneAndMachinesReady waits for a K3sControlPlane object to be ready (all its machines provisioned and their nodes ready).
func WaitForControlPlaneAndMachinesReady(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForControlPlaneReady")
Expect(input.GetLister).ToNot(BeNil(), "Invalid argument. input.GetLister can't be nil when calling WaitForControlPlaneReady")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling WaitForControlPlaneReady")
Expect(input.ControlPlane).ToNot(BeNil(), "Invalid argument. input.ControlPlane can't be nil when calling WaitForControlPlaneReady")
if input.ControlPlane.Spec.Replicas != nil && int(*input.ControlPlane.Spec.Replicas) > 1 {
log.Logf("Waiting for the remaining control plane machines managed by %s to be provisioned", klog.KObj(input.ControlPlane))
WaitForK3sControlPlaneMachinesToExist(ctx, WaitForK3sControlPlaneMachinesToExistInput{
Lister: input.GetLister,
Cluster: input.Cluster,
ControlPlane: input.ControlPlane,
}, intervals...)
}
log.Logf("Waiting for control plane %s to be ready (implies underlying nodes to be ready as well)", klog.KObj(input.ControlPlane))
waitForControlPlaneToBeReadyInput := WaitForControlPlaneToBeReadyInput{
Getter: input.GetLister,
ControlPlane: input.ControlPlane,
}
WaitForControlPlaneToBeReady(ctx, waitForControlPlaneToBeReadyInput, intervals...)
AssertControlPlaneFailureDomains(ctx, AssertControlPlaneFailureDomainsInput{
Lister: input.GetLister,
Cluster: input.Cluster,
})
}
// UpgradeControlPlaneAndWaitForUpgradeInput is the input type for UpgradeControlPlaneAndWaitForUpgrade.
type UpgradeControlPlaneAndWaitForUpgradeInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
ControlPlane *infracontrolplanev1.K3sControlPlane
KubernetesUpgradeVersion string
UpgradeMachineTemplate *string
EtcdImageTag string
DNSImageTag string
WaitForMachinesToBeUpgraded []interface{}
WaitForDNSUpgrade []interface{}
WaitForKubeProxyUpgrade []interface{}
WaitForEtcdUpgrade []interface{}
}
// UpgradeControlPlaneAndWaitForUpgrade upgrades a K3sControlPlane and waits for it to be upgraded.
func UpgradeControlPlaneAndWaitForUpgrade(ctx context.Context, input UpgradeControlPlaneAndWaitForUpgradeInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeControlPlaneAndWaitForUpgrade")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeControlPlaneAndWaitForUpgrade")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeControlPlaneAndWaitForUpgrade")
Expect(input.ControlPlane).ToNot(BeNil(), "Invalid argument. input.ControlPlane can't be nil when calling UpgradeControlPlaneAndWaitForUpgrade")
Expect(input.KubernetesUpgradeVersion).ToNot(BeNil(), "Invalid argument. input.KubernetesUpgradeVersion can't be empty when calling UpgradeControlPlaneAndWaitForUpgrade")
Expect(input.EtcdImageTag).ToNot(BeNil(), "Invalid argument. input.EtcdImageTag can't be empty when calling UpgradeControlPlaneAndWaitForUpgrade")
Expect(input.DNSImageTag).ToNot(BeNil(), "Invalid argument. input.DNSImageTag can't be empty when calling UpgradeControlPlaneAndWaitForUpgrade")
mgmtClient := input.ClusterProxy.GetClient()
log.Logf("Patching the new kubernetes version to KCP")
patchHelper, err := patch.NewHelper(input.ControlPlane, mgmtClient)
Expect(err).ToNot(HaveOccurred())
input.ControlPlane.Spec.Version = input.KubernetesUpgradeVersion
if input.UpgradeMachineTemplate != nil {
input.ControlPlane.Spec.MachineTemplate.InfrastructureRef.Name = *input.UpgradeMachineTemplate
}
// If the ServerConfiguration is not specified, create an empty one.
if input.ControlPlane.Spec.K3sConfigSpec.ServerConfiguration == nil {
input.ControlPlane.Spec.K3sConfigSpec.ServerConfiguration = new(infrabootstrapv1.ServerConfiguration)
}
if input.ControlPlane.Spec.K3sConfigSpec.ServerConfiguration.Database.ClusterInit == nil {
input.ControlPlane.Spec.K3sConfigSpec.ServerConfiguration.Database.ClusterInit = pointer.Bool(true)
}
Eventually(func() error {
return patchHelper.Patch(ctx, input.ControlPlane)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch the new kubernetes version to KCP %s", klog.KObj(input.ControlPlane))
log.Logf("Waiting for control-plane machines to have the upgraded kubernetes version")
WaitForControlPlaneMachinesToBeUpgraded(ctx, WaitForControlPlaneMachinesToBeUpgradedInput{
Lister: mgmtClient,
Cluster: input.Cluster,
MachineCount: int(*input.ControlPlane.Spec.Replicas),
KubernetesUpgradeVersion: input.KubernetesUpgradeVersion,
}, input.WaitForMachinesToBeUpgraded...)
log.Logf("Waiting for kube-proxy to have the upgraded kubernetes version")
workloadCluster := input.ClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Namespace, input.Cluster.Name)
workloadClient := workloadCluster.GetClient()
WaitForKubeProxyUpgrade(ctx, WaitForKubeProxyUpgradeInput{
Getter: workloadClient,
KubernetesVersion: input.KubernetesUpgradeVersion,
}, input.WaitForKubeProxyUpgrade...)
log.Logf("Waiting for CoreDNS to have the upgraded image tag")
WaitForDNSUpgrade(ctx, WaitForDNSUpgradeInput{
Getter: workloadClient,
DNSVersion: input.DNSImageTag,
}, input.WaitForDNSUpgrade...)
log.Logf("Waiting for etcd to have the upgraded image tag")
lblSelector, err := labels.Parse("component=etcd")
Expect(err).ToNot(HaveOccurred())
WaitForPodListCondition(ctx, WaitForPodListConditionInput{
Lister: workloadClient,
ListOptions: &client.ListOptions{LabelSelector: lblSelector},
Condition: EtcdImageTagCondition(input.EtcdImageTag, int(*input.ControlPlane.Spec.Replicas)),
}, input.WaitForEtcdUpgrade...)
}
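// A hedged usage sketch for the upgrade helper; every fixture name and the
// version/image tags below are placeholders, not values mandated by this framework:
//
//	UpgradeControlPlaneAndWaitForUpgrade(ctx, UpgradeControlPlaneAndWaitForUpgradeInput{
//		ClusterProxy:                mgmtClusterProxy,
//		Cluster:                     cluster,
//		ControlPlane:                controlPlane,
//		KubernetesUpgradeVersion:    "v1.25.3+k3s1",
//		EtcdImageTag:                "3.5.4-0",
//		DNSImageTag:                 "v1.9.3",
//		WaitForMachinesToBeUpgraded: []interface{}{"30m", "10s"},
//		WaitForKubeProxyUpgrade:     []interface{}{"30m", "10s"},
//		WaitForDNSUpgrade:           []interface{}{"30m", "10s"},
//		WaitForEtcdUpgrade:          []interface{}{"30m", "10s"},
//	})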
// controlPlaneMachineOptions returns a set of ListOptions that allow getting all Machine objects belonging to the control plane.
func controlPlaneMachineOptions() []client.ListOption {
return []client.ListOption{
client.HasLabels{clusterv1.MachineControlPlaneLabelName},
}
}
// ScaleAndWaitControlPlaneInput is the input for ScaleAndWaitControlPlane.
type ScaleAndWaitControlPlaneInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
ControlPlane *infracontrolplanev1.K3sControlPlane
Replicas int32
WaitForControlPlane []interface{}
}
// ScaleAndWaitControlPlane scales the control plane and waits until all machines have a node ref and their number equals Replicas.
func ScaleAndWaitControlPlane(ctx context.Context, input ScaleAndWaitControlPlaneInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for ScaleAndWaitControlPlane")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ScaleAndWaitControlPlane")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling ScaleAndWaitControlPlane")
patchHelper, err := patch.NewHelper(input.ControlPlane, input.ClusterProxy.GetClient())
Expect(err).ToNot(HaveOccurred())
scaleBefore := pointer.Int32Deref(input.ControlPlane.Spec.Replicas, 0)
input.ControlPlane.Spec.Replicas = pointer.Int32(input.Replicas)
log.Logf("Scaling controlplane %s from %v to %v replicas", klog.KObj(input.ControlPlane), scaleBefore, input.Replicas)
Eventually(func() error {
return patchHelper.Patch(ctx, input.ControlPlane)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to scale controlplane %s from %v to %v replicas", klog.KObj(input.ControlPlane), scaleBefore, input.Replicas)
log.Logf("Waiting for correct number of replicas to exist")
Eventually(func() (int, error) {
kcpLabelSelector, err := metav1.ParseToLabelSelector(input.ControlPlane.Status.Selector)
if err != nil {
return -1, err
}
selector, err := metav1.LabelSelectorAsSelector(kcpLabelSelector)
if err != nil {
return -1, err
}
machines := &clusterv1.MachineList{}
if err := input.ClusterProxy.GetClient().List(ctx, machines, &client.ListOptions{LabelSelector: selector, Namespace: input.ControlPlane.Namespace}); err != nil {
return -1, err
}
nodeRefCount := 0
for _, machine := range machines.Items {
if machine.Status.NodeRef != nil {
nodeRefCount++
}
}
if len(machines.Items) != nodeRefCount {
return -1, errors.New("Machine count does not match existing nodes count")
}
return nodeRefCount, nil
}, input.WaitForControlPlane...).Should(Equal(int(input.Replicas)), "Timed out waiting for %d replicas to exist for control-plane %s", int(input.Replicas), klog.KObj(input.ControlPlane))
}
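// A minimal sketch of scaling the control plane to three replicas; the fixtures
// and interval values are assumptions for illustration:
//
//	ScaleAndWaitControlPlane(ctx, ScaleAndWaitControlPlaneInput{
//		ClusterProxy:        mgmtClusterProxy,
//		Cluster:             cluster,
//		ControlPlane:        controlPlane,
//		Replicas:            3,
//		WaitForControlPlane: []interface{}{"30m", "10s"},
//	})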

View File

@ -0,0 +1,88 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"reflect"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextensionsv1beta "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
infrabootstrapv1 "github.com/kubesphere/kubekey/bootstrap/k3s/api/v1beta1"
infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1"
)
// TryAddDefaultSchemes tries to add the following schemes:
// - Kubernetes corev1
// - Kubernetes appsv1
// - CAPI core
// - K3s Bootstrapper
// - K3s ControlPlane
//
// Any error that occurs when trying to add the schemes is ignored.
func TryAddDefaultSchemes(scheme *runtime.Scheme) {
// Add the core schemes.
_ = corev1.AddToScheme(scheme)
// Add the apps schemes.
_ = appsv1.AddToScheme(scheme)
// Add the core CAPI scheme.
_ = clusterv1.AddToScheme(scheme)
// Add the CAPI experiments scheme.
_ = expv1.AddToScheme(scheme)
_ = addonsv1.AddToScheme(scheme)
// Add the CAPI clusterctl scheme.
_ = clusterctlv1.AddToScheme(scheme)
// Add the core CAPI v1alpha3 scheme.
_ = clusterv1alpha3.AddToScheme(scheme)
// Add the k3s bootstrapper scheme.
_ = infrabootstrapv1.AddToScheme(scheme)
// Add the k3s controlplane scheme.
_ = infracontrolplanev1.AddToScheme(scheme)
// Add the api extensions (CRD) to the scheme.
_ = apiextensionsv1beta.AddToScheme(scheme)
_ = apiextensionsv1.AddToScheme(scheme)
// Add RuntimeSDK to the scheme.
_ = runtimev1.AddToScheme(scheme)
// Add rbac to the scheme.
_ = rbacv1.AddToScheme(scheme)
}
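// A rough sketch of wiring the scheme into a controller-runtime client; restConfig
// is an assumed *rest.Config and client refers to sigs.k8s.io/controller-runtime/pkg/client:
//
//	scheme := runtime.NewScheme()
//	TryAddDefaultSchemes(scheme)
//	c, err := client.New(restConfig, client.Options{Scheme: scheme})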
// ObjectToKind returns the Kind without the package prefix. Pass in a pointer to a struct;
// this will panic if used incorrectly.
func ObjectToKind(i runtime.Object) string {
return reflect.ValueOf(i).Elem().Type().Name()
}
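// For example, ObjectToKind(&clusterv1.Machine{}) returns "Machine"; passing a
// non-pointer value would panic on the Elem() call above.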

View File

@ -0,0 +1,66 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"github.com/blang/semver"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
containerutil "sigs.k8s.io/cluster-api/util/container"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// WaitForKubeProxyUpgradeInput is the input for WaitForKubeProxyUpgrade.
type WaitForKubeProxyUpgradeInput struct {
Getter Getter
KubernetesVersion string
}
// WaitForKubeProxyUpgrade waits until the kube-proxy version matches the kubernetes version. This is called during KCP upgrade.
func WaitForKubeProxyUpgrade(ctx context.Context, input WaitForKubeProxyUpgradeInput, intervals ...interface{}) {
By("Ensuring kube-proxy has the correct image")
parsedVersion, err := semver.ParseTolerant(input.KubernetesVersion)
Expect(err).ToNot(HaveOccurred())
// Beginning with kubernetes v1.25, kubernetes images including kube-proxy get published to registry.k8s.io instead of k8s.gcr.io.
// This ensures that the imageRepository setting gets patched to registry.k8s.io when upgrading from v1.24 or lower,
// but only if there was no imageRepository explicitly set in the K3sControlPlane's ClusterConfiguration.
// This follows the behavior of `kubeadm upgrade`.
wantKubeProxyRegistry := "registry.k8s.io"
if parsedVersion.LT(semver.Version{Major: 1, Minor: 25, Patch: 0, Pre: []semver.PRVersion{{VersionStr: "alpha"}}}) {
wantKubeProxyRegistry = "k8s.gcr.io"
}
wantKubeProxyImage := wantKubeProxyRegistry + "/kube-proxy:" + containerutil.SemverToOCIImageTag(input.KubernetesVersion)
Eventually(func() (bool, error) {
ds := &appsv1.DaemonSet{}
if err := input.Getter.Get(ctx, client.ObjectKey{Name: "kube-proxy", Namespace: metav1.NamespaceSystem}, ds); err != nil {
return false, err
}
if ds.Spec.Template.Spec.Containers[0].Image == wantKubeProxyImage {
return true, nil
}
return false, nil
}, intervals...).Should(BeTrue())
}
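// For illustration, given the registry cutoff above an upgrade to v1.24.8 waits for
// k8s.gcr.io/kube-proxy:v1.24.8, while an upgrade to v1.25.3 waits for
// registry.k8s.io/kube-proxy:v1.25.3.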

View File

@ -0,0 +1,518 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
. "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions"
"sigs.k8s.io/controller-runtime/pkg/client"
infracontrolplanev1 "github.com/kubesphere/kubekey/controlplane/k3s/api/v1beta1"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
const (
nodeRoleOldControlPlane = "node-role.kubernetes.io/master" // Deprecated: https://github.com/kubernetes/kubeadm/issues/2200
nodeRoleControlPlane = "node-role.kubernetes.io/control-plane"
)
// WaitForDeploymentsAvailableInput is the input for WaitForDeploymentsAvailable.
type WaitForDeploymentsAvailableInput struct {
Getter Getter
Deployment *appsv1.Deployment
}
// WaitForDeploymentsAvailable waits until the Deployment has status.Available = True, which signals that
// all the desired replicas are in place.
// This can be used to check if Cluster API controllers installed in the management cluster are working.
func WaitForDeploymentsAvailable(ctx context.Context, input WaitForDeploymentsAvailableInput, intervals ...interface{}) {
Byf("Waiting for deployment %s to be available", klog.KObj(input.Deployment))
deployment := &appsv1.Deployment{}
Eventually(func() bool {
key := client.ObjectKey{
Namespace: input.Deployment.GetNamespace(),
Name: input.Deployment.GetName(),
}
if err := input.Getter.Get(ctx, key, deployment); err != nil {
return false
}
for _, c := range deployment.Status.Conditions {
if c.Type == appsv1.DeploymentAvailable && c.Status == corev1.ConditionTrue {
return true
}
}
return false
}, intervals...).Should(BeTrue(), func() string { return DescribeFailedDeployment(input, deployment) })
}
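// A minimal usage sketch; the deployment name and namespace below are illustrative
// placeholders rather than values required by this framework:
//
//	WaitForDeploymentsAvailable(ctx, WaitForDeploymentsAvailableInput{
//		Getter: mgmtClusterProxy.GetClient(),
//		Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{
//			Name: "capi-controller-manager", Namespace: "capi-system",
//		}},
//	}, "5m", "10s")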
// DescribeFailedDeployment returns detailed output to help debug a deployment failure in e2e.
func DescribeFailedDeployment(input WaitForDeploymentsAvailableInput, deployment *appsv1.Deployment) string {
b := strings.Builder{}
b.WriteString(fmt.Sprintf("Deployment %s failed to get status.Available = True condition",
klog.KObj(input.Deployment)))
if deployment == nil {
b.WriteString("\nDeployment: nil\n")
} else {
b.WriteString(fmt.Sprintf("\nDeployment:\n%s\n", PrettyPrint(deployment)))
}
return b.String()
}
// WatchDeploymentLogsInput is the input for WatchDeploymentLogs.
type WatchDeploymentLogsInput struct {
GetLister GetLister
ClientSet *kubernetes.Clientset
Deployment *appsv1.Deployment
LogPath string
}
// logMetadata contains metadata about the logs.
// The format is very similar to the one used by promtail.
type logMetadata struct {
Job string `json:"job"`
Namespace string `json:"namespace"`
App string `json:"app"`
Pod string `json:"pod"`
Container string `json:"container"`
NodeName string `json:"node_name"`
Stream string `json:"stream"`
}
// WatchDeploymentLogs streams logs for all containers for all pods belonging to a deployment. Each container's logs are streamed
// in a separate goroutine so they can all be streamed concurrently. This only causes a test failure if there are errors
// retrieving the deployment, its pods, or setting up a log file. If there is an error with the log streaming itself,
// that does not cause the test to fail.
func WatchDeploymentLogs(ctx context.Context, input WatchDeploymentLogsInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WatchControllerLogs")
Expect(input.ClientSet).NotTo(BeNil(), "input.ClientSet is required for WatchControllerLogs")
Expect(input.Deployment).NotTo(BeNil(), "input.Deployment is required for WatchControllerLogs")
deployment := &appsv1.Deployment{}
key := client.ObjectKeyFromObject(input.Deployment)
Eventually(func() error {
return input.GetLister.Get(ctx, key, deployment)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to get deployment %s", klog.KObj(input.Deployment))
selector, err := metav1.LabelSelectorAsMap(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred(), "Failed to Pods selector for deployment %s", klog.KObj(input.Deployment))
pods := &corev1.PodList{}
Expect(input.GetLister.List(ctx, pods, client.InNamespace(input.Deployment.Namespace), client.MatchingLabels(selector))).To(Succeed(), "Failed to list Pods for deployment %s", klog.KObj(input.Deployment))
for _, pod := range pods.Items {
for _, container := range deployment.Spec.Template.Spec.Containers {
log.Logf("Creating log watcher for controller %s, pod %s, container %s", klog.KObj(input.Deployment), pod.Name, container.Name)
// Create log metadata file.
logMetadataFile := filepath.Clean(path.Join(input.LogPath, input.Deployment.Name, pod.Name, container.Name+"-log-metadata.json"))
Expect(os.MkdirAll(filepath.Dir(logMetadataFile), 0750)).To(Succeed())
metadata := logMetadata{
Job: input.Deployment.Namespace + "/" + input.Deployment.Name,
Namespace: input.Deployment.Namespace,
App: input.Deployment.Name,
Pod: pod.Name,
Container: container.Name,
NodeName: pod.Spec.NodeName,
Stream: "stderr",
}
metadataBytes, err := json.Marshal(&metadata)
Expect(err).To(BeNil())
Expect(os.WriteFile(logMetadataFile, metadataBytes, 0600)).To(Succeed())
// Watch each container's logs in a goroutine so we can stream them all concurrently.
go func(pod corev1.Pod, container corev1.Container) {
defer GinkgoRecover()
logFile := filepath.Clean(path.Join(input.LogPath, input.Deployment.Name, pod.Name, container.Name+".log"))
Expect(os.MkdirAll(filepath.Dir(logFile), 0750)).To(Succeed())
f, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
Expect(err).NotTo(HaveOccurred())
defer f.Close()
opts := &corev1.PodLogOptions{
Container: container.Name,
Follow: true,
}
podLogs, err := input.ClientSet.CoreV1().Pods(input.Deployment.Namespace).GetLogs(pod.Name, opts).Stream(ctx)
if err != nil {
// Failing to stream logs should not cause the test to fail
log.Logf("Error starting logs stream for pod %s, container %s: %v", klog.KRef(pod.Namespace, pod.Name), container.Name, err)
return
}
defer podLogs.Close()
out := bufio.NewWriter(f)
defer out.Flush()
_, err = out.ReadFrom(podLogs)
if err != nil && err != io.ErrUnexpectedEOF {
// Failing to stream logs should not cause the test to fail
log.Logf("Got error while streaming logs for pod %s, container %s: %v", klog.KRef(pod.Namespace, pod.Name), container.Name, err)
}
}(pod, container)
}
}
}
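// A hedged usage sketch; artifactFolder and the deployment object are assumed
// fixtures owned by the calling test:
//
//	WatchDeploymentLogs(ctx, WatchDeploymentLogsInput{
//		GetLister:  mgmtClusterProxy.GetClient(),
//		ClientSet:  mgmtClusterProxy.GetClientSet(),
//		Deployment: deployment,
//		LogPath:    filepath.Join(artifactFolder, "controllers"),
//	})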
// WatchPodMetricsInput is the input for WatchPodMetrics.
type WatchPodMetricsInput struct {
GetLister GetLister
ClientSet *kubernetes.Clientset
Deployment *appsv1.Deployment
MetricsPath string
}
// WatchPodMetrics captures metrics from all pods every 5s. It expects to find port 8080 open on the controller.
func WatchPodMetrics(ctx context.Context, input WatchPodMetricsInput) {
// Dump machine metrics every 5 seconds
ticker := time.NewTicker(time.Second * 5)
Expect(ctx).NotTo(BeNil(), "ctx is required for dumpContainerMetrics")
Expect(input.ClientSet).NotTo(BeNil(), "input.ClientSet is required for dumpContainerMetrics")
Expect(input.Deployment).NotTo(BeNil(), "input.Deployment is required for dumpContainerMetrics")
deployment := &appsv1.Deployment{}
key := client.ObjectKeyFromObject(input.Deployment)
Eventually(func() error {
return input.GetLister.Get(ctx, key, deployment)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to get deployment %s", klog.KObj(input.Deployment))
selector, err := metav1.LabelSelectorAsMap(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred(), "Failed to Pods selector for deployment %s", klog.KObj(input.Deployment))
pods := &corev1.PodList{}
Eventually(func() error {
return input.GetLister.List(ctx, pods, client.InNamespace(input.Deployment.Namespace), client.MatchingLabels(selector))
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Pods for deployment %s", klog.KObj(input.Deployment))
go func() {
defer GinkgoRecover()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
dumpPodMetrics(ctx, input.ClientSet, input.MetricsPath, deployment.Name, pods)
}
}
}()
}
// dumpPodMetrics captures metrics from all pods. It expects to find port 8080 open on the controller.
func dumpPodMetrics(ctx context.Context, client *kubernetes.Clientset, metricsPath string, deploymentName string, pods *corev1.PodList) {
for _, pod := range pods.Items {
metricsDir := path.Join(metricsPath, deploymentName, pod.Name)
metricsFile := path.Join(metricsDir, "metrics.txt")
Expect(os.MkdirAll(metricsDir, 0750)).To(Succeed())
res := client.CoreV1().RESTClient().Get().
Namespace(pod.Namespace).
Resource("pods").
Name(fmt.Sprintf("%s:8080", pod.Name)).
SubResource("proxy").
Suffix("metrics").
Do(ctx)
data, err := res.Raw()
if err != nil {
// Failing to dump metrics should not cause the test to fail
data = []byte(fmt.Sprintf("Error retrieving metrics for pod %s: %v\n%s", klog.KRef(pod.Namespace, pod.Name), err, string(data)))
metricsFile = path.Join(metricsDir, "metrics-error.txt")
}
if err := os.WriteFile(metricsFile, data, 0600); err != nil {
// Failing to dump metrics should not cause the test to fail
log.Logf("Error writing metrics for pod %s: %v", klog.KRef(pod.Namespace, pod.Name), err)
}
}
}
// WaitForDNSUpgradeInput is the input for WaitForDNSUpgrade.
type WaitForDNSUpgradeInput struct {
Getter Getter
DNSVersion string
}
// WaitForDNSUpgrade waits until the CoreDNS version matches the CoreDNS upgrade version and all its replicas
// are ready for use with the upgraded version. This is called during KCP upgrade.
func WaitForDNSUpgrade(ctx context.Context, input WaitForDNSUpgradeInput, intervals ...interface{}) {
By("Ensuring CoreDNS has the correct image")
Eventually(func() (bool, error) {
d := &appsv1.Deployment{}
if err := input.Getter.Get(ctx, client.ObjectKey{Name: "coredns", Namespace: metav1.NamespaceSystem}, d); err != nil {
return false, err
}
// NOTE: coredns image name has changed over time (k8s.gcr.io/coredns,
// k8s.gcr.io/coredns/coredns), so we are checking if the version actually changed.
if !strings.HasSuffix(d.Spec.Template.Spec.Containers[0].Image, fmt.Sprintf(":%s", input.DNSVersion)) {
return false, nil
}
// check whether the upgraded CoreDNS replicas are available and ready for use.
if d.Status.ObservedGeneration >= d.Generation {
if d.Spec.Replicas != nil && d.Status.UpdatedReplicas == *d.Spec.Replicas && d.Status.AvailableReplicas == *d.Spec.Replicas {
return true, nil
}
}
return false, nil
}, intervals...).Should(BeTrue())
}
// DeployUnevictablePodInput is the input for DeployUnevictablePod.
type DeployUnevictablePodInput struct {
WorkloadClusterProxy ClusterProxy
ControlPlane *infracontrolplanev1.K3sControlPlane
DeploymentName string
Namespace string
WaitForDeploymentAvailableInterval []interface{}
}
// DeployUnevictablePod deploys a Deployment together with a PodDisruptionBudget so that its pods cannot be evicted.
func DeployUnevictablePod(ctx context.Context, input DeployUnevictablePodInput) {
Expect(input.DeploymentName).ToNot(BeNil(), "Need a deployment name in DeployUnevictablePod")
Expect(input.Namespace).ToNot(BeNil(), "Need a namespace in DeployUnevictablePod")
Expect(input.WorkloadClusterProxy).ToNot(BeNil(), "Need a workloadClusterProxy in DeployUnevictablePod")
EnsureNamespace(ctx, input.WorkloadClusterProxy.GetClient(), input.Namespace)
workloadDeployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: input.DeploymentName,
Namespace: input.Namespace,
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32(4),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "nonstop",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "nonstop",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "web",
Image: "nginx:1.12",
Ports: []corev1.ContainerPort{
{
Name: "http",
Protocol: corev1.ProtocolTCP,
ContainerPort: 80,
},
},
},
},
},
},
},
}
workloadClient := input.WorkloadClusterProxy.GetClientSet()
if input.ControlPlane != nil {
var serverVersion *version.Info
Eventually(func() error {
var err error
serverVersion, err = workloadClient.ServerVersion()
return err
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "failed to get server version")
// Use the control-plane label for Kubernetes version >= v1.20.0.
if utilversion.MustParseGeneric(serverVersion.String()).AtLeast(utilversion.MustParseGeneric("v1.20.0")) {
workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{nodeRoleControlPlane: ""}
} else {
workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{nodeRoleOldControlPlane: ""}
}
workloadDeployment.Spec.Template.Spec.Tolerations = []corev1.Toleration{
{
Key: nodeRoleOldControlPlane,
Effect: "NoSchedule",
},
{
Key: nodeRoleControlPlane,
Effect: "NoSchedule",
},
}
}
AddDeploymentToWorkloadCluster(ctx, AddDeploymentToWorkloadClusterInput{
Namespace: input.Namespace,
ClientSet: workloadClient,
Deployment: workloadDeployment,
})
// TODO(oscr): Remove when Kubernetes 1.20 support is dropped.
serverVersion, err := workloadClient.ServerVersion()
Expect(err).ToNot(HaveOccurred(), "Failed to get Kubernetes version for workload")
// If Kubernetes < 1.21.0 we need to use PDB from v1beta1
if utilversion.MustParseGeneric(serverVersion.String()).LessThan(utilversion.MustParseGeneric("v1.21.0")) {
budgetV1Beta1 := &v1beta1.PodDisruptionBudget{
TypeMeta: metav1.TypeMeta{
Kind: "PodDisruptionBudget",
APIVersion: "policy/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: input.DeploymentName,
Namespace: input.Namespace,
},
Spec: v1beta1.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "nonstop",
},
},
MaxUnavailable: &intstr.IntOrString{
Type: intstr.Int,
IntVal: 1,
StrVal: "1",
},
},
}
AddPodDisruptionBudgetV1Beta1(ctx, AddPodDisruptionBudgetInputV1Beta1{
Namespace: input.Namespace,
ClientSet: workloadClient,
Budget: budgetV1Beta1,
})
// If Kubernetes >= 1.21.0 then we need to use PDB from v1
} else {
budget := &policyv1.PodDisruptionBudget{
TypeMeta: metav1.TypeMeta{
Kind: "PodDisruptionBudget",
APIVersion: "policy/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: input.DeploymentName,
Namespace: input.Namespace,
},
Spec: policyv1.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "nonstop",
},
},
MaxUnavailable: &intstr.IntOrString{
Type: intstr.Int,
IntVal: 1,
StrVal: "1",
},
},
}
AddPodDisruptionBudget(ctx, AddPodDisruptionBudgetInput{
Namespace: input.Namespace,
ClientSet: workloadClient,
Budget: budget,
})
}
WaitForDeploymentsAvailable(ctx, WaitForDeploymentsAvailableInput{
Getter: input.WorkloadClusterProxy.GetClient(),
Deployment: workloadDeployment,
}, input.WaitForDeploymentAvailableInterval...)
}
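// A minimal usage sketch; the namespace and deployment name are placeholders and
// the interval values are illustrative:
//
//	DeployUnevictablePod(ctx, DeployUnevictablePodInput{
//		WorkloadClusterProxy:               workloadClusterProxy,
//		ControlPlane:                       controlPlane,
//		DeploymentName:                     "unevictable-pod",
//		Namespace:                          "unevictable-workload",
//		WaitForDeploymentAvailableInterval: []interface{}{"5m", "10s"},
//	})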
// AddDeploymentToWorkloadClusterInput is the input for AddDeploymentToWorkloadCluster.
type AddDeploymentToWorkloadClusterInput struct {
ClientSet *kubernetes.Clientset
Deployment *appsv1.Deployment
Namespace string
}
// AddDeploymentToWorkloadCluster adds a deployment to the workload cluster.
func AddDeploymentToWorkloadCluster(ctx context.Context, input AddDeploymentToWorkloadClusterInput) {
Eventually(func() error {
result, err := input.ClientSet.AppsV1().Deployments(input.Namespace).Create(ctx, input.Deployment, metav1.CreateOptions{})
if result != nil && err == nil {
return nil
}
return fmt.Errorf("deployment %s not successfully created in workload cluster: %v", klog.KObj(input.Deployment), err)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to create deployment %s in workload cluster", klog.KObj(input.Deployment))
}
// AddPodDisruptionBudgetInput is the input for AddPodDisruptionBudget.
type AddPodDisruptionBudgetInput struct {
ClientSet *kubernetes.Clientset
Budget *policyv1.PodDisruptionBudget
Namespace string
}
// AddPodDisruptionBudget adds a PodDisruptionBudget to the workload cluster.
func AddPodDisruptionBudget(ctx context.Context, input AddPodDisruptionBudgetInput) {
Eventually(func() error {
budget, err := input.ClientSet.PolicyV1().PodDisruptionBudgets(input.Namespace).Create(ctx, input.Budget, metav1.CreateOptions{})
if budget != nil && err == nil {
return nil
}
return fmt.Errorf("podDisruptionBudget needs to be successfully deployed: %v", err)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "podDisruptionBudget needs to be successfully deployed")
}
// TODO(oscr): Delete below when Kubernetes 1.20 support is dropped.
// AddPodDisruptionBudgetInputV1Beta1 is the input for AddPodDisruptionBudgetV1Beta1.
type AddPodDisruptionBudgetInputV1Beta1 struct {
ClientSet *kubernetes.Clientset
Budget *v1beta1.PodDisruptionBudget
Namespace string
}
// AddPodDisruptionBudgetV1Beta1 adds a PodDisruptionBudget to the workload cluster.
func AddPodDisruptionBudgetV1Beta1(ctx context.Context, input AddPodDisruptionBudgetInputV1Beta1) {
Eventually(func() error {
budget, err := input.ClientSet.PolicyV1beta1().PodDisruptionBudgets(input.Namespace).Create(ctx, input.Budget, metav1.CreateOptions{})
if budget != nil && err == nil {
return nil
}
return fmt.Errorf("podDisruptionBudget needs to be successfully deployed: %v", err)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "podDisruptionBudget needs to be successfully deployed")
}

18
test/e2e/framework/doc.go Normal file
View File

@ -0,0 +1,18 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package framework implements end to end testing.
package framework

View File

@ -0,0 +1,51 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Interfaces to scope down client.Client.
// Getter can get resources.
type Getter interface {
Get(ctx context.Context, key client.ObjectKey, obj client.Object) error
}
// Creator can create resources.
type Creator interface {
Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error
}
// Lister can list resources.
type Lister interface {
List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error
}
// Deleter can delete resources.
type Deleter interface {
Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error
}
// GetLister can get and list resources.
type GetLister interface {
Getter
Lister
}
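// With the controller-runtime version this framework builds against, a full
// client.Client already satisfies these narrower interfaces, so the same
// management-cluster client can be passed wherever a Getter, Lister, or GetLister
// is expected. A rough sketch (mgmtClient is an assumed client.Client):
//
//	var getter Getter = mgmtClient
//	var lister Lister = mgmtClient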

View File

@ -0,0 +1,29 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package log implements test framework logging.
package log
import (
"fmt"
. "github.com/onsi/ginkgo"
)
// Logf logs an INFO-level message to the GinkgoWriter.
func Logf(format string, a ...interface{}) {
fmt.Fprintf(GinkgoWriter, "INFO: "+format+"\n", a...)
}

View File

@ -0,0 +1,277 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
. "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
// GetMachinesByMachineDeploymentsInput is the input for GetMachinesByMachineDeployments.
type GetMachinesByMachineDeploymentsInput struct {
Lister Lister
ClusterName string
Namespace string
MachineDeployment clusterv1.MachineDeployment
}
// GetMachinesByMachineDeployments returns Machine objects for a cluster belonging to a machine deployment.
// Important! This method relies on labels that are created by the CAPI controllers during the first reconciliation, so
// it is necessary to ensure this has already happened before calling it.
func GetMachinesByMachineDeployments(ctx context.Context, input GetMachinesByMachineDeploymentsInput) []clusterv1.Machine {
Expect(ctx).NotTo(BeNil(), "ctx is required for GetMachinesByMachineDeployments")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling GetMachinesByMachineDeployments")
Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling GetMachinesByMachineDeployments")
Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling GetMachinesByMachineDeployments")
Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. input.MachineDeployment can't be nil when calling GetMachinesByMachineDeployments")
opts := byClusterOptions(input.ClusterName, input.Namespace)
opts = append(opts, machineDeploymentOptions(input.MachineDeployment)...)
machineList := &clusterv1.MachineList{}
Eventually(func() error {
return input.Lister.List(ctx, machineList, opts...)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachineList object for Cluster %s", klog.KRef(input.Namespace, input.ClusterName))
return machineList.Items
}
// GetMachinesByMachineHealthCheckInput is the input for GetMachinesByMachineHealthCheck.
type GetMachinesByMachineHealthCheckInput struct {
Lister Lister
ClusterName string
MachineHealthCheck *clusterv1.MachineHealthCheck
}
// GetMachinesByMachineHealthCheck returns Machine objects for a cluster that match with MachineHealthCheck selector.
func GetMachinesByMachineHealthCheck(ctx context.Context, input GetMachinesByMachineHealthCheckInput) []clusterv1.Machine {
Expect(ctx).NotTo(BeNil(), "ctx is required for GetMachinesByMachineDeployments")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling GetMachinesByMachineHealthCheck")
Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling GetMachinesByMachineHealthCheck")
Expect(input.MachineHealthCheck).ToNot(BeNil(), "Invalid argument. input.MachineHealthCheck can't be nil when calling GetMachinesByMachineHealthCheck")
opts := byClusterOptions(input.ClusterName, input.MachineHealthCheck.Namespace)
opts = append(opts, machineHealthCheckOptions(*input.MachineHealthCheck)...)
machineList := &clusterv1.MachineList{}
Eventually(func() error {
return input.Lister.List(ctx, machineList, opts...)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachineList object for Cluster %s", klog.KRef(input.MachineHealthCheck.Namespace, input.ClusterName))
return machineList.Items
}
// GetControlPlaneMachinesByClusterInput is the input for GetControlPlaneMachinesByCluster.
type GetControlPlaneMachinesByClusterInput struct {
Lister Lister
ClusterName string
Namespace string
}
// GetControlPlaneMachinesByCluster returns the Machine objects for a cluster.
// Important! This method relies on labels that are created by the CAPI controllers during the first reconciliation, so
// it is necessary to ensure this has already happened before calling it.
func GetControlPlaneMachinesByCluster(ctx context.Context, input GetControlPlaneMachinesByClusterInput) []clusterv1.Machine {
Expect(ctx).NotTo(BeNil(), "ctx is required for GetControlPlaneMachinesByCluster")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling GetControlPlaneMachinesByCluster")
Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling GetControlPlaneMachinesByCluster")
Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling GetControlPlaneMachinesByCluster")
options := append(byClusterOptions(input.ClusterName, input.Namespace), controlPlaneMachineOptions()...)
machineList := &clusterv1.MachineList{}
Eventually(func() error {
return input.Lister.List(ctx, machineList, options...)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachineList object for Cluster %s", klog.KRef(input.Namespace, input.ClusterName))
return machineList.Items
}
// WaitForControlPlaneMachinesToBeUpgradedInput is the input for WaitForControlPlaneMachinesToBeUpgraded.
type WaitForControlPlaneMachinesToBeUpgradedInput struct {
Lister Lister
Cluster *clusterv1.Cluster
KubernetesUpgradeVersion string
MachineCount int
}
// WaitForControlPlaneMachinesToBeUpgraded waits until all machines are upgraded to the correct Kubernetes version.
func WaitForControlPlaneMachinesToBeUpgraded(ctx context.Context, input WaitForControlPlaneMachinesToBeUpgradedInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForControlPlaneMachinesToBeUpgraded")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling WaitForControlPlaneMachinesToBeUpgraded")
Expect(input.KubernetesUpgradeVersion).ToNot(BeEmpty(), "Invalid argument. input.KubernetesUpgradeVersion can't be empty when calling WaitForControlPlaneMachinesToBeUpgraded")
Expect(input.MachineCount).To(BeNumerically(">", 0), "Invalid argument. input.MachineCount can't be smaller than 1 when calling WaitForControlPlaneMachinesToBeUpgraded")
Byf("Ensuring all control-plane machines have upgraded kubernetes version %s", input.KubernetesUpgradeVersion)
Eventually(func() (int, error) {
machines := GetControlPlaneMachinesByCluster(ctx, GetControlPlaneMachinesByClusterInput{
Lister: input.Lister,
ClusterName: input.Cluster.Name,
Namespace: input.Cluster.Namespace,
})
upgraded := 0
for _, machine := range machines {
m := machine
if *m.Spec.Version == input.KubernetesUpgradeVersion && conditions.IsTrue(&m, clusterv1.MachineNodeHealthyCondition) {
upgraded++
}
}
if len(machines) > upgraded {
return 0, errors.New("old nodes remain")
}
return upgraded, nil
}, intervals...).Should(Equal(input.MachineCount), "Timed out waiting for all control-plane machines in Cluster %s to be upgraded to kubernetes version %s", klog.KObj(input.Cluster), input.KubernetesUpgradeVersion)
}
// WaitForMachineDeploymentMachinesToBeUpgradedInput is the input for WaitForMachineDeploymentMachinesToBeUpgraded.
type WaitForMachineDeploymentMachinesToBeUpgradedInput struct {
Lister Lister
Cluster *clusterv1.Cluster
KubernetesUpgradeVersion string
MachineCount int
MachineDeployment clusterv1.MachineDeployment
}
// WaitForMachineDeploymentMachinesToBeUpgraded waits until all machines belonging to a MachineDeployment are upgraded to the correct kubernetes version.
func WaitForMachineDeploymentMachinesToBeUpgraded(ctx context.Context, input WaitForMachineDeploymentMachinesToBeUpgradedInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachineDeploymentMachinesToBeUpgraded")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling WaitForMachineDeploymentMachinesToBeUpgraded")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling WaitForMachineDeploymentMachinesToBeUpgraded")
Expect(input.KubernetesUpgradeVersion).ToNot(BeNil(), "Invalid argument. input.KubernetesUpgradeVersion can't be nil when calling WaitForMachineDeploymentMachinesToBeUpgraded")
Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. input.MachineDeployment can't be nil when calling WaitForMachineDeploymentMachinesToBeUpgraded")
Expect(input.MachineCount).To(BeNumerically(">", 0), "Invalid argument. input.MachineCount can't be smaller than 1 when calling WaitForMachineDeploymentMachinesToBeUpgraded")
log.Logf("Ensuring all MachineDeployment Machines have upgraded kubernetes version %s", input.KubernetesUpgradeVersion)
Eventually(func() (int, error) {
machines := GetMachinesByMachineDeployments(ctx, GetMachinesByMachineDeploymentsInput{
Lister: input.Lister,
ClusterName: input.Cluster.Name,
Namespace: input.Cluster.Namespace,
MachineDeployment: input.MachineDeployment,
})
upgraded := 0
for _, machine := range machines {
if *machine.Spec.Version == input.KubernetesUpgradeVersion {
upgraded++
}
}
if len(machines) > upgraded {
return 0, errors.New("old nodes remain")
}
return upgraded, nil
}, intervals...).Should(Equal(input.MachineCount), "Timed out waiting for all MachineDeployment %s Machines to be upgraded to kubernetes version %s", klog.KObj(&input.MachineDeployment), input.KubernetesUpgradeVersion)
}
// PatchNodeConditionInput is the input for PatchNodeCondition.
type PatchNodeConditionInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
NodeCondition corev1.NodeCondition
Machine clusterv1.Machine
}
// PatchNodeCondition patches a node condition to any one of the machines with a node ref.
func PatchNodeCondition(ctx context.Context, input PatchNodeConditionInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for PatchNodeConditions")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling PatchNodeConditions")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling PatchNodeConditions")
Expect(input.NodeCondition).ToNot(BeNil(), "Invalid argument. input.NodeCondition can't be nil when calling PatchNodeConditions")
Expect(input.Machine).ToNot(BeNil(), "Invalid argument. input.Machine can't be nil when calling PatchNodeConditions")
log.Logf("Patching the node condition to the node")
Expect(input.Machine.Status.NodeRef).ToNot(BeNil())
node := &corev1.Node{}
Eventually(func() error {
return input.ClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Namespace, input.Cluster.Name).GetClient().Get(ctx, types.NamespacedName{Name: input.Machine.Status.NodeRef.Name, Namespace: input.Machine.Status.NodeRef.Namespace}, node)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to get node %s", input.Machine.Status.NodeRef.Name)
patchHelper, err := patch.NewHelper(node, input.ClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Namespace, input.Cluster.Name).GetClient())
Expect(err).ToNot(HaveOccurred())
node.Status.Conditions = append(node.Status.Conditions, input.NodeCondition)
Eventually(func() error {
return patchHelper.Patch(ctx, node)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch node %s", input.Machine.Status.NodeRef.Name)
}
// MachineStatusCheck is a type that performs a status check on a Machine.
type MachineStatusCheck func(p *clusterv1.Machine) error
// WaitForMachineStatusCheckInput is the input for WaitForMachineStatusCheck.
type WaitForMachineStatusCheckInput struct {
Getter Getter
Machine *clusterv1.Machine
StatusChecks []MachineStatusCheck
}
// WaitForMachineStatusCheck waits until all the specified status checks pass for the machine.
func WaitForMachineStatusCheck(ctx context.Context, input WaitForMachineStatusCheckInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachineStatusCheck")
Expect(input.Machine).ToNot(BeNil(), "Invalid argument. input.Machine can't be nil when calling WaitForMachineStatusCheck")
Expect(input.StatusChecks).ToNot(BeEmpty(), "Invalid argument. input.StatusChecks can't be empty when calling WaitForMachineStatusCheck")
Eventually(func() (bool, error) {
machine := &clusterv1.Machine{}
key := client.ObjectKey{
Namespace: input.Machine.Namespace,
Name: input.Machine.Name,
}
err := input.Getter.Get(ctx, key, machine)
Expect(err).NotTo(HaveOccurred())
for _, statusCheck := range input.StatusChecks {
err := statusCheck(machine)
if err != nil {
return false, err
}
}
return true, nil
}, intervals...).Should(BeTrue())
}
// MachineNodeRefCheck is a MachineStatusCheck ensuring that a NodeRef is assigned to the machine.
func MachineNodeRefCheck() MachineStatusCheck {
return func(machine *clusterv1.Machine) error {
if machine.Status.NodeRef == nil {
return errors.Errorf("NodeRef is not assigned to the machine %s", klog.KObj(machine))
}
return nil
}
}
// MachinePhaseCheck is a MachineStatusCheck ensuring that a machine is in the expected phase.
func MachinePhaseCheck(expectedPhase string) MachineStatusCheck {
return func(machine *clusterv1.Machine) error {
if machine.Status.Phase != expectedPhase {
return errors.Errorf("Machine %s is not in phase %s", klog.KObj(machine), expectedPhase)
}
return nil
}
}
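// A minimal sketch combining the checks above with WaitForMachineStatusCheck;
// machine is an assumed clusterv1.Machine fixture and the intervals are illustrative:
//
//	WaitForMachineStatusCheck(ctx, WaitForMachineStatusCheckInput{
//		Getter:       mgmtClusterProxy.GetClient(),
//		Machine:      &machine,
//		StatusChecks: []MachineStatusCheck{MachineNodeRefCheck(), MachinePhaseCheck("Running")},
//	}, "10m", "10s")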

View File

@ -0,0 +1,498 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
. "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
// CreateMachineDeploymentInput is the input for CreateMachineDeployment.
type CreateMachineDeploymentInput struct {
Creator Creator
MachineDeployment *clusterv1.MachineDeployment
BootstrapConfigTemplate client.Object
InfraMachineTemplate client.Object
}
// CreateMachineDeployment creates the machine deployment and dependencies.
func CreateMachineDeployment(ctx context.Context, input CreateMachineDeploymentInput) {
By("creating a core MachineDeployment resource")
Eventually(func() error {
return input.Creator.Create(ctx, input.MachineDeployment)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to create MachineDeployment %s", klog.KObj(input.MachineDeployment))
By("creating a BootstrapConfigTemplate resource")
Eventually(func() error {
return input.Creator.Create(ctx, input.BootstrapConfigTemplate)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to create BootstrapConfigTemplate %s", klog.KObj(input.BootstrapConfigTemplate))
By("creating an InfrastructureMachineTemplate resource")
Eventually(func() error {
return input.Creator.Create(ctx, input.InfraMachineTemplate)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to create InfrastructureMachineTemplate %s", klog.KObj(input.InfraMachineTemplate))
}
// GetMachineDeploymentsByClusterInput is the input for GetMachineDeploymentsByCluster.
type GetMachineDeploymentsByClusterInput struct {
Lister Lister
ClusterName string
Namespace string
}
// GetMachineDeploymentsByCluster returns the MachineDeployments objects for a cluster.
// Important! This method relies on labels that are created by the CAPI controllers during the first reconciliation, so
// it is necessary to ensure this has already happened before calling it.
func GetMachineDeploymentsByCluster(ctx context.Context, input GetMachineDeploymentsByClusterInput) []*clusterv1.MachineDeployment {
deploymentList := &clusterv1.MachineDeploymentList{}
Eventually(func() error {
return input.Lister.List(ctx, deploymentList, byClusterOptions(input.ClusterName, input.Namespace)...)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachineDeployments object for Cluster %s", klog.KRef(input.Namespace, input.ClusterName))
deployments := make([]*clusterv1.MachineDeployment, len(deploymentList.Items))
for i := range deploymentList.Items {
Expect(deploymentList.Items[i].Spec.Replicas).ToNot(BeNil())
deployments[i] = &deploymentList.Items[i]
}
return deployments
}
// WaitForMachineDeploymentNodesToExistInput is the input for WaitForMachineDeploymentNodesToExist.
type WaitForMachineDeploymentNodesToExistInput struct {
Lister Lister
Cluster *clusterv1.Cluster
MachineDeployment *clusterv1.MachineDeployment
}
// WaitForMachineDeploymentNodesToExist waits until all nodes associated with a machine deployment exist.
func WaitForMachineDeploymentNodesToExist(ctx context.Context, input WaitForMachineDeploymentNodesToExistInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachineDeploymentNodesToExist")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling WaitForMachineDeploymentNodesToExist")
Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. input.MachineDeployment can't be nil when calling WaitForMachineDeploymentNodesToExist")
By("Waiting for the workload nodes to exist")
Eventually(func() (int, error) {
selectorMap, err := metav1.LabelSelectorAsMap(&input.MachineDeployment.Spec.Selector)
if err != nil {
return 0, err
}
ms := &clusterv1.MachineSetList{}
if err := input.Lister.List(ctx, ms, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels(selectorMap)); err != nil {
return 0, err
}
if len(ms.Items) == 0 {
return 0, errors.New("no machinesets were found")
}
machineSet := ms.Items[0]
selectorMap, err = metav1.LabelSelectorAsMap(&machineSet.Spec.Selector)
if err != nil {
return 0, err
}
machines := &clusterv1.MachineList{}
if err := input.Lister.List(ctx, machines, client.InNamespace(machineSet.Namespace), client.MatchingLabels(selectorMap)); err != nil {
return 0, err
}
count := 0
for _, machine := range machines.Items {
if machine.Status.NodeRef != nil {
count++
}
}
return count, nil
}, intervals...).Should(Equal(int(*input.MachineDeployment.Spec.Replicas)), "Timed out waiting for %d nodes to be created for MachineDeployment %s", int(*input.MachineDeployment.Spec.Replicas), klog.KObj(input.MachineDeployment))
}
// AssertMachineDeploymentFailureDomainsInput is the input for AssertMachineDeploymentFailureDomains.
type AssertMachineDeploymentFailureDomainsInput struct {
Lister Lister
Cluster *clusterv1.Cluster
MachineDeployment *clusterv1.MachineDeployment
}
// AssertMachineDeploymentFailureDomains will look at all MachineDeployment machines and see what failure domains they were
// placed in. If machines were placed in unexpected or wrong failure domains the expectation will fail.
func AssertMachineDeploymentFailureDomains(ctx context.Context, input AssertMachineDeploymentFailureDomainsInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for AssertMachineDeploymentFailureDomains")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling AssertMachineDeploymentFailureDomains")
Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. input.MachineDeployment can't be nil when calling AssertMachineDeploymentFailureDomains")
machineDeploymentFD := pointer.StringDeref(input.MachineDeployment.Spec.Template.Spec.FailureDomain, "<None>")
Byf("Checking all the machines controlled by %s are in the %q failure domain", input.MachineDeployment.Name, machineDeploymentFD)
selectorMap, err := metav1.LabelSelectorAsMap(&input.MachineDeployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
ms := &clusterv1.MachineSetList{}
Eventually(func() error {
return input.Lister.List(ctx, ms, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels(selectorMap))
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachineSets for Cluster %s", klog.KObj(input.Cluster))
for _, machineSet := range ms.Items {
machineSetFD := pointer.StringDeref(machineSet.Spec.Template.Spec.FailureDomain, "<None>")
Expect(machineSetFD).To(Equal(machineDeploymentFD), "MachineSet %s is in the %q failure domain, expecting %q", machineSet.Name, machineSetFD, machineDeploymentFD)
selectorMap, err = metav1.LabelSelectorAsMap(&machineSet.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
machines := &clusterv1.MachineList{}
Eventually(func() error {
return input.Lister.List(ctx, machines, client.InNamespace(machineSet.Namespace), client.MatchingLabels(selectorMap))
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Machines for Cluster %s", klog.KObj(input.Cluster))
for _, machine := range machines.Items {
machineFD := pointer.StringDeref(machine.Spec.FailureDomain, "<None>")
Expect(machineFD).To(Equal(machineDeploymentFD), "Machine %s is in the %q failure domain, expecting %q", machine.Name, machineFD, machineDeploymentFD)
}
}
}
// DiscoveryAndWaitForMachineDeploymentsInput is the input type for DiscoveryAndWaitForMachineDeployments.
type DiscoveryAndWaitForMachineDeploymentsInput struct {
Lister Lister
Cluster *clusterv1.Cluster
}
// DiscoveryAndWaitForMachineDeployments discovers the MachineDeployments existing in a cluster and waits for them to be ready (all the machines provisioned).
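// A usage sketch (clusterProxy, cluster and the intervals key are assumed to be provided by the calling spec):
//
//	workerMachineDeployments := framework.DiscoveryAndWaitForMachineDeployments(ctx, framework.DiscoveryAndWaitForMachineDeploymentsInput{
//		Lister:  clusterProxy.GetClient(),
//		Cluster: cluster,
//	}, e2eConfig.GetIntervals(specName, "wait-worker-nodes")...)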
func DiscoveryAndWaitForMachineDeployments(ctx context.Context, input DiscoveryAndWaitForMachineDeploymentsInput, intervals ...interface{}) []*clusterv1.MachineDeployment {
Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForMachineDeployments")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForMachineDeployments")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoveryAndWaitForMachineDeployments")
machineDeployments := GetMachineDeploymentsByCluster(ctx, GetMachineDeploymentsByClusterInput{
Lister: input.Lister,
ClusterName: input.Cluster.Name,
Namespace: input.Cluster.Namespace,
})
for _, deployment := range machineDeployments {
WaitForMachineDeploymentNodesToExist(ctx, WaitForMachineDeploymentNodesToExistInput{
Lister: input.Lister,
Cluster: input.Cluster,
MachineDeployment: deployment,
}, intervals...)
AssertMachineDeploymentFailureDomains(ctx, AssertMachineDeploymentFailureDomainsInput{
Lister: input.Lister,
Cluster: input.Cluster,
MachineDeployment: deployment,
})
}
return machineDeployments
}
// UpgradeMachineDeploymentsAndWaitInput is the input type for UpgradeMachineDeploymentsAndWait.
type UpgradeMachineDeploymentsAndWaitInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
UpgradeVersion string
UpgradeMachineTemplate *string
MachineDeployments []*clusterv1.MachineDeployment
WaitForMachinesToBeUpgraded []interface{}
}
// UpgradeMachineDeploymentsAndWait upgrades a machine deployment and waits for its machines to be upgraded.
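// A usage sketch (the version string and intervals key are illustrative):
//
//	framework.UpgradeMachineDeploymentsAndWait(ctx, framework.UpgradeMachineDeploymentsAndWaitInput{
//		ClusterProxy:                clusterProxy,
//		Cluster:                     cluster,
//		UpgradeVersion:              "v1.24.0",
//		MachineDeployments:          machineDeployments,
//		WaitForMachinesToBeUpgraded: e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
//	})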
func UpgradeMachineDeploymentsAndWait(ctx context.Context, input UpgradeMachineDeploymentsAndWaitInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeMachineDeploymentsAndWait")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeMachineDeploymentsAndWait")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeMachineDeploymentsAndWait")
Expect(input.UpgradeVersion).ToNot(BeNil(), "Invalid argument. input.UpgradeVersion can't be nil when calling UpgradeMachineDeploymentsAndWait")
Expect(input.MachineDeployments).ToNot(BeEmpty(), "Invalid argument. input.MachineDeployments can't be empty when calling UpgradeMachineDeploymentsAndWait")
mgmtClient := input.ClusterProxy.GetClient()
for _, deployment := range input.MachineDeployments {
log.Logf("Patching the new kubernetes version to Machine Deployment %s", klog.KObj(deployment))
patchHelper, err := patch.NewHelper(deployment, mgmtClient)
Expect(err).ToNot(HaveOccurred())
oldVersion := deployment.Spec.Template.Spec.Version
deployment.Spec.Template.Spec.Version = &input.UpgradeVersion
if input.UpgradeMachineTemplate != nil {
deployment.Spec.Template.Spec.InfrastructureRef.Name = *input.UpgradeMachineTemplate
}
Eventually(func() error {
return patchHelper.Patch(ctx, deployment)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch Kubernetes version on MachineDeployment %s", klog.KObj(deployment))
log.Logf("Waiting for Kubernetes versions of machines in MachineDeployment %s to be upgraded from %s to %s",
klog.KObj(deployment), *oldVersion, input.UpgradeVersion)
WaitForMachineDeploymentMachinesToBeUpgraded(ctx, WaitForMachineDeploymentMachinesToBeUpgradedInput{
Lister: mgmtClient,
Cluster: input.Cluster,
MachineCount: int(*deployment.Spec.Replicas),
KubernetesUpgradeVersion: input.UpgradeVersion,
MachineDeployment: *deployment,
}, input.WaitForMachinesToBeUpgraded...)
}
}
// WaitForMachineDeploymentRollingUpgradeToStartInput is the input for WaitForMachineDeploymentRollingUpgradeToStart.
type WaitForMachineDeploymentRollingUpgradeToStartInput struct {
Getter Getter
MachineDeployment *clusterv1.MachineDeployment
}
// WaitForMachineDeploymentRollingUpgradeToStart waits until rolling upgrade starts.
func WaitForMachineDeploymentRollingUpgradeToStart(ctx context.Context, input WaitForMachineDeploymentRollingUpgradeToStartInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachineDeploymentRollingUpgradeToStart")
Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling WaitForMachineDeploymentRollingUpgradeToStart")
Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. input.MachineDeployment can't be nil when calling WaitForMachineDeploymentRollingUpgradeToStart")
log.Logf("Waiting for MachineDeployment rolling upgrade to start")
Eventually(func(g Gomega) bool {
md := &clusterv1.MachineDeployment{}
g.Expect(input.Getter.Get(ctx, client.ObjectKey{Namespace: input.MachineDeployment.Namespace, Name: input.MachineDeployment.Name}, md)).To(Succeed())
return md.Status.Replicas != md.Status.AvailableReplicas
}, intervals...).Should(BeTrue())
}
// WaitForMachineDeploymentRollingUpgradeToCompleteInput is the input for WaitForMachineDeploymentRollingUpgradeToComplete.
type WaitForMachineDeploymentRollingUpgradeToCompleteInput struct {
Getter Getter
MachineDeployment *clusterv1.MachineDeployment
}
// WaitForMachineDeploymentRollingUpgradeToComplete waits until rolling upgrade is complete.
func WaitForMachineDeploymentRollingUpgradeToComplete(ctx context.Context, input WaitForMachineDeploymentRollingUpgradeToCompleteInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachineDeploymentRollingUpgradeToComplete")
Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling WaitForMachineDeploymentRollingUpgradeToComplete")
Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. input.MachineDeployment can't be nil when calling WaitForMachineDeploymentRollingUpgradeToComplete")
log.Logf("Waiting for MachineDeployment rolling upgrade to complete")
Eventually(func(g Gomega) bool {
md := &clusterv1.MachineDeployment{}
g.Expect(input.Getter.Get(ctx, client.ObjectKey{Namespace: input.MachineDeployment.Namespace, Name: input.MachineDeployment.Name}, md)).To(Succeed())
return md.Status.Replicas == md.Status.AvailableReplicas
}, intervals...).Should(BeTrue())
}
// UpgradeMachineDeploymentInfrastructureRefAndWaitInput is the input type for UpgradeMachineDeploymentInfrastructureRefAndWait.
type UpgradeMachineDeploymentInfrastructureRefAndWaitInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
MachineDeployments []*clusterv1.MachineDeployment
WaitForMachinesToBeUpgraded []interface{}
}
// UpgradeMachineDeploymentInfrastructureRefAndWait upgrades a machine deployment infrastructure ref and waits for its machines to be upgraded.
func UpgradeMachineDeploymentInfrastructureRefAndWait(ctx context.Context, input UpgradeMachineDeploymentInfrastructureRefAndWaitInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeMachineDeploymentInfrastructureRefAndWait")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeMachineDeploymentInfrastructureRefAndWait")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeMachineDeploymentInfrastructureRefAndWait")
Expect(input.MachineDeployments).ToNot(BeEmpty(), "Invalid argument. input.MachineDeployments can't be empty when calling UpgradeMachineDeploymentInfrastructureRefAndWait")
mgmtClient := input.ClusterProxy.GetClient()
for _, deployment := range input.MachineDeployments {
log.Logf("Patching the new infrastructure ref to Machine Deployment %s", klog.KObj(deployment))
// Retrieve infra object
infraRef := deployment.Spec.Template.Spec.InfrastructureRef
infraObj := &unstructured.Unstructured{}
infraObj.SetGroupVersionKind(infraRef.GroupVersionKind())
key := client.ObjectKey{
Namespace: input.Cluster.Namespace,
Name: infraRef.Name,
}
Eventually(func() error {
return mgmtClient.Get(ctx, key, infraObj)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to get infra object %s for MachineDeployment %s", klog.KRef(key.Namespace, key.Name), klog.KObj(deployment))
// Creates a new infra object
newInfraObj := infraObj
newInfraObjName := fmt.Sprintf("%s-%s", infraRef.Name, util.RandomString(6))
newInfraObj.SetName(newInfraObjName)
newInfraObj.SetResourceVersion("")
Eventually(func() error {
return mgmtClient.Create(ctx, newInfraObj)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to create new infrastructure object %s for MachineDeployment %s", klog.KObj(infraObj), klog.KObj(deployment))
// Patch the new infra object's ref to the machine deployment
patchHelper, err := patch.NewHelper(deployment, mgmtClient)
Expect(err).ToNot(HaveOccurred())
infraRef.Name = newInfraObjName
deployment.Spec.Template.Spec.InfrastructureRef = infraRef
Eventually(func() error {
return patchHelper.Patch(ctx, deployment)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch new infrastructure ref to MachineDeployment %s", klog.KObj(deployment))
log.Logf("Waiting for rolling upgrade to start.")
WaitForMachineDeploymentRollingUpgradeToStart(ctx, WaitForMachineDeploymentRollingUpgradeToStartInput{
Getter: mgmtClient,
MachineDeployment: deployment,
}, input.WaitForMachinesToBeUpgraded...)
log.Logf("Waiting for rolling upgrade to complete.")
WaitForMachineDeploymentRollingUpgradeToComplete(ctx, WaitForMachineDeploymentRollingUpgradeToCompleteInput{
Getter: mgmtClient,
MachineDeployment: deployment,
}, input.WaitForMachinesToBeUpgraded...)
}
}
// machineDeploymentOptions returns a set of ListOptions that can be used to get all machine objects belonging to a machine deployment.
func machineDeploymentOptions(deployment clusterv1.MachineDeployment) []client.ListOption {
return []client.ListOption{
client.MatchingLabels(deployment.Spec.Selector.MatchLabels),
}
}
// ScaleAndWaitMachineDeploymentInput is the input for ScaleAndWaitMachineDeployment.
type ScaleAndWaitMachineDeploymentInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
MachineDeployment *clusterv1.MachineDeployment
Replicas int32
WaitForMachineDeployments []interface{}
}
// ScaleAndWaitMachineDeployment scales a MachineDeployment and waits until all of its machines have a node ref and their count equals Replicas.
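// A usage sketch (the replica count and intervals key are illustrative):
//
//	framework.ScaleAndWaitMachineDeployment(ctx, framework.ScaleAndWaitMachineDeploymentInput{
//		ClusterProxy:              clusterProxy,
//		Cluster:                   cluster,
//		MachineDeployment:         md,
//		Replicas:                  3,
//		WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
//	})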
func ScaleAndWaitMachineDeployment(ctx context.Context, input ScaleAndWaitMachineDeploymentInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for ScaleAndWaitMachineDeployment")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ScaleAndWaitMachineDeployment")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling ScaleAndWaitMachineDeployment")
log.Logf("Scaling machine deployment %s from %d to %d replicas", klog.KObj(input.MachineDeployment), *input.MachineDeployment.Spec.Replicas, input.Replicas)
patchHelper, err := patch.NewHelper(input.MachineDeployment, input.ClusterProxy.GetClient())
Expect(err).ToNot(HaveOccurred())
input.MachineDeployment.Spec.Replicas = pointer.Int32(input.Replicas)
Eventually(func() error {
return patchHelper.Patch(ctx, input.MachineDeployment)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to scale machine deployment %s", klog.KObj(input.MachineDeployment))
log.Logf("Waiting for correct number of replicas to exist")
Eventually(func() (int, error) {
selectorMap, err := metav1.LabelSelectorAsMap(&input.MachineDeployment.Spec.Selector)
if err != nil {
return -1, err
}
ms := &clusterv1.MachineSetList{}
if err := input.ClusterProxy.GetClient().List(ctx, ms, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels(selectorMap)); err != nil {
return -1, err
}
if len(ms.Items) == 0 {
return -1, errors.New("no machinesets were found")
}
machineSet := ms.Items[0]
selectorMap, err = metav1.LabelSelectorAsMap(&machineSet.Spec.Selector)
if err != nil {
return -1, err
}
machines := &clusterv1.MachineList{}
if err := input.ClusterProxy.GetClient().List(ctx, machines, client.InNamespace(machineSet.Namespace), client.MatchingLabels(selectorMap)); err != nil {
return -1, err
}
nodeRefCount := 0
for _, machine := range machines.Items {
if machine.Status.NodeRef != nil {
nodeRefCount++
}
}
if len(machines.Items) != nodeRefCount {
return -1, errors.New("Machine count does not match existing nodes count")
}
return nodeRefCount, nil
}, input.WaitForMachineDeployments...).Should(Equal(int(*input.MachineDeployment.Spec.Replicas)), "Timed out waiting for Machine Deployment %s to have %d replicas", klog.KObj(input.MachineDeployment), *input.MachineDeployment.Spec.Replicas)
}
// ScaleAndWaitMachineDeploymentTopologyInput is the input for ScaleAndWaitMachineDeploymentTopology.
type ScaleAndWaitMachineDeploymentTopologyInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
Replicas int32
WaitForMachineDeployments []interface{}
}
// ScaleAndWaitMachineDeploymentTopology scales a MachineDeployment topology and waits until all of its machines have a node ref and their count equals Replicas.
func ScaleAndWaitMachineDeploymentTopology(ctx context.Context, input ScaleAndWaitMachineDeploymentTopologyInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for ScaleAndWaitMachineDeployment")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ScaleAndWaitMachineDeployment")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling ScaleAndWaitMachineDeployment")
Expect(input.Cluster.Spec.Topology.Workers).ToNot(BeNil(), "Invalid argument. input.Cluster must have MachineDeployment topologies")
Expect(len(input.Cluster.Spec.Topology.Workers.MachineDeployments) >= 1).To(BeTrue(), "Invalid argument. input.Cluster must have at least one MachineDeployment topology")
mdTopology := input.Cluster.Spec.Topology.Workers.MachineDeployments[0]
log.Logf("Scaling machine deployment topology %s from %d to %d replicas", mdTopology.Name, *mdTopology.Replicas, input.Replicas)
patchHelper, err := patch.NewHelper(input.Cluster, input.ClusterProxy.GetClient())
Expect(err).ToNot(HaveOccurred())
mdTopology.Replicas = pointer.Int32(input.Replicas)
input.Cluster.Spec.Topology.Workers.MachineDeployments[0] = mdTopology
Eventually(func() error {
return patchHelper.Patch(ctx, input.Cluster)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to scale machine deployment topology %s", mdTopology.Name)
log.Logf("Waiting for correct number of replicas to exist")
deploymentList := &clusterv1.MachineDeploymentList{}
Eventually(func() error {
return input.ClusterProxy.GetClient().List(ctx, deploymentList,
client.InNamespace(input.Cluster.Namespace),
client.MatchingLabels{
clusterv1.ClusterLabelName: input.Cluster.Name,
clusterv1.ClusterTopologyMachineDeploymentLabelName: mdTopology.Name,
},
)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachineDeployments object for Cluster %s", klog.KRef(input.Cluster.Namespace, input.Cluster.Name))
Expect(deploymentList.Items).To(HaveLen(1))
md := deploymentList.Items[0]
Eventually(func() (int, error) {
selectorMap, err := metav1.LabelSelectorAsMap(&md.Spec.Selector)
if err != nil {
return -1, err
}
ms := &clusterv1.MachineSetList{}
if err := input.ClusterProxy.GetClient().List(ctx, ms, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels(selectorMap)); err != nil {
return -1, err
}
if len(ms.Items) == 0 {
return -1, errors.New("no machinesets were found")
}
machineSet := ms.Items[0]
selectorMap, err = metav1.LabelSelectorAsMap(&machineSet.Spec.Selector)
if err != nil {
return -1, err
}
machines := &clusterv1.MachineList{}
if err := input.ClusterProxy.GetClient().List(ctx, machines, client.InNamespace(machineSet.Namespace), client.MatchingLabels(selectorMap)); err != nil {
return -1, err
}
nodeRefCount := 0
for _, machine := range machines.Items {
if machine.Status.NodeRef != nil {
nodeRefCount++
}
}
if len(machines.Items) != nodeRefCount {
return -1, errors.New("Machine count does not match existing nodes count")
}
return nodeRefCount, nil
}, input.WaitForMachineDeployments...).Should(Equal(int(*md.Spec.Replicas)), "Timed out waiting for Machine Deployment %s to have %d replicas", klog.KObj(&md), *md.Spec.Replicas)
}

View File

@ -0,0 +1,180 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// DiscoverMachineHealthCheckAndWaitForRemediationInput is the input for DiscoverMachineHealthChecksAndWaitForRemediation.
type DiscoverMachineHealthCheckAndWaitForRemediationInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
WaitForMachineRemediation []interface{}
}
// DiscoverMachineHealthChecksAndWaitForRemediation patches an unhealthy node condition to one node observed by each MachineHealthCheck and then waits for remediation.
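// A usage sketch (the "wait-machine-remediation" intervals key is illustrative):
//
//	framework.DiscoverMachineHealthChecksAndWaitForRemediation(ctx, framework.DiscoverMachineHealthCheckAndWaitForRemediationInput{
//		ClusterProxy:              clusterProxy,
//		Cluster:                   cluster,
//		WaitForMachineRemediation: e2eConfig.GetIntervals(specName, "wait-machine-remediation"),
//	})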
func DiscoverMachineHealthChecksAndWaitForRemediation(ctx context.Context, input DiscoverMachineHealthCheckAndWaitForRemediationInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoverMachineHealthChecksAndWaitForRemediation")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling DiscoverMachineHealthChecksAndWaitForRemediation")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoverMachineHealthChecksAndWaitForRemediation")
mgmtClient := input.ClusterProxy.GetClient()
fmt.Fprintln(GinkgoWriter, "Discovering machine health check resources")
machineHealthChecks := GetMachineHealthChecksForCluster(ctx, GetMachineHealthChecksForClusterInput{
Lister: mgmtClient,
ClusterName: input.Cluster.Name,
Namespace: input.Cluster.Namespace,
})
Expect(machineHealthChecks).NotTo(BeEmpty())
for _, mhc := range machineHealthChecks {
Expect(mhc.Spec.UnhealthyConditions).NotTo(BeEmpty())
fmt.Fprintln(GinkgoWriter, "Ensuring there is at least 1 Machine that MachineHealthCheck is matching")
machines := GetMachinesByMachineHealthCheck(ctx, GetMachinesByMachineHealthCheckInput{
Lister: mgmtClient,
ClusterName: input.Cluster.Name,
MachineHealthCheck: mhc,
})
Expect(machines).NotTo(BeEmpty())
fmt.Fprintln(GinkgoWriter, "Patching MachineHealthCheck unhealthy condition to one of the nodes")
unhealthyNodeCondition := corev1.NodeCondition{
Type: mhc.Spec.UnhealthyConditions[0].Type,
Status: mhc.Spec.UnhealthyConditions[0].Status,
LastTransitionTime: metav1.Time{Time: time.Now()},
}
PatchNodeCondition(ctx, PatchNodeConditionInput{
ClusterProxy: input.ClusterProxy,
Cluster: input.Cluster,
NodeCondition: unhealthyNodeCondition,
Machine: machines[0],
})
fmt.Fprintln(GinkgoWriter, "Waiting for remediation")
WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition(ctx, WaitForMachineHealthCheckToRemediateUnhealthyNodeConditionInput{
ClusterProxy: input.ClusterProxy,
Cluster: input.Cluster,
MachineHealthCheck: mhc,
MachinesCount: len(machines),
}, input.WaitForMachineRemediation...)
}
}
// GetMachineHealthChecksForClusterInput is the input for GetMachineHealthChecksForCluster.
type GetMachineHealthChecksForClusterInput struct {
Lister Lister
ClusterName string
Namespace string
}
// GetMachineHealthChecksForCluster returns the MachineHealthCheck objects for a cluster.
// Important! This method relies on labels that are created by the CAPI controllers during the first reconciliation, so
// it is necessary to ensure this has already happened before calling it.
func GetMachineHealthChecksForCluster(ctx context.Context, input GetMachineHealthChecksForClusterInput) []*clusterv1.MachineHealthCheck {
machineHealthCheckList := &clusterv1.MachineHealthCheckList{}
Eventually(func() error {
return input.Lister.List(ctx, machineHealthCheckList, byClusterOptions(input.ClusterName, input.Namespace)...)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachineDeployments object for Cluster %s", klog.KRef(input.Namespace, input.ClusterName))
machineHealthChecks := make([]*clusterv1.MachineHealthCheck, len(machineHealthCheckList.Items))
for i := range machineHealthCheckList.Items {
machineHealthChecks[i] = &machineHealthCheckList.Items[i]
}
return machineHealthChecks
}
// machineHealthCheckOptions returns a set of ListOptions that can be used to get all machine objects belonging to a MachineHealthCheck.
func machineHealthCheckOptions(machineHealthCheck clusterv1.MachineHealthCheck) []client.ListOption {
return []client.ListOption{
client.MatchingLabels(machineHealthCheck.Spec.Selector.MatchLabels),
}
}
// WaitForMachineHealthCheckToRemediateUnhealthyNodeConditionInput is the input for WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition.
type WaitForMachineHealthCheckToRemediateUnhealthyNodeConditionInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
MachineHealthCheck *clusterv1.MachineHealthCheck
MachinesCount int
}
// WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition waits until the node with the unhealthy condition is remediated and all machines matched by the MachineHealthCheck are healthy again.
func WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition(ctx context.Context, input WaitForMachineHealthCheckToRemediateUnhealthyNodeConditionInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition")
Expect(input.MachineHealthCheck).NotTo(BeNil(), "Invalid argument. input.MachineHealthCheck can't be nil when calling WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition")
Expect(input.MachinesCount).NotTo(BeZero(), "Invalid argument. input.MachinesCount can't be zero when calling WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition")
fmt.Fprintln(GinkgoWriter, "Waiting until the node with unhealthy node condition is remediated")
Eventually(func() bool {
machines := GetMachinesByMachineHealthCheck(ctx, GetMachinesByMachineHealthCheckInput{
Lister: input.ClusterProxy.GetClient(),
ClusterName: input.Cluster.Name,
MachineHealthCheck: input.MachineHealthCheck,
})
// Wait for all the machines to exist.
// NOTE: this is required given that this helper is called after a remediation
// and we want to make sure all the machines are back in place before testing for unhealthyCondition being fixed.
if len(machines) < input.MachinesCount {
return false
}
for _, machine := range machines {
if machine.Status.NodeRef == nil {
return false
}
node := &corev1.Node{}
// This should not be an Expect(), because it may return an error during machine deletion.
err := input.ClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Namespace, input.Cluster.Name).GetClient().Get(ctx, types.NamespacedName{Name: machine.Status.NodeRef.Name, Namespace: machine.Status.NodeRef.Namespace}, node)
if err != nil {
return false
}
if hasMatchingUnhealthyConditions(input.MachineHealthCheck, node.Status.Conditions) {
return false
}
}
return true
}, intervals...).Should(BeTrue())
}
// hasMatchingUnhealthyConditions returns true if any node condition matches one of the machine health check's unhealthy conditions.
func hasMatchingUnhealthyConditions(machineHealthCheck *clusterv1.MachineHealthCheck, nodeConditions []corev1.NodeCondition) bool {
for _, unhealthyCondition := range machineHealthCheck.Spec.UnhealthyConditions {
for _, nodeCondition := range nodeConditions {
if nodeCondition.Type == unhealthyCondition.Type && nodeCondition.Status == unhealthyCondition.Status {
return true
}
}
}
return false
}

View File

@ -0,0 +1,306 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"fmt"
"strings"
"github.com/blang/semver"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
// GetMachinePoolsByClusterInput is the input for GetMachinePoolsByCluster.
type GetMachinePoolsByClusterInput struct {
Lister Lister
ClusterName string
Namespace string
}
// GetMachinePoolsByCluster returns the MachinePools objects for a cluster.
// Important! This method relies on labels that are created by the CAPI controllers during the first reconciliation, so
// it is necessary to ensure this has already happened before calling it.
func GetMachinePoolsByCluster(ctx context.Context, input GetMachinePoolsByClusterInput) []*expv1.MachinePool {
Expect(ctx).NotTo(BeNil(), "ctx is required for GetMachinePoolsByCluster")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling GetMachinePoolsByCluster")
Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling GetMachinePoolsByCluster")
Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling GetMachinePoolsByCluster")
mpList := &expv1.MachinePoolList{}
Eventually(func() error {
return input.Lister.List(ctx, mpList, byClusterOptions(input.ClusterName, input.Namespace)...)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachinePools object for Cluster %s", klog.KRef(input.Namespace, input.ClusterName))
mps := make([]*expv1.MachinePool, len(mpList.Items))
for i := range mpList.Items {
mps[i] = &mpList.Items[i]
}
return mps
}
// WaitForMachinePoolNodesToExistInput is the input for WaitForMachinePoolNodesToExist.
type WaitForMachinePoolNodesToExistInput struct {
Getter Getter
MachinePool *expv1.MachinePool
}
// WaitForMachinePoolNodesToExist waits until all nodes associated with a machine pool exist.
func WaitForMachinePoolNodesToExist(ctx context.Context, input WaitForMachinePoolNodesToExistInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachinePoolNodesToExist")
Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling WaitForMachinePoolNodesToExist")
Expect(input.MachinePool).ToNot(BeNil(), "Invalid argument. input.MachinePool can't be nil when calling WaitForMachinePoolNodesToExist")
By("Waiting for the machine pool workload nodes")
Eventually(func() (int, error) {
nn := client.ObjectKey{
Namespace: input.MachinePool.Namespace,
Name: input.MachinePool.Name,
}
if err := input.Getter.Get(ctx, nn, input.MachinePool); err != nil {
return 0, err
}
return int(input.MachinePool.Status.ReadyReplicas), nil
}, intervals...).Should(Equal(int(*input.MachinePool.Spec.Replicas)), "Timed out waiting for %v ready replicas for MachinePool %s", *input.MachinePool.Spec.Replicas, klog.KObj(input.MachinePool))
}
// DiscoveryAndWaitForMachinePoolsInput is the input type for DiscoveryAndWaitForMachinePools.
type DiscoveryAndWaitForMachinePoolsInput struct {
Getter Getter
Lister Lister
Cluster *clusterv1.Cluster
}
// DiscoveryAndWaitForMachinePools discovers the MachinePools existing in a cluster and waits for them to be ready (all the machines provisioned).
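// A usage sketch (clusterProxy, cluster and the intervals key are assumed to be provided by the calling spec):
//
//	machinePools := framework.DiscoveryAndWaitForMachinePools(ctx, framework.DiscoveryAndWaitForMachinePoolsInput{
//		Getter:  clusterProxy.GetClient(),
//		Lister:  clusterProxy.GetClient(),
//		Cluster: cluster,
//	}, e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes")...)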
func DiscoveryAndWaitForMachinePools(ctx context.Context, input DiscoveryAndWaitForMachinePoolsInput, intervals ...interface{}) []*expv1.MachinePool {
Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForMachinePools")
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForMachinePools")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoveryAndWaitForMachinePools")
machinePools := GetMachinePoolsByCluster(ctx, GetMachinePoolsByClusterInput{
Lister: input.Lister,
ClusterName: input.Cluster.Name,
Namespace: input.Cluster.Namespace,
})
for _, machinepool := range machinePools {
WaitForMachinePoolNodesToExist(ctx, WaitForMachinePoolNodesToExistInput{
Getter: input.Getter,
MachinePool: machinepool,
}, intervals...)
// TODO: check for failure domains; currently MP doesn't provide a way to check where Machines are placed
// (checking infrastructure is the only alternative, but this makes the test not portable).
}
return machinePools
}
// UpgradeMachinePoolAndWaitInput is the input type for UpgradeMachinePoolAndWait.
type UpgradeMachinePoolAndWaitInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
UpgradeVersion string
MachinePools []*expv1.MachinePool
WaitForMachinePoolToBeUpgraded []interface{}
}
// UpgradeMachinePoolAndWait upgrades a machine pool and waits for its instances to be upgraded.
func UpgradeMachinePoolAndWait(ctx context.Context, input UpgradeMachinePoolAndWaitInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeMachinePoolAndWait")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeMachinePoolAndWait")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeMachinePoolAndWait")
Expect(input.UpgradeVersion).ToNot(BeNil(), "Invalid argument. input.UpgradeVersion can't be nil when calling UpgradeMachinePoolAndWait")
Expect(input.MachinePools).ToNot(BeNil(), "Invalid argument. input.MachinePools can't be empty when calling UpgradeMachinePoolAndWait")
mgmtClient := input.ClusterProxy.GetClient()
for i := range input.MachinePools {
mp := input.MachinePools[i]
log.Logf("Patching the new Kubernetes version to Machine Pool %s", klog.KObj(mp))
patchHelper, err := patch.NewHelper(mp, mgmtClient)
Expect(err).ToNot(HaveOccurred())
// Store old version.
oldVersion := mp.Spec.Template.Spec.Version
// Upgrade to new Version.
mp.Spec.Template.Spec.Version = &input.UpgradeVersion
// Drop "-cgroupfs" suffix from BootstrapConfig ref name, i.e. we switch from a
// BootstrapConfig with pinned cgroupfs cgroupDriver to the regular BootstrapConfig.
// This is a workaround for CAPD, because kind and CAPD only support:
// * cgroupDriver cgroupfs for Kubernetes < v1.24
// * cgroupDriver systemd for Kubernetes >= v1.24.
// We can remove this as soon as we don't test upgrades from Kubernetes < v1.24 anymore with CAPD
// or MachinePools are supported in ClusterClass.
if mp.Spec.Template.Spec.InfrastructureRef.Kind == "DockerMachinePool" {
version, err := semver.ParseTolerant(input.UpgradeVersion)
Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("Failed to parse UpgradeVersion %q", input.UpgradeVersion))
if version.GTE(semver.MustParse("1.24.0")) && strings.HasSuffix(mp.Spec.Template.Spec.Bootstrap.ConfigRef.Name, "-cgroupfs") {
mp.Spec.Template.Spec.Bootstrap.ConfigRef.Name = strings.TrimSuffix(mp.Spec.Template.Spec.Bootstrap.ConfigRef.Name, "-cgroupfs")
// We have to set DataSecretName to nil, so the secret of the new bootstrap ConfigRef gets picked up.
mp.Spec.Template.Spec.Bootstrap.DataSecretName = nil
}
}
Eventually(func() error {
return patchHelper.Patch(ctx, mp)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch the new Kubernetes version to Machine Pool %s", klog.KObj(mp))
log.Logf("Waiting for Kubernetes versions of machines in MachinePool %s to be upgraded from %s to %s",
klog.KObj(mp), *oldVersion, input.UpgradeVersion)
WaitForMachinePoolInstancesToBeUpgraded(ctx, WaitForMachinePoolInstancesToBeUpgradedInput{
Getter: mgmtClient,
WorkloadClusterGetter: input.ClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Namespace, input.Cluster.Name).GetClient(),
Cluster: input.Cluster,
MachineCount: int(*mp.Spec.Replicas),
KubernetesUpgradeVersion: input.UpgradeVersion,
MachinePool: mp,
}, input.WaitForMachinePoolToBeUpgraded...)
}
}
// ScaleMachinePoolAndWaitInput is the input type for ScaleMachinePoolAndWait.
type ScaleMachinePoolAndWaitInput struct {
ClusterProxy ClusterProxy
Cluster *clusterv1.Cluster
Replicas int32
MachinePools []*expv1.MachinePool
WaitForMachinePoolToScale []interface{}
}
// ScaleMachinePoolAndWait scales a machine pool and waits for its instances to scale up.
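// A usage sketch (the replica count and intervals key are illustrative):
//
//	framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
//		ClusterProxy:              clusterProxy,
//		Cluster:                   cluster,
//		Replicas:                  3,
//		MachinePools:              machinePools,
//		WaitForMachinePoolToScale: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
//	})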
func ScaleMachinePoolAndWait(ctx context.Context, input ScaleMachinePoolAndWaitInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeMachinePoolAndWait")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeMachinePoolAndWait")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeMachinePoolAndWait")
Expect(input.MachinePools).ToNot(BeNil(), "Invalid argument. input.MachinePools can't be empty when calling UpgradeMachinePoolAndWait")
mgmtClient := input.ClusterProxy.GetClient()
for _, mp := range input.MachinePools {
log.Logf("Patching the replica count in Machine Pool %s", klog.KObj(mp))
patchHelper, err := patch.NewHelper(mp, mgmtClient)
Expect(err).ToNot(HaveOccurred())
mp.Spec.Replicas = &input.Replicas
Eventually(func() error {
return patchHelper.Patch(ctx, mp)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch MachinePool %s", klog.KObj(mp))
}
for _, mp := range input.MachinePools {
WaitForMachinePoolNodesToExist(ctx, WaitForMachinePoolNodesToExistInput{
Getter: mgmtClient,
MachinePool: mp,
}, input.WaitForMachinePoolToScale...)
}
}
// WaitForMachinePoolInstancesToBeUpgradedInput is the input for WaitForMachinePoolInstancesToBeUpgraded.
type WaitForMachinePoolInstancesToBeUpgradedInput struct {
Getter Getter
WorkloadClusterGetter Getter
Cluster *clusterv1.Cluster
KubernetesUpgradeVersion string
MachineCount int
MachinePool *expv1.MachinePool
}
// WaitForMachinePoolInstancesToBeUpgraded waits until all instances belonging to a MachinePool are upgraded to the correct Kubernetes version.
func WaitForMachinePoolInstancesToBeUpgraded(ctx context.Context, input WaitForMachinePoolInstancesToBeUpgradedInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachinePoolInstancesToBeUpgraded")
Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling WaitForMachinePoolInstancesToBeUpgraded")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling WaitForMachinePoolInstancesToBeUpgraded")
Expect(input.KubernetesUpgradeVersion).ToNot(BeNil(), "Invalid argument. input.KubernetesUpgradeVersion can't be nil when calling WaitForMachinePoolInstancesToBeUpgraded")
Expect(input.MachinePool).ToNot(BeNil(), "Invalid argument. input.MachinePool can't be nil when calling WaitForMachinePoolInstancesToBeUpgraded")
Expect(input.MachineCount).To(BeNumerically(">", 0), "Invalid argument. input.MachineCount can't be smaller than 1 when calling WaitForMachinePoolInstancesToBeUpgraded")
log.Logf("Ensuring all MachinePool Instances have upgraded kubernetes version %s", input.KubernetesUpgradeVersion)
Eventually(func() (int, error) {
nn := client.ObjectKey{
Namespace: input.MachinePool.Namespace,
Name: input.MachinePool.Name,
}
if err := input.Getter.Get(ctx, nn, input.MachinePool); err != nil {
return 0, err
}
versions := getMachinePoolInstanceVersions(ctx, GetMachinesPoolInstancesInput{
WorkloadClusterGetter: input.WorkloadClusterGetter,
Namespace: input.Cluster.Namespace,
MachinePool: input.MachinePool,
})
matches := 0
for _, version := range versions {
if version == input.KubernetesUpgradeVersion {
matches++
}
}
if matches != len(versions) {
return 0, errors.New("old version instances remain")
}
return matches, nil
}, intervals...).Should(Equal(input.MachineCount), "Timed out waiting for all MachinePool %s instances to be upgraded to Kubernetes version %s", klog.KObj(input.MachinePool), input.KubernetesUpgradeVersion)
}
// GetMachinesPoolInstancesInput is the input for GetMachinesPoolInstances.
type GetMachinesPoolInstancesInput struct {
WorkloadClusterGetter Getter
Namespace string
MachinePool *expv1.MachinePool
}
// getMachinePoolInstanceVersions returns the Kubernetes versions of the machine pool instances.
func getMachinePoolInstanceVersions(ctx context.Context, input GetMachinesPoolInstancesInput) []string {
Expect(ctx).NotTo(BeNil(), "ctx is required for getMachinePoolInstanceVersions")
Expect(input.WorkloadClusterGetter).ToNot(BeNil(), "Invalid argument. input.WorkloadClusterGetter can't be nil when calling getMachinePoolInstanceVersions")
Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling getMachinePoolInstanceVersions")
Expect(input.MachinePool).ToNot(BeNil(), "Invalid argument. input.MachinePool can't be nil when calling getMachinePoolInstanceVersions")
instances := input.MachinePool.Status.NodeRefs
versions := make([]string, len(instances))
for i, instance := range instances {
node := &corev1.Node{}
err := wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
err := input.WorkloadClusterGetter.Get(ctx, client.ObjectKey{Name: instance.Name}, node)
if err != nil {
return false, nil //nolint:nilerr
}
return true, nil
})
if err != nil {
versions[i] = "unknown"
} else {
versions[i] = node.Status.NodeInfo.KubeletVersion
}
}
return versions
}

View File

@ -0,0 +1,198 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/kubesphere/kubekey/test/e2e/framework/internal/log"
)
// CreateNamespaceInput is the input type for CreateNamespace.
type CreateNamespaceInput struct {
Creator Creator
Name string
}
// CreateNamespace is used to create a namespace object.
// If name is empty, a "test-" + util.RandomString(6) name will be generated.
func CreateNamespace(ctx context.Context, input CreateNamespaceInput, intervals ...interface{}) *corev1.Namespace {
Expect(ctx).NotTo(BeNil(), "ctx is required for DeleteNamespace")
Expect(input.Creator).NotTo(BeNil(), "input.Creator is required for CreateNamespace")
if input.Name == "" {
input.Name = fmt.Sprintf("test-%s", util.RandomString(6))
}
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: input.Name,
},
}
log.Logf("Creating namespace %s", input.Name)
Eventually(func() error {
return input.Creator.Create(ctx, ns)
}, intervals...).Should(Succeed(), "Failed to create namespace %s", input.Name)
return ns
}
// EnsureNamespace verifies whether a namespace exists. If it doesn't, it will
// create the namespace.
func EnsureNamespace(ctx context.Context, mgmt client.Client, namespace string) {
ns := &corev1.Namespace{}
err := mgmt.Get(ctx, client.ObjectKey{Name: namespace}, ns)
if err != nil && apierrors.IsNotFound(err) {
ns = &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
},
}
Eventually(func() error {
return mgmt.Create(ctx, ns)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to create namespace %q", namespace)
}
}
// DeleteNamespaceInput is the input type for DeleteNamespace.
type DeleteNamespaceInput struct {
Deleter Deleter
Name string
}
// DeleteNamespace is used to delete namespace object.
func DeleteNamespace(ctx context.Context, input DeleteNamespaceInput, intervals ...interface{}) {
Expect(ctx).NotTo(BeNil(), "ctx is required for DeleteNamespace")
Expect(input.Deleter).NotTo(BeNil(), "input.Deleter is required for DeleteNamespace")
Expect(input.Name).NotTo(BeEmpty(), "input.Name is required for DeleteNamespace")
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: input.Name,
},
}
log.Logf("Deleting namespace %s", input.Name)
Eventually(func() error {
return input.Deleter.Delete(ctx, ns)
}, intervals...).Should(Succeed(), "Failed to delete namespace %s", input.Name)
}
// WatchNamespaceEventsInput is the input type for WatchNamespaceEvents.
type WatchNamespaceEventsInput struct {
ClientSet *kubernetes.Clientset
Name string
LogFolder string
}
// WatchNamespaceEvents creates a watcher that streams namespace events into a file.
// Example usage:
//
// ctx, cancelWatches := context.WithCancel(context.Background())
// go func() {
// defer GinkgoRecover()
// framework.WatchNamespaceEvents(ctx, framework.WatchNamespaceEventsInput{
// ClientSet: clientSet,
// Name: namespace.Name,
// LogFolder: logFolder,
// })
// }()
// defer cancelWatches()
func WatchNamespaceEvents(ctx context.Context, input WatchNamespaceEventsInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for WatchNamespaceEvents")
Expect(input.ClientSet).NotTo(BeNil(), "input.ClientSet is required for WatchNamespaceEvents")
Expect(input.Name).NotTo(BeEmpty(), "input.Name is required for WatchNamespaceEvents")
logFile := filepath.Clean(path.Join(input.LogFolder, "resources", input.Name, "events.log"))
Expect(os.MkdirAll(filepath.Dir(logFile), 0750)).To(Succeed())
f, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
Expect(err).NotTo(HaveOccurred())
defer f.Close()
informerFactory := informers.NewSharedInformerFactoryWithOptions(
input.ClientSet,
10*time.Minute,
informers.WithNamespace(input.Name),
)
eventInformer := informerFactory.Core().V1().Events().Informer()
eventInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
e := obj.(*corev1.Event)
_, _ = f.WriteString(fmt.Sprintf("[New Event] %s\n\tresource: %s/%s/%s\n\treason: %s\n\tmessage: %s\n\tfull: %#v\n",
klog.KObj(e), e.InvolvedObject.APIVersion, e.InvolvedObject.Kind, e.InvolvedObject.Name, e.Reason, e.Message, e))
},
UpdateFunc: func(_, obj interface{}) {
e := obj.(*corev1.Event)
_, _ = f.WriteString(fmt.Sprintf("[Updated Event] %s\n\tresource: %s/%s/%s\n\treason: %s\n\tmessage: %s\n\tfull: %#v\n",
klog.KObj(e), e.InvolvedObject.APIVersion, e.InvolvedObject.Kind, e.InvolvedObject.Name, e.Reason, e.Message, e))
},
DeleteFunc: func(obj interface{}) {},
})
stopInformer := make(chan struct{})
defer close(stopInformer)
informerFactory.Start(stopInformer)
<-ctx.Done()
stopInformer <- struct{}{}
}
// CreateNamespaceAndWatchEventsInput is the input type for CreateNamespaceAndWatchEvents.
type CreateNamespaceAndWatchEventsInput struct {
Creator Creator
ClientSet *kubernetes.Clientset
Name string
LogFolder string
}
// CreateNamespaceAndWatchEvents creates a namespace and sets up a watch for the namespace events.
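// A usage sketch, mirroring how specs typically set up their namespace (specName and artifactFolder are assumed
// test variables):
//
//	namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
//		Creator:   clusterProxy.GetClient(),
//		ClientSet: clusterProxy.GetClientSet(),
//		Name:      fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
//		LogFolder: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName()),
//	})
//	defer cancelWatches()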
func CreateNamespaceAndWatchEvents(ctx context.Context, input CreateNamespaceAndWatchEventsInput) (*corev1.Namespace, context.CancelFunc) {
Expect(ctx).NotTo(BeNil(), "ctx is required for CreateNamespaceAndWatchEvents")
Expect(input.Creator).ToNot(BeNil(), "Invalid argument. input.Creator can't be nil when calling CreateNamespaceAndWatchEvents")
Expect(input.ClientSet).ToNot(BeNil(), "Invalid argument. input.ClientSet can't be nil when calling CreateNamespaceAndWatchEvents")
Expect(input.Name).ToNot(BeEmpty(), "Invalid argument. input.Name can't be empty when calling CreateNamespaceAndWatchEvents")
Expect(os.MkdirAll(input.LogFolder, 0750)).To(Succeed(), "Invalid argument. input.LogFolder can't be created in CreateNamespaceAndWatchEvents")
namespace := CreateNamespace(ctx, CreateNamespaceInput{Creator: input.Creator, Name: input.Name}, "40s", "10s")
Expect(namespace).ToNot(BeNil(), "Failed to create namespace %q", input.Name)
log.Logf("Creating event watcher for namespace %q", input.Name)
watchesCtx, cancelWatches := context.WithCancel(ctx)
go func() {
defer GinkgoRecover()
WatchNamespaceEvents(watchesCtx, WatchNamespaceEventsInput{
ClientSet: input.ClientSet,
Name: namespace.Name,
LogFolder: input.LogFolder,
})
}()
return namespace, cancelWatches
}

View File

@ -0,0 +1,94 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"strings"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// PodListCondition is a type that operates a condition on a Pod.
type PodListCondition func(p *corev1.PodList) error
// WaitForPodListConditionInput is the input args for WaitForPodListCondition.
type WaitForPodListConditionInput struct {
Lister Lister
ListOptions *client.ListOptions
Condition PodListCondition
}
// WaitForPodListCondition waits for the specified condition to be true for all
// pods returned from the list filter.
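// A usage sketch using the PhasePodCondition defined below (the namespace, workload client and intervals are
// illustrative assumptions):
//
//	framework.WaitForPodListCondition(ctx, framework.WaitForPodListConditionInput{
//		Lister:      workloadClient,
//		ListOptions: &client.ListOptions{Namespace: "kube-system"},
//		Condition:   framework.PhasePodCondition(corev1.PodRunning),
//	}, "10m", "10s")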
func WaitForPodListCondition(ctx context.Context, input WaitForPodListConditionInput, intervals ...interface{}) {
Eventually(func() (bool, error) {
podList := &corev1.PodList{}
if err := input.Lister.List(ctx, podList, input.ListOptions); err != nil {
return false, err
}
// all pods in the list should satisfy the condition
err := input.Condition(podList)
if err != nil {
return false, err
}
return true, nil
}, intervals...).Should(BeTrue())
}
// EtcdImageTagCondition returns a PodListCondition that ensures the pod image
// contains the specified image tag.
func EtcdImageTagCondition(expectedTag string, expectedCount int) PodListCondition {
return func(pl *corev1.PodList) error {
countWithCorrectTag := 0
for _, pod := range pl.Items {
if strings.Contains(pod.Spec.Containers[0].Image, expectedTag) {
countWithCorrectTag++
}
}
if countWithCorrectTag != expectedCount {
return errors.Errorf("etcdImageTagCondition: expected %d pods to have image tag %q, got %d", expectedCount, expectedTag, countWithCorrectTag)
}
// This check is to ensure that if there are three controlplane nodes,
// then there are only three etcd pods running. Currently, we create a
// new etcd pod before deleting the previous one. So we can have a
// case where there are three etcd pods with the correct tag and one
// left over that has yet to be deleted.
if len(pl.Items) != expectedCount {
return errors.Errorf("etcdImageTagCondition: expected %d pods, got %d", expectedCount, len(pl.Items))
}
return nil
}
}
// PhasePodCondition is a PodListCondition ensuring that pods are in the expected
// pod phase.
func PhasePodCondition(expectedPhase corev1.PodPhase) PodListCondition {
return func(pl *corev1.PodList) error {
for _, pod := range pl.Items {
if pod.Status.Phase != expectedPhase {
return errors.Errorf("pod %q is not %s", pod.Name, expectedPhase)
}
}
return nil
}
}

View File

@ -14,7 +14,7 @@
limitations under the License.
*/
package e2e
package capkk
import (
"fmt"

View File

@ -0,0 +1,18 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package capkk implements end to end testing.
package capkk

View File

@ -17,7 +17,7 @@
limitations under the License.
*/
package e2e
package capkk
import (
"flag"
@ -113,7 +113,7 @@ func TestE2E(t *testing.T) {
defer w.Close()
}
RunSpecs(t, "capi-e2e")
RunSpecs(t, "capkk-e2e")
}
// Using a SynchronizedBeforeSuite for controlling how to create resources shared across ParallelNodes (~ginkgo threads).

View File

@ -17,7 +17,7 @@
limitations under the License.
*/
package e2e
package capkk
import (
"context"

View File

@ -0,0 +1,91 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k3s
import (
"context"
"fmt"
"path/filepath"
. "github.com/onsi/ginkgo"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"github.com/kubesphere/kubekey/test/e2e/framework"
)
// Test suite constants for e2e config variables.
const (
KubernetesVersion = "KUBERNETES_VERSION"
KubernetesVersionManagement = "KUBERNETES_VERSION_MANAGEMENT"
CNIPath = "CNI"
CNIResources = "CNI_RESOURCES"
IPFamily = "IP_FAMILY"
)
// Byf is a wrapper around By that formats its arguments.
func Byf(format string, a ...interface{}) {
By(fmt.Sprintf(format, a...))
}
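// setupSpecNamespace creates a namespace for hosting the spec and starts watching its events.
// A usage sketch from a spec (specName, artifactFolder, e2eConfig, skipCleanup and clusterResources are assumed
// to be suite-level variables; clusterResources.Cluster is an assumed field holding the workload cluster):
//
//	namespace, cancelWatches := setupSpecNamespace(ctx, specName, clusterProxy, artifactFolder)
//	defer func() {
//		dumpSpecResourcesAndCleanup(ctx, specName, clusterProxy, artifactFolder, namespace, cancelWatches, clusterResources.Cluster, e2eConfig.GetIntervals, skipCleanup)
//	}()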
func setupSpecNamespace(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder string) (*corev1.Namespace, context.CancelFunc) {
Byf("Creating a namespace for hosting the %q test spec", specName)
namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
Creator: clusterProxy.GetClient(),
ClientSet: clusterProxy.GetClientSet(),
Name: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
LogFolder: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName()),
})
return namespace, cancelWatches
}
func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []interface{}, skipCleanup bool) {
Byf("Dumping logs from the %q workload cluster", cluster.Name)
// Dump all the logs from the workload cluster before deleting them.
clusterProxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(artifactFolder, "clusters", cluster.Name))
Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name)
// Dump all Cluster API related resources to artifacts before deleting them.
framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
Lister: clusterProxy.GetClient(),
Namespace: namespace.Name,
LogPath: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName(), "resources"),
})
if !skipCleanup {
Byf("Deleting cluster %s", klog.KObj(cluster))
// Until https://github.com/kubernetes-sigs/cluster-api/issues/2955 is addressed in future iterations, there is a chance
// that the cluster variable is not set even if the cluster exists, so we call DeleteAllClustersAndWait
// instead of DeleteClusterAndWait.
framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
Client: clusterProxy.GetClient(),
Namespace: namespace.Name,
}, intervalsGetter(specName, "wait-delete-cluster")...)
Byf("Deleting namespace used for hosting the %q test spec", specName)
framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
Deleter: clusterProxy.GetClient(),
Name: namespace.Name,
})
}
cancelWatches()
}
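
These two helpers form the standard bracket around a spec in this package: setupSpecNamespace runs in a BeforeEach to give the spec an isolated namespace plus an event watcher, and dumpSpecResourcesAndCleanup runs in an AfterEach to archive artifacts and tear everything down. The following is a minimal sketch, not part of this commit, of how an additional spec could reuse them; the spec name "sample" is a placeholder, and it assumes this file's imports plus the suite-level globals (ctx, e2eConfig, bootstrapClusterProxy, artifactFolder, skipCleanup) defined in e2e_suite_test.go.

var _ = Describe("Sample spec reusing the namespace helpers [sketch]", func() {
	var (
		namespace     *corev1.Namespace
		cancelWatches context.CancelFunc
		cluster       *clusterv1.Cluster
	)

	BeforeEach(func() {
		// Isolated namespace plus event watcher for this spec.
		namespace, cancelWatches = setupSpecNamespace(ctx, "sample", bootstrapClusterProxy, artifactFolder)
	})

	It("would run the actual test steps here", func() {
		// Create and verify workload resources, assigning the created cluster to
		// `cluster` so the AfterEach below can dump and delete it.
	})

	AfterEach(func() {
		// Dump resources, delete the cluster and the namespace, then stop the watcher.
		dumpSpecResourcesAndCleanup(ctx, "sample", bootstrapClusterProxy, artifactFolder,
			namespace, cancelWatches, cluster, e2eConfig.GetIntervals, skipCleanup)
	})
})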

View File

@ -0,0 +1,18 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package k3s implements k3s end-to-end testing.
package k3s

View File

@ -0,0 +1,294 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k3s
import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/test/framework/ginkgoextensions"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/kubesphere/kubekey/test/e2e/framework"
"github.com/kubesphere/kubekey/test/e2e/framework/bootstrap"
"github.com/kubesphere/kubekey/test/e2e/framework/clusterctl"
)
// Test suite flags.
var (
// configPath is the path to the e2e config file.
configPath string
// useExistingCluster instructs the test to use the current cluster instead of creating a new one (default discovery rules apply).
useExistingCluster bool
// artifactFolder is the folder to store e2e test artifacts.
artifactFolder string
// clusterctlConfig is the file which tests will use as a clusterctl config.
// If it is not set, a local clusterctl repository (including a clusterctl config) will be created automatically.
clusterctlConfig string
// alsoLogToFile enables additional logging to the 'ginkgo-log.txt' file in the artifact folder.
// These logs also contain timestamps.
alsoLogToFile bool
// skipCleanup prevents cleanup of test resources e.g. for debug purposes.
skipCleanup bool
)
// Test suite global vars.
var (
ctx = ctrl.SetupSignalHandler()
// e2eConfig to be used for this test, read from configPath.
e2eConfig *clusterctl.E2EConfig
// clusterctlConfigPath to be used for this test, created by generating a clusterctl local repository
// with the providers specified in the configPath.
clusterctlConfigPath string
// bootstrapClusterProvider manages provisioning of the bootstrap cluster to be used for the e2e tests.
// Please note that provisioning will be skipped if e2e.use-existing-cluster is provided.
bootstrapClusterProvider bootstrap.ClusterProvider
// bootstrapClusterProxy allows interacting with the bootstrap cluster to be used for the e2e tests.
bootstrapClusterProxy framework.ClusterProxy
)
func init() {
flag.StringVar(&configPath, "e2e.config", "", "path to the e2e config file")
flag.StringVar(&artifactFolder, "e2e.artifacts-folder", "", "folder where e2e test artifacts should be stored")
flag.BoolVar(&alsoLogToFile, "e2e.also-log-to-file", true, "if true, ginkgo logs are additionally written to the `ginkgo-log.txt` file in the artifacts folder (including timestamps)")
flag.BoolVar(&skipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped")
flag.StringVar(&clusterctlConfig, "e2e.clusterctl-config", "", "file which tests will use as a clusterctl config. If it is not set, a local clusterctl repository (including a clusterctl config) will be created automatically.")
flag.BoolVar(&useExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)")
SetDefaultEventuallyTimeout(30 * time.Minute)
SetDefaultEventuallyPollingInterval(10 * time.Second)
}
func TestE2E(t *testing.T) {
g := NewWithT(t)
// If running in prow, make sure to use the artifacts folder that will be reported in test grid (ignoring the value provided by flag).
if prowArtifactFolder, exists := os.LookupEnv("ARTIFACTS"); exists {
artifactFolder = prowArtifactFolder
}
// ensure the artifacts folder exists
g.Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", artifactFolder) //nolint:gosec
RegisterFailHandler(Fail)
if alsoLogToFile {
w, err := ginkgoextensions.EnableFileLogging(filepath.Join(artifactFolder, "ginkgo-log.txt"))
g.Expect(err).ToNot(HaveOccurred())
defer w.Close()
}
RunSpecs(t, "capkk-k3s-e2e")
}
// Using a SynchronizedBeforeSuite for controlling how to create resources shared across ParallelNodes (~ginkgo threads).
// The local clusterctl repository & the bootstrap cluster are created once and shared across all the tests.
var _ = SynchronizedBeforeSuite(func() []byte {
// Before all ParallelNodes.
Expect(configPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.")
By("Initializing a runtime.Scheme with all the GVK relevant for this test")
scheme := initScheme()
Byf("Loading the e2e test configuration from %q", configPath)
e2eConfig = loadE2EConfig(configPath)
if clusterctlConfig == "" {
Byf("Creating a clusterctl local repository into %q", artifactFolder)
clusterctlConfigPath = createClusterctlLocalRepository(e2eConfig, filepath.Join(artifactFolder, "repository"))
} else {
Byf("Using existing clusterctl config %q", clusterctlConfig)
clusterctlConfigPath = clusterctlConfig
}
By("Setting up the bootstrap cluster")
bootstrapClusterProvider, bootstrapClusterProxy = setupBootstrapCluster(e2eConfig, scheme, useExistingCluster)
By("Initializing the bootstrap cluster")
initBootstrapCluster(bootstrapClusterProxy, e2eConfig, clusterctlConfigPath, artifactFolder)
return []byte(
strings.Join([]string{
artifactFolder,
configPath,
clusterctlConfigPath,
bootstrapClusterProxy.GetKubeconfigPath(),
}, ","),
)
}, func(data []byte) {
// Before each ParallelNode.
parts := strings.Split(string(data), ",")
Expect(parts).To(HaveLen(4))
artifactFolder = parts[0]
configPath = parts[1]
clusterctlConfigPath = parts[2]
kubeconfigPath := parts[3]
e2eConfig = loadE2EConfig(configPath)
bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme())
})
// Using a SynchronizedAfterSuite for controlling how to delete resources shared across ParallelNodes (~ginkgo threads).
// The bootstrap cluster is shared across all the tests, so it should be deleted only after all ParallelNodes complete.
// The local clusterctl repository is preserved like everything else created into the artifact folder.
var _ = SynchronizedAfterSuite(func() {
// After each ParallelNode.
}, func() {
// After all ParallelNodes.
By("Dumping logs from the bootstrap cluster")
dumpBootstrapClusterLogs(bootstrapClusterProxy)
By("Tearing down the management cluster")
if !skipCleanup {
tearDown(bootstrapClusterProvider, bootstrapClusterProxy)
}
})
func initScheme() *runtime.Scheme {
sc := runtime.NewScheme()
framework.TryAddDefaultSchemes(sc)
return sc
}
func loadE2EConfig(configPath string) *clusterctl.E2EConfig {
config := clusterctl.LoadE2EConfig(ctx, clusterctl.LoadE2EConfigInput{ConfigPath: configPath})
Expect(config).ToNot(BeNil(), "Failed to load E2E config from %s", configPath)
return config
}
func createClusterctlLocalRepository(config *clusterctl.E2EConfig, repositoryFolder string) string {
createRepositoryInput := clusterctl.CreateRepositoryInput{
E2EConfig: config,
RepositoryFolder: repositoryFolder,
}
// Ensure a CNI file is defined in the config and register a FileTransformation to inject the referenced file in place of the CNI_RESOURCES envSubst variable.
Expect(config.Variables).To(HaveKey(CNIPath), "Missing %s variable in the config", CNIPath)
cniPath := config.GetVariable(CNIPath)
Expect(cniPath).To(BeAnExistingFile(), "The %s variable should resolve to an existing file", CNIPath)
createRepositoryInput.RegisterClusterResourceSetConfigMapTransformation(cniPath, CNIResources)
clusterctlConfig := clusterctl.CreateRepository(ctx, createRepositoryInput)
Expect(clusterctlConfig).To(BeAnExistingFile(), "The clusterctl config file does not exist in the local repository %s", repositoryFolder)
return clusterctlConfig
}
func setupBootstrapCluster(config *clusterctl.E2EConfig, scheme *runtime.Scheme, useExistingCluster bool) (bootstrap.ClusterProvider, framework.ClusterProxy) {
var clusterProvider bootstrap.ClusterProvider
kubeconfigPath := ""
if !useExistingCluster {
By("Creating the bootstrap cluster")
clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
Name: config.ManagementClusterName,
KubernetesVersion: config.GetVariable(KubernetesVersionManagement),
RequiresDockerSock: config.HasDockerProvider(),
Images: config.Images,
IPFamily: config.GetVariable(IPFamily),
LogFolder: filepath.Join(artifactFolder, "kind"),
})
Expect(clusterProvider).ToNot(BeNil(), "Failed to create a bootstrap cluster")
kubeconfigPath = clusterProvider.GetKubeconfigPath()
Expect(kubeconfigPath).To(BeAnExistingFile(), "Failed to get the kubeconfig file for the bootstrap cluster")
} else {
By("Using an existing bootstrap cluster")
}
clusterProxy := framework.NewClusterProxy("bootstrap", kubeconfigPath, scheme)
Expect(clusterProxy).ToNot(BeNil(), "Failed to get a bootstrap cluster proxy")
return clusterProvider, clusterProxy
}
func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig, clusterctlConfig, artifactFolder string) {
clusterctl.InitManagementClusterAndWatchControllerLogs(ctx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{
ClusterProxy: bootstrapClusterProxy,
ClusterctlConfigPath: clusterctlConfig,
InfrastructureProviders: config.InfrastructureProviders(),
LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
}, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...)
}
func dumpBootstrapClusterLogs(bootstrapClusterProxy framework.ClusterProxy) {
if bootstrapClusterProxy == nil {
return
}
clusterLogCollector := bootstrapClusterProxy.GetLogCollector()
if clusterLogCollector == nil {
return
}
nodes, err := bootstrapClusterProxy.GetClientSet().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
fmt.Printf("Failed to get nodes for the bootstrap cluster: %v\n", err)
return
}
for i := range nodes.Items {
nodeName := nodes.Items[i].GetName()
err = clusterLogCollector.CollectMachineLog(
ctx,
bootstrapClusterProxy.GetClient(),
// The bootstrap cluster is not expected to be a CAPI cluster, so in order to re-use the logCollector,
// we create a fake machine that wraps the node.
// NOTE: This assumes a naming convention between machines and nodes, which e.g. applies to the bootstrap clusters generated with kind.
// This might not work if you are using an existing bootstrap cluster provided by other means.
&clusterv1.Machine{
Spec: clusterv1.MachineSpec{ClusterName: nodeName},
ObjectMeta: metav1.ObjectMeta{Name: nodeName},
},
filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName(), "machines", nodeName),
)
if err != nil {
fmt.Printf("Failed to get logs for the bootstrap cluster node %s: %v\n", nodeName, err)
}
}
}
func tearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) {
if bootstrapClusterProxy != nil {
bootstrapClusterProxy.Dispose(ctx)
}
if bootstrapClusterProvider != nil {
bootstrapClusterProvider.Dispose(ctx)
}
}

View File

@ -0,0 +1,113 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k3s
import (
"context"
"fmt"
"os"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/cluster-api/util"
"github.com/kubesphere/kubekey/test/e2e/framework"
"github.com/kubesphere/kubekey/test/e2e/framework/clusterctl"
)
// QuickStartSpecInput is the input for QuickStartSpec.
type QuickStartSpecInput struct {
E2EConfig *clusterctl.E2EConfig
ClusterctlConfigPath string
BootstrapClusterProxy framework.ClusterProxy
ArtifactFolder string
SkipCleanup bool
ControlPlaneWaiters clusterctl.ControlPlaneWaiters
// Flavor, if specified is the template flavor used to create the cluster for testing.
// If not specified and the e2e config variable IPFamily is IPV6, the "ipv6" flavor is used;
// otherwise the default flavor is used.
Flavor *string
}
// QuickStartSpec implements a spec that mimics the operation described in the Cluster API quick start, that is
// creating a workload cluster.
// This test is meant to provide a first, fast signal to detect regression; it is recommended to use it as a PR blocker test.
// NOTE: This test works with Clusters with and without ClusterClass.
func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) {
var (
specName = "quick-start"
input QuickStartSpecInput
namespace *corev1.Namespace
cancelWatches context.CancelFunc
clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
)
BeforeEach(func() {
Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
input = inputGetter()
Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
// Set up a Namespace to host objects for this spec and create a watcher for the namespace events.
namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
})
It("Should create a workload cluster", func() {
By("Creating a workload cluster")
flavor := clusterctl.DefaultFlavor
if input.Flavor != nil {
flavor = *input.Flavor
}
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
ClusterctlConfigPath: input.ClusterctlConfigPath,
KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: flavor,
Namespace: namespace.Name,
ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64(1),
WorkerMachineCount: pointer.Int64(1),
},
ControlPlaneWaiters: input.ControlPlaneWaiters,
WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
}, clusterResources)
By("PASSED!")
})
AfterEach(func() {
// Dumps all the resources in the spec namespace, then cleans up the cluster object and the spec namespace itself.
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
})
}

View File

@ -0,0 +1,39 @@
//go:build e2e
// +build e2e
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k3s
import (
"context"
. "github.com/onsi/ginkgo"
)
var _ = Describe("Cluster Creation using Cluster API quick-start test [PR-Blocking]", func() {
By("Creating single-node control plane with one worker node")
QuickStartSpec(context.TODO(), func() QuickStartSpecInput {
return QuickStartSpecInput{
E2EConfig: e2eConfig,
ClusterctlConfigPath: clusterctlConfigPath,
BootstrapClusterProxy: bootstrapClusterProxy,
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
}
})
})
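
Because QuickStartSpecInput exposes an optional Flavor field, further variants can be registered next to the block above without touching the spec itself. The following is a hedged sketch, not part of this commit: the "ha" flavor name is hypothetical and would have to correspond to a template flavor available in the local clusterctl repository, and the file would additionally need the k8s.io/utils/pointer import.

var _ = Describe("Cluster Creation using a custom template flavor [sketch]", func() {
	By("Creating a workload cluster from a hypothetical \"ha\" flavor template")
	QuickStartSpec(context.TODO(), func() QuickStartSpecInput {
		return QuickStartSpecInput{
			E2EConfig:             e2eConfig,
			ClusterctlConfigPath:  clusterctlConfigPath,
			BootstrapClusterProxy: bootstrapClusterProxy,
			ArtifactFolder:        artifactFolder,
			SkipCleanup:           skipCleanup,
			// Hypothetical flavor; it must match a template registered in the e2e config.
			Flavor: pointer.String("ha"),
		}
	})
})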

View File

@ -123,7 +123,7 @@ func Test_AddObjectHierarchy(t *testing.T) {
},
},
{
name: "KubeadmControlPlane and Machine owning DockerMachine are added",
name: "K3sControlPlane and Machine owning DockerMachine are added",
obj: &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
@ -137,7 +137,7 @@ func Test_AddObjectHierarchy(t *testing.T) {
},
map[string]interface{}{
"apiVersion": clusterv1.GroupVersion.String(),
"kind": "KubeadmControlPlane",
"kind": "K3sControlPlane",
"name": "development-3961-4flkb",
},
},
@ -148,7 +148,7 @@ func Test_AddObjectHierarchy(t *testing.T) {
expectedKeysAndValues: []interface{}{
"Machine",
klog.ObjectRef{Namespace: metav1.NamespaceDefault, Name: "development-3961-4flkb-gzxnb"},
"KubeadmControlPlane",
"K3sControlPlane",
klog.ObjectRef{Namespace: metav1.NamespaceDefault, Name: "development-3961-4flkb"},
},
},