Mirror of https://github.com/kubesphere/kubekey.git (synced 2025-12-26 01:22:51 +00:00)
Merge pull request #1357 from 24sama/master
cleanup: do not need to generate kubeadm config and kubelet service file when upgrading the k8s cluster
commit ed1680687e
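In short, this change trims the upgrade path: the kubeadm config file and the kubelet systemd unit/env files are written at install time and do not need to be regenerated during an upgrade, so the corresponding tasks are dropped; kubeadm upgrade apply is invoked without --config (and without --force); the post-restart wait on masters is shortened; and the ks-installer log hint now matches both the ks-install and ks-installer pod labels.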
@@ -663,12 +663,11 @@ func (u *UpgradeKubeMaster) Execute(runtime connector.Runtime) error {
 	if err := SetKubeletTasks(runtime, u.KubeAction); err != nil {
 		return errors.Wrap(errors.WithStack(err), fmt.Sprintf("set kubelet failed: %s", host.GetName()))
 	}
 
 	if _, err := runtime.GetRunner().SudoCmd("systemctl daemon-reload && systemctl restart kubelet", true); err != nil {
 		return errors.Wrap(errors.WithStack(err), fmt.Sprintf("restart kubelet failed: %s", host.GetName()))
 	}
 
-	time.Sleep(30 * time.Second)
+	time.Sleep(10 * time.Second)
 	return nil
 }
@@ -683,7 +682,7 @@ func (u *UpgradeKubeWorker) Execute(runtime connector.Runtime) error {
 	if _, err := runtime.GetRunner().SudoCmd("/usr/local/bin/kubeadm upgrade node", true); err != nil {
 		return errors.Wrap(errors.WithStack(err), fmt.Sprintf("upgrade node using kubeadm failed: %s", host.GetName()))
 	}
-	if _, err := runtime.GetRunner().SudoCmd("systemctl daemon-reload && systemctl stop kubelet", true); err != nil {
+	if _, err := runtime.GetRunner().SudoCmd("systemctl stop kubelet", true); err != nil {
 		return errors.Wrap(errors.WithStack(err), fmt.Sprintf("stop kubelet failed: %s", host.GetName()))
 	}
 	if err := SetKubeletTasks(runtime, u.KubeAction); err != nil {
@@ -692,6 +691,8 @@ func (u *UpgradeKubeWorker) Execute(runtime connector.Runtime) error {
 	if _, err := runtime.GetRunner().SudoCmd("systemctl daemon-reload && systemctl restart kubelet", true); err != nil {
 		return errors.Wrap(errors.WithStack(err), fmt.Sprintf("restart kubelet failed: %s", host.GetName()))
 	}
+	time.Sleep(10 * time.Second)
+
 	if err := SyncKubeConfigTask(runtime, u.KubeAction); err != nil {
 		return errors.Wrap(errors.WithStack(err), fmt.Sprintf("sync kube config to worker failed: %s", host.GetName()))
 	}
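The master path now waits 10 seconds instead of 30 after restarting the kubelet. On workers, the systemctl daemon-reload before stopping the kubelet is dropped (the unit file is no longer rewritten during upgrade, so there is nothing to reload), and a 10-second settle wait is added after the restart before the kubeconfig is synced.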
@@ -700,14 +701,6 @@ func (u *UpgradeKubeWorker) Execute(runtime connector.Runtime) error {
 
 func KubeadmUpgradeTasks(runtime connector.Runtime, u *UpgradeKubeMaster) error {
 	host := runtime.RemoteHost()
-	generateKubeadmConfig := &task.RemoteTask{
-		Name:     "GenerateKubeadmConfig",
-		Desc:     "Generate kubeadm config",
-		Hosts:    []connector.Host{host},
-		Prepare:  new(NotEqualDesiredVersion),
-		Action:   &GenerateKubeadmConfig{IsInitConfiguration: true},
-		Parallel: false,
-	}
 
 	kubeadmUpgrade := &task.RemoteTask{
 		Name: "KubeadmUpgrade",
@@ -730,7 +723,6 @@ func KubeadmUpgradeTasks(runtime connector.Runtime, u *UpgradeKubeMaster) error
 	}
 
 	tasks := []task.Interface{
-		generateKubeadmConfig,
 		kubeadmUpgrade,
 		copyKubeConfig,
 	}
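Removing generateKubeadmConfig (and its entry in the task list) is safe because, as the next hunk shows, kubeadm upgrade apply no longer receives --config; without that flag kubeadm reads the ClusterConfiguration from the kubeadm-config ConfigMap in kube-system, i.e. the configuration the cluster is already running with, rather than a freshly templated /etc/kubernetes/kubeadm-config.yaml.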
@@ -752,22 +744,15 @@ type KubeadmUpgrade struct {
 func (k *KubeadmUpgrade) Execute(runtime connector.Runtime) error {
 	host := runtime.RemoteHost()
 	if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf(
-		"timeout -k 600s 600s /usr/local/bin/kubeadm upgrade apply -y %s --config=/etc/kubernetes/kubeadm-config.yaml "+
+		"timeout -k 600s 600s /usr/local/bin/kubeadm upgrade apply %s -y "+
 			"--ignore-preflight-errors=all "+
 			"--allow-experimental-upgrades "+
 			"--allow-release-candidate-upgrades "+
 			"--etcd-upgrade=false "+
-			"--certificate-renewal=true "+
-			"--force",
+			"--certificate-renewal=true ",
 		k.KubeConf.Cluster.Kubernetes.Version), false); err != nil {
 		return errors.Wrap(errors.WithStack(err), fmt.Sprintf("upgrade master failed: %s", host.GetName()))
 	}
 
 	if _, err := runtime.GetRunner().SudoCmd("systemctl daemon-reload && systemctl restart kubelet", true); err != nil {
 		return errors.Wrap(errors.WithStack(err), fmt.Sprintf("restart kubelet failed: %s", host.GetName()))
 	}
 
 	time.Sleep(30 * time.Second)
 	return nil
 }
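To make the resulting invocation concrete, here is a minimal, runnable sketch of the string the new code builds; the version value is a placeholder, not taken from this commit:

package main

import "fmt"

func main() {
	version := "v1.23.7" // placeholder version, for illustration only
	cmd := fmt.Sprintf(
		"timeout -k 600s 600s /usr/local/bin/kubeadm upgrade apply %s -y "+
			"--ignore-preflight-errors=all "+
			"--allow-experimental-upgrades "+
			"--allow-release-candidate-upgrades "+
			"--etcd-upgrade=false "+
			"--certificate-renewal=true ",
		version)
	fmt.Println(cmd)
	// Compared with the old command, --config=/etc/kubernetes/kubeadm-config.yaml
	// and --force are gone; kubeadm falls back to the ClusterConfiguration
	// stored in the kubeadm-config ConfigMap.
}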
@@ -783,19 +768,6 @@ func SetKubeletTasks(runtime connector.Runtime, kubeAction common.KubeAction) er
 		Retry:    2,
 	}
 
-	generateKubeletService := &task.RemoteTask{
-		Name:    "GenerateKubeletService",
-		Desc:    "generate kubelet service",
-		Hosts:   []connector.Host{host},
-		Prepare: new(NotEqualDesiredVersion),
-		Action: &action.Template{
-			Template: templates.KubeletService,
-			Dst:      filepath.Join("/etc/systemd/system/", templates.KubeletService.Name()),
-		},
-		Parallel: false,
-		Retry:    2,
-	}
-
 	enableKubelet := &task.RemoteTask{
 		Name: "EnableKubelet",
 		Desc: "enable kubelet service",
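The removed GenerateKubeletService task rendered a systemd unit from templates.KubeletService and wrote it under /etc/systemd/system/. The real template's content is elided in this diff; the sketch below is only a stand-in showing the Go text/template mechanism that this kind of template action builds on, with invented unit content:

package main

import (
	"os"
	"text/template"
)

// Stand-in unit template; kubekey's actual templates.KubeletService is not
// shown in this diff, so this content is illustrative only.
var kubeletService = template.Must(template.New("kubelet.service").Parse(
	`[Unit]
Description=kubelet: The Kubernetes Node Agent

[Service]
ExecStart={{ .BinDir }}/kubelet
Restart=always

[Install]
WantedBy=multi-user.target
`))

func main() {
	// At install time the rendered file would be written to
	// /etc/systemd/system/<template name>; here we render to stdout.
	if err := kubeletService.Execute(os.Stdout, map[string]string{"BinDir": "/usr/local/bin"}); err != nil {
		panic(err)
	}
}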
@@ -806,21 +778,9 @@ func SetKubeletTasks(runtime connector.Runtime, kubeAction common.KubeAction) er
 		Retry:    5,
 	}
 
-	generateKubeletEnv := &task.RemoteTask{
-		Name:     "GenerateKubeletEnv",
-		Desc:     "generate kubelet env",
-		Hosts:    []connector.Host{host},
-		Prepare:  new(NotEqualDesiredVersion),
-		Action:   new(GenerateKubeletEnv),
-		Parallel: false,
-		Retry:    2,
-	}
-
 	tasks := []task.Interface{
 		syncKubelet,
-		generateKubeletService,
 		enableKubelet,
-		generateKubeletEnv,
 	}
 
 	for i := range tasks {
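After this change SetKubeletTasks registers only two tasks. Kubekey's runner types are not reproduced here; the following self-contained sketch merely illustrates the ordered-task pattern the function uses (a slice of tasks executed in order, aborting on the first error):

package main

import "fmt"

// Not kubekey's actual runner: a minimal illustration of the pattern the
// diff edits, where an upgrade step is an ordered slice of tasks.
type Task interface {
	Name() string
	Execute() error
}

type namedTask struct{ name string }

func (t namedTask) Name() string   { return t.name }
func (t namedTask) Execute() error { fmt.Println("run:", t.name); return nil }

func main() {
	// After this commit the kubelet setup during upgrade reduces to syncing
	// the binary and (re-)enabling the service; the systemd unit and env
	// files written at install time are left untouched.
	tasks := []Task{
		namedTask{"SyncKubelet"},
		namedTask{"EnableKubelet"},
	}
	for i := range tasks {
		if err := tasks[i].Execute(); err != nil {
			fmt.Println("task failed:", tasks[i].Name(), err)
			return
		}
	}
}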
@@ -19,6 +19,9 @@ package pipelines
 import (
+	"encoding/base64"
 	"fmt"
+	"io/ioutil"
+	"path/filepath"
 
 	kubekeyapiv1alpha2 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha2"
 	"github.com/kubesphere/kubekey/pkg/artifact"
 	"github.com/kubesphere/kubekey/pkg/bootstrap/confirm"
@@ -29,8 +32,6 @@ import (
 	"github.com/kubesphere/kubekey/pkg/kubernetes"
 	"github.com/kubesphere/kubekey/pkg/plugins"
 	"github.com/kubesphere/kubekey/pkg/plugins/dns"
-	"io/ioutil"
-	"path/filepath"
 
 	kubekeycontroller "github.com/kubesphere/kubekey/controllers/kubekey"
 	"github.com/kubesphere/kubekey/pkg/addons"
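These two hunks are an import cleanup in the same file: io/ioutil and path/filepath move out of the kubekey import group into the standard-library group at the top of the block, and encoding/base64 is newly imported.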
@@ -110,7 +111,7 @@ func NewCreateClusterPipeline(runtime *common.KubeRuntime) error {
 
 Please check the result using the command:
 
-	kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -f
+	kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
 
 `)
 } else {
@@ -200,7 +201,7 @@ func NewK3sCreateClusterPipeline(runtime *common.KubeRuntime) error {
 
 Please check the result using the command:
 
-	kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -f
+	kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
 
 `)
 } else {
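The only change in these two hunks is the label selector in the printed hint: app=ks-installer becomes the set-based selector 'app in (ks-install, ks-installer)', so the hint works whichever of the two labels the installer pod carries. A self-contained sketch using k8s.io/apimachinery (already a kubekey dependency) shows the matching behavior:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// The new, broader selector from the diff.
	sel, err := labels.Parse("app in (ks-install, ks-installer)")
	if err != nil {
		panic(err)
	}
	for _, podLabels := range []labels.Set{
		{"app": "ks-install"},
		{"app": "ks-installer"},
		{"app": "something-else"},
	} {
		fmt.Printf("%v matches: %v\n", podLabels, sel.Matches(podLabels))
	}
	// Output:
	// map[app:ks-install] matches: true
	// map[app:ks-installer] matches: true
	// map[app:something-else] matches: false
}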