add migrate_cri

Signed-off-by: zhoutian <zhoutian@yunify.com>
This commit is contained in:
zhoutian 2022-09-22 18:22:22 +08:00
parent b731d2f47f
commit 0ea2a0ddfd
13 changed files with 868 additions and 6 deletions

45
cmd/ctl/cri/cri.go Normal file
View File

@ -0,0 +1,45 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cri
import (
"github.com/kubesphere/kubekey/cmd/ctl/options"
"github.com/spf13/cobra"
)
// MigrateOptions holds the options shared by the `kk cri` command group.
type MigrateOptions struct {
	// CommonOptions carries the global CLI flags (verbosity, confirmation, etc.).
	CommonOptions *options.CommonOptions
}
// NewMigrateOptions returns a MigrateOptions with the common CLI options
// initialized to their defaults.
func NewMigrateOptions() *MigrateOptions {
	o := &MigrateOptions{}
	o.CommonOptions = options.NewCommonOptions()
	return o
}
// NewCmdCri creates the parent `cri` command and registers its
// subcommands (currently only `migrate`).
func NewCmdCri() *cobra.Command {
	o := NewMigrateOptions()
	cmd := &cobra.Command{
		Use:   "cri",
		Short: "cri",
	}
	o.CommonOptions.AddCommonFlag(cmd)
	cmd.AddCommand(NewCmdMigrateCri())
	return cmd
}

103
cmd/ctl/cri/migrate_cri.go Normal file
View File

@ -0,0 +1,103 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cri
import (
"github.com/pkg/errors"
"github.com/kubesphere/kubekey/cmd/ctl/options"
"github.com/kubesphere/kubekey/cmd/ctl/util"
"github.com/kubesphere/kubekey/pkg/common"
"github.com/kubesphere/kubekey/pkg/pipelines"
"github.com/spf13/cobra"
)
// MigrateCriOptions holds every flag accepted by `kk cri migrate`.
type MigrateCriOptions struct {
	CommonOptions    *options.CommonOptions // global CLI flags
	ClusterCfgFile   string                 // --filename/-f: cluster configuration file (required)
	Kubernetes       string                 // --with-kubernetes: kubernetes version to use
	EnableKubeSphere bool                   // not bound to a flag in AddFlags — TODO confirm intended use
	KubeSphere       string                 // not bound to a flag in AddFlags — TODO confirm intended use
	DownloadCmd      string                 // --download-cmd: template used to fetch binaries (two %s: path, URL)
	Artifact         string                 // --artifact/-a: path to a KubeKey artifact
	Type             string                 // --type: target CRI, docker or containerd (required)
	Role             string                 // --role: node group to migrate, master/worker/all (required)
}
// NewMigrateCriOptions returns a MigrateCriOptions with the common CLI
// options initialized to their defaults.
func NewMigrateCriOptions() *MigrateCriOptions {
	o := &MigrateCriOptions{}
	o.CommonOptions = options.NewCommonOptions()
	return o
}
// NewCmdMigrateCri creates the `cri migrate` command, which migrates the
// cluster's container runtime between docker and containerd.
func NewCmdMigrateCri() *cobra.Command {
	o := NewMigrateCriOptions()
	cmd := &cobra.Command{
		Use:   "migrate",
		Short: "Migrate a container",
		Run: func(cmd *cobra.Command, args []string) {
			// Validate the flags first; only run the pipeline when they pass.
			util.CheckErr(o.Validate())
			util.CheckErr(o.Run())
		},
	}
	o.CommonOptions.AddCommonFlag(cmd)
	o.AddFlags(cmd)
	return cmd
}
// Run translates the parsed flags into a common.Argument and starts the
// CRI migration pipeline.
func (o *MigrateCriOptions) Run() error {
	arg := common.Argument{
		FilePath:          o.ClusterCfgFile,
		Debug:             o.CommonOptions.Verbose,
		KubernetesVersion: o.Kubernetes,
		Type:              o.Type,
		Role:              o.Role,
	}
	return pipelines.MigrateCri(arg, o.DownloadCmd)
}
// AddFlags registers the migrate-specific flags on cmd.
func (o *MigrateCriOptions) AddFlags(cmd *cobra.Command) {
	flags := cmd.Flags()
	// Flags without a shorthand use StringVar; behavior is identical to
	// StringVarP with an empty shorthand.
	flags.StringVar(&o.Role, "role", "", "Role groups for migrating. Support: master, worker, all.")
	flags.StringVar(&o.Type, "type", "", "Type of target CRI. Support: docker, containerd.")
	flags.StringVarP(&o.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
	flags.StringVar(&o.Kubernetes, "with-kubernetes", "", "Specify a supported version of kubernetes")
	flags.StringVar(&o.DownloadCmd, "download-cmd", "curl -L -o %s %s",
		`The user defined command to download the necessary binary files. The first param '%s' is output path, the second param '%s', is the URL`)
	flags.StringVarP(&o.Artifact, "artifact", "a", "", "Path to a KubeKey artifact")
}
// Validate checks that the required flags were provided and carry
// supported values; the first violation found is returned.
func (o *MigrateCriOptions) Validate() error {
	switch {
	case o.Role == "":
		return errors.New("node Role can not be empty")
	case o.Role != common.Worker && o.Role != common.Master && o.Role != "all":
		return errors.Errorf("node Role is invalid: %s", o.Role)
	case o.Type == "":
		return errors.New("cri Type can not be empty")
	case o.Type != common.Docker && o.Type != common.Conatinerd:
		return errors.Errorf("cri Type is invalid: %s", o.Type)
	case o.ClusterCfgFile == "":
		return errors.New("configuration file can not be empty")
	}
	return nil
}

View File

@ -18,11 +18,18 @@ package ctl
import (
"fmt"
"os"
"os/exec"
"runtime"
"strings"
"syscall"
"github.com/kubesphere/kubekey/cmd/ctl/add"
"github.com/kubesphere/kubekey/cmd/ctl/artifact"
"github.com/kubesphere/kubekey/cmd/ctl/cert"
"github.com/kubesphere/kubekey/cmd/ctl/completion"
"github.com/kubesphere/kubekey/cmd/ctl/create"
"github.com/kubesphere/kubekey/cmd/ctl/cri"
"github.com/kubesphere/kubekey/cmd/ctl/delete"
initOs "github.com/kubesphere/kubekey/cmd/ctl/init"
"github.com/kubesphere/kubekey/cmd/ctl/options"
@ -30,11 +37,6 @@ import (
"github.com/kubesphere/kubekey/cmd/ctl/upgrade"
"github.com/kubesphere/kubekey/cmd/ctl/version"
"github.com/spf13/cobra"
"os"
"os/exec"
"runtime"
"strings"
"syscall"
)
type KubeKeyOptions struct {
@ -115,6 +117,8 @@ func NewKubeKeyCommand(o KubeKeyOptions) *cobra.Command {
cmds.AddCommand(completion.NewCmdCompletion())
cmds.AddCommand(version.NewCmdVersion())
cmds.AddCommand(cri.NewCmdCri())
return cmds
}

View File

@ -0,0 +1,48 @@
# NAME
**kk cri migrate**: migrate your cri smoothly to docker/containerd with this command.
# DESCRIPTION
migrate your cri smoothly to docker/containerd with this command.
# OPTIONS
## **--role**
Which node(worker/master/all) to migrate.
## **--type**
Which cri(docker/containerd) to migrate.
## **--debug**
Print detailed information. The default is `false`.
## **--filename, -f**
Path to a configuration file. This option is required.
## **--yes, -y**
Skip confirm check. The default is `false`.
# EXAMPLES
Migrate your master node's cri smoothly to docker.
```
$ ./kk cri migrate --role master --type docker -f config-sample.yaml
```
Migrate your master node's cri smoothly to containerd.
```
$ ./kk cri migrate --role master --type containerd -f config-sample.yaml
```
Migrate your worker node's cri smoothly to docker.
```
$ ./kk cri migrate --role worker --type docker -f config-sample.yaml
```
Migrate your worker node's cri smoothly to containerd.
```
$ ./kk cri migrate --role worker --type containerd -f config-sample.yaml
```
Migrate all your node's cri smoothly to containerd.
```
$ ./kk cri migrate --role all --type containerd -f config-sample.yaml
```
Migrate all your node's cri smoothly to docker.
```
$ ./kk cri migrate --role all --type docker -f config-sample.yaml
```

View File

@ -18,6 +18,8 @@ package binaries
import (
"fmt"
"os/exec"
kubekeyapiv1alpha2 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha2"
"github.com/kubesphere/kubekey/pkg/common"
"github.com/kubesphere/kubekey/pkg/core/cache"
@ -25,7 +27,6 @@ import (
"github.com/kubesphere/kubekey/pkg/core/util"
"github.com/kubesphere/kubekey/pkg/files"
"github.com/pkg/errors"
"os/exec"
)
// K8sFilesDownloadHTTP defines the kubernetes' binaries that need to be downloaded in advance and downloads them.
@ -145,3 +146,47 @@ func KubernetesArtifactBinariesDownload(manifest *common.ArtifactManifest, path,
return nil
}
// CriDownloadHTTP downloads the CRI binaries required by the migration
// for the given architecture — docker, or containerd plus runc and
// crictl — skipping files already present locally with a valid SHA256
// checksum, and registers them in the pipeline cache.
func CriDownloadHTTP(kubeConf *common.KubeConf, path, arch string, pipelineCache *cache.Cache) error {
	binaries := []*files.KubeBinary{}
	switch kubeConf.Arg.Type {
	case common.Docker:
		docker := files.NewKubeBinary("docker", arch, kubekeyapiv1alpha2.DefaultDockerVersion, path, kubeConf.Arg.DownloadCommand)
		binaries = append(binaries, docker)
	case common.Conatinerd:
		containerd := files.NewKubeBinary("containerd", arch, kubekeyapiv1alpha2.DefaultContainerdVersion, path, kubeConf.Arg.DownloadCommand)
		runc := files.NewKubeBinary("runc", arch, kubekeyapiv1alpha2.DefaultRuncVersion, path, kubeConf.Arg.DownloadCommand)
		crictl := files.NewKubeBinary("crictl", arch, kubekeyapiv1alpha2.DefaultCrictlVersion, path, kubeConf.Arg.DownloadCommand)
		binaries = append(binaries, containerd, runc, crictl)
	default:
		// Previously an unknown type fell through silently and cached an
		// empty map; fail loudly instead (Type is validated by the CLI).
		return errors.Errorf("unsupported cri type: %s", kubeConf.Arg.Type)
	}
	binariesMap := make(map[string]*files.KubeBinary)
	for _, binary := range binaries {
		if err := binary.CreateBaseDir(); err != nil {
			return errors.Wrapf(errors.WithStack(err), "create file %s base dir failed", binary.FileName)
		}
		logger.Log.Messagef(common.LocalHost, "downloading %s %s %s ...", arch, binary.ID, binary.Version)
		binariesMap[binary.ID] = binary
		if util.IsExist(binary.Path()) {
			// Re-download when the on-disk file fails its checksum check.
			if err := binary.SHA256Check(); err != nil {
				p := binary.Path()
				_ = exec.Command("/bin/sh", "-c", fmt.Sprintf("rm -f %s", p)).Run()
			} else {
				logger.Log.Messagef(common.LocalHost, "%s already exists", binary.ID)
				continue
			}
		}
		if err := binary.Download(); err != nil {
			return fmt.Errorf("failed to download %s binary: %s error: %w ", binary.ID, binary.GetCmd(), err)
		}
	}
	pipelineCache.Set(common.KubeBinaries+"-"+arch, binariesMap)
	return nil
}

View File

@ -159,3 +159,34 @@ func (n *RegistryPackageModule) Init() {
download,
}
}
// CriBinariesModule downloads the target CRI's binary package before the
// per-node migration runs.
type CriBinariesModule struct {
	common.KubeModule
}

// Init names the module and builds its task list. Both supported
// runtimes share the same download task, so the previously duplicated
// switch branches are collapsed into one; any other value leaves the
// task list empty (Type is validated earlier by the CLI).
func (i *CriBinariesModule) Init() {
	i.Name = "CriBinariesModule"
	i.Desc = "Download Cri package"
	switch i.KubeConf.Arg.Type {
	case common.Docker, common.Conatinerd:
		i.Tasks = CriBinaries(i)
	default:
	}
}
// CriBinaries builds the single local task that downloads the CRI
// package, stores it on the module, and returns it.
func CriBinaries(p *CriBinariesModule) []task.Interface {
	p.Tasks = []task.Interface{
		&task.LocalTask{
			Name:   "DownloadCriPackage",
			Desc:   "Download Cri package",
			Action: new(CriDownload),
		},
	}
	return p.Tasks
}

View File

@ -276,3 +276,29 @@ func (k *RegistryPackageDownload) Execute(runtime connector.Runtime) error {
return nil
}
// CriDownload is the action that downloads CRI binaries for every CPU
// architecture present among the cluster hosts.
type CriDownload struct {
	common.KubeAction
}

// Execute collects the distinct host architectures (only amd64 and arm64
// are supported) and downloads the CRI binaries once per architecture
// into the runtime work directory.
func (d *CriDownload) Execute(runtime connector.Runtime) error {
	cfg := d.KubeConf.Cluster
	archMap := make(map[string]bool)
	for _, host := range cfg.Hosts {
		switch host.Arch {
		case "amd64", "arm64":
			archMap[host.Arch] = true
		default:
			// errors.Errorf replaces the vet-flagged errors.New(fmt.Sprintf(...)).
			return errors.Errorf("Unsupported architecture: %s", host.Arch)
		}
	}
	for arch := range archMap {
		if err := CriDownloadHTTP(d.KubeConf, runtime.GetWorkDir(), arch, d.PipelineCache); err != nil {
			return err
		}
	}
	return nil
}

View File

@ -127,3 +127,23 @@ func (c *CheckFileExistModule) Init() {
check,
}
}
// MigrateCriConfirmModule asks the operator to confirm the CRI migration
// before any changes are made to the cluster.
type MigrateCriConfirmModule struct {
	common.KubeModule
}

// Init registers the single local task that displays the interactive
// confirmation prompt (implemented by the MigrateCri action).
func (d *MigrateCriConfirmModule) Init() {
	d.Name = "MigrateCriConfirmModule"
	d.Desc = "Display Migrate Cri form"
	display := &task.LocalTask{
		Name:   "ConfirmForm",
		Desc:   "Display confirmation form",
		Action: &MigrateCri{},
	}
	d.Tasks = []task.Interface{
		display,
	}
}

View File

@ -339,3 +339,32 @@ func (c *CheckFile) Execute(runtime connector.Runtime) error {
}
return nil
}
// MigrateCri is the confirmation action: it blocks until the operator
// answers yes (continue) or no (exit the process).
type MigrateCri struct {
	common.KubeAction
}

// Execute reads stdin in a loop until a recognized answer arrives.
// "yes"/"y" returns nil; "no"/"n" exits the process; anything else
// re-prompts. Returns an error only if reading stdin fails.
func (d *MigrateCri) Execute(runtime connector.Runtime) error {
	reader := bufio.NewReader(os.Stdin)
	for {
		fmt.Printf("Are you sure to Migrate Cri? [yes/no]: ")
		input, err := reader.ReadString('\n')
		if err != nil {
			return err
		}
		// The original lowercased the input twice; once is enough.
		switch strings.ToLower(strings.TrimSpace(input)) {
		case "yes", "y":
			return nil
		case "no", "n":
			os.Exit(0)
		}
	}
}

View File

@ -54,6 +54,8 @@ type Argument struct {
ImagesDir string
Namespace string
DeleteCRI bool
Role string
Type string
}
func NewKubeRuntime(flag string, arg Argument) (*KubeRuntime, error) {

View File

@ -19,11 +19,19 @@ package container
import (
"fmt"
"path/filepath"
"strings"
"github.com/kubesphere/kubekey/pkg/common"
"github.com/kubesphere/kubekey/pkg/container/templates"
"github.com/kubesphere/kubekey/pkg/core/action"
"github.com/kubesphere/kubekey/pkg/core/connector"
"github.com/kubesphere/kubekey/pkg/core/logger"
"github.com/kubesphere/kubekey/pkg/core/prepare"
"github.com/kubesphere/kubekey/pkg/core/task"
"github.com/kubesphere/kubekey/pkg/core/util"
"github.com/kubesphere/kubekey/pkg/files"
"github.com/kubesphere/kubekey/pkg/images"
"github.com/kubesphere/kubekey/pkg/registry"
"github.com/kubesphere/kubekey/pkg/utils"
"github.com/pkg/errors"
)
@ -166,3 +174,356 @@ func (d *DisableContainerd) Execute(runtime connector.Runtime) error {
}
return nil
}
// CordonNode marks the current node unschedulable before migration.
type CordonNode struct {
	common.KubeAction
}

// Execute runs `kubectl cordon` against the current remote host.
func (d *CordonNode) Execute(runtime connector.Runtime) error {
	nodeName := runtime.RemoteHost().GetName()
	cmd := fmt.Sprintf("/usr/local/bin/kubectl cordon %s ", nodeName)
	_, err := runtime.GetRunner().SudoCmd(cmd, true)
	if err != nil {
		return errors.Wrap(err, fmt.Sprintf("cordon the node: %s failed", nodeName))
	}
	return nil
}
// UnCordonNode marks the node schedulable again once migration finishes.
type UnCordonNode struct {
	common.KubeAction
}

// Execute retries `kubectl uncordon` until it succeeds. The original
// loop retried forever with no back-off and could spin indefinitely if
// the API server never recovered; retries are now bounded and the last
// error is surfaced to the caller.
func (d *UnCordonNode) Execute(runtime connector.Runtime) error {
	nodeName := runtime.RemoteHost().GetName()
	var lastErr error
	for attempt := 0; attempt < 30; attempt++ {
		_, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("/usr/local/bin/kubectl uncordon %s", nodeName), true)
		if err == nil {
			return nil
		}
		lastErr = err
	}
	return errors.Wrap(lastErr, fmt.Sprintf("uncordon the node: %s failed", nodeName))
}
// DrainNode evicts the workloads from the current node before its
// container runtime is swapped out.
type DrainNode struct {
	common.KubeAction
}

// Execute runs `kubectl drain` with force/daemonset/emptydir overrides
// and a two-minute timeout against the current remote host.
func (d *DrainNode) Execute(runtime connector.Runtime) error {
	nodeName := runtime.RemoteHost().GetName()
	cmd := fmt.Sprintf("/usr/local/bin/kubectl drain %s --delete-emptydir-data --ignore-daemonsets --timeout=2m --force", nodeName)
	_, err := runtime.GetRunner().SudoCmd(cmd, true)
	if err != nil {
		return errors.Wrap(err, fmt.Sprintf("drain the node: %s failed", nodeName))
	}
	return nil
}
// RestartCri reloads systemd and restarts the target container runtime
// selected by Arg.Type.
type RestartCri struct {
	common.KubeAction
}

// Execute restarts docker or containerd on the remote host; an
// unsupported type aborts the process via the logger.
func (i *RestartCri) Execute(runtime connector.Runtime) error {
	switch i.KubeConf.Arg.Type {
	case common.Docker:
		// The commands are constant strings; the no-arg fmt.Sprintf wrappers were removed.
		if _, err := runtime.GetRunner().SudoCmd("systemctl daemon-reload && systemctl restart docker ", true); err != nil {
			return errors.Wrap(err, "restart docker")
		}
	case common.Conatinerd:
		if _, err := runtime.GetRunner().SudoCmd("systemctl daemon-reload && systemctl restart containerd", true); err != nil {
			return errors.Wrap(err, "restart containerd")
		}
	default:
		logger.Log.Fatalf("Unsupported container runtime: %s", strings.TrimSpace(i.KubeConf.Arg.Type))
	}
	return nil
}
// EditKubeletCri rewrites /var/lib/kubelet/kubeadm-flags.env so kubelet
// targets the newly installed container runtime.
type EditKubeletCri struct {
	common.KubeAction
}

// Execute edits the kubelet flags for the target CRI:
//   - docker: strip the containerd remote-runtime flags
//   - containerd: insert the remote-runtime flags pointing at containerd.sock
func (i *EditKubeletCri) Execute(runtime connector.Runtime) error {
	switch i.KubeConf.Arg.Type {
	case common.Docker:
		if _, err := runtime.GetRunner().SudoCmd(
			"sed -i 's#--container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --pod#--pod#' /var/lib/kubelet/kubeadm-flags.env",
			true); err != nil {
			// The original message said "Containerd" here, but this branch migrates to docker.
			return errors.Wrap(err, "Change Kubelet To Docker failed")
		}
	case common.Conatinerd:
		if _, err := runtime.GetRunner().SudoCmd(
			"sed -i 's#--network-plugin=cni --pod#--network-plugin=cni --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --pod#' /var/lib/kubelet/kubeadm-flags.env",
			true); err != nil {
			return errors.Wrap(err, "Change Kubelet To Containerd failed")
		}
	default:
		logger.Log.Fatalf("Unsupported container runtime: %s", strings.TrimSpace(i.KubeConf.Arg.Type))
	}
	return nil
}
// RestartKubeletNode restarts kubelet so the new CRI settings take effect.
type RestartKubeletNode struct {
	common.KubeAction
}

// Execute restarts the kubelet service on the remote host. The command
// is a constant string, so the no-arg fmt.Sprintf wrapper was removed.
func (d *RestartKubeletNode) Execute(runtime connector.Runtime) error {
	if _, err := runtime.GetRunner().SudoCmd("systemctl restart kubelet", true); err != nil {
		return errors.Wrap(err, "RestartNode Kube failed")
	}
	return nil
}
// MigrateSelfNodeCriTasks assembles and immediately executes, on the
// current remote host only, the ordered task sequence that migrates that
// node's container runtime to the CRI selected in
// kubeAction.KubeConf.Arg.Type: cordon -> drain -> disable the old
// runtime -> install/configure the new runtime -> restart the CRI ->
// rewrite the kubelet flags -> restart kubelet -> uncordon. Tasks run
// strictly sequentially; the first failing task aborts and its combined
// error is returned.
func MigrateSelfNodeCriTasks(runtime connector.Runtime, kubeAction common.KubeAction) error {
	host := runtime.RemoteHost()
	tasks := []task.Interface{}
	// Tasks shared by both target runtimes; they are appended inside the
	// branches below so the overall ordering is preserved.
	CordonNode := &task.RemoteTask{
		Name:     "CordonNode",
		Desc:     "Cordon Node",
		Hosts:    []connector.Host{host},
		Action:   new(CordonNode),
		Parallel: false,
	}
	DrainNode := &task.RemoteTask{
		Name:     "DrainNode",
		Desc:     "Drain Node",
		Hosts:    []connector.Host{host},
		Action:   new(DrainNode),
		Parallel: false,
	}
	RestartCri := &task.RemoteTask{
		Name:     "RestartCri",
		Desc:     "Restart Cri",
		Hosts:    []connector.Host{host},
		Action:   new(RestartCri),
		Parallel: false,
	}
	EditKubeletCri := &task.RemoteTask{
		Name:     "EditKubeletCri",
		Desc:     "Edit Kubelet Cri",
		Hosts:    []connector.Host{host},
		Action:   new(EditKubeletCri),
		Parallel: false,
	}
	RestartKubeletNode := &task.RemoteTask{
		Name:     "RestartKubeletNode",
		Desc:     "Restart Kubelet Node",
		Hosts:    []connector.Host{host},
		Action:   new(RestartKubeletNode),
		Parallel: false,
	}
	UnCordonNode := &task.RemoteTask{
		Name:     "UnCordonNode",
		Desc:     "UnCordon Node",
		Hosts:    []connector.Host{host},
		Action:   new(UnCordonNode),
		Parallel: false,
	}
	// Step 1: cordon + drain the node, then disable whichever runtime the
	// cluster currently uses (per the cluster spec's ContainerManager).
	switch kubeAction.KubeConf.Cluster.Kubernetes.ContainerManager {
	case common.Docker:
		Uninstall := &task.RemoteTask{
			Name:  "DisableDocker",
			Desc:  "Disable docker",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&DockerExist{Not: false},
			},
			Action:   new(DisableDocker),
			Parallel: false,
		}
		tasks = append(tasks, CordonNode, DrainNode, Uninstall)
	case common.Conatinerd:
		Uninstall := &task.RemoteTask{
			Name:  "UninstallContainerd",
			Desc:  "Uninstall containerd",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&ContainerdExist{Not: false},
			},
			Action:   new(DisableContainerd),
			Parallel: false,
		}
		tasks = append(tasks, CordonNode, DrainNode, Uninstall)
	}
	// Step 2a: target runtime is docker — sync binaries, write the
	// systemd unit and daemon config, enable docker, log in to the
	// private registry, then run the shared restart/uncordon tail.
	if kubeAction.KubeConf.Arg.Type == common.Docker {
		syncBinaries := &task.RemoteTask{
			Name:  "SyncDockerBinaries",
			Desc:  "Sync docker binaries",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				// &kubernetes.NodeInCluster{Not: true},
				&DockerExist{Not: true},
			},
			Action:   new(SyncDockerBinaries),
			Parallel: false,
		}
		generateDockerService := &task.RemoteTask{
			Name:  "GenerateDockerService",
			Desc:  "Generate docker service",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				// &kubernetes.NodeInCluster{Not: true},
				&DockerExist{Not: true},
			},
			Action: &action.Template{
				Template: templates.DockerService,
				Dst:      filepath.Join("/etc/systemd/system", templates.DockerService.Name()),
			},
			Parallel: false,
		}
		generateDockerConfig := &task.RemoteTask{
			Name:  "GenerateDockerConfig",
			Desc:  "Generate docker config",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				// &kubernetes.NodeInCluster{Not: true},
				&DockerExist{Not: true},
			},
			Action: &action.Template{
				Template: templates.DockerConfig,
				Dst:      filepath.Join("/etc/docker/", templates.DockerConfig.Name()),
				Data: util.Data{
					"Mirrors":            templates.Mirrors(kubeAction.KubeConf),
					"InsecureRegistries": templates.InsecureRegistries(kubeAction.KubeConf),
					"DataRoot":           templates.DataRoot(kubeAction.KubeConf),
				},
			},
			Parallel: false,
		}
		enableDocker := &task.RemoteTask{
			Name:  "EnableDocker",
			Desc:  "Enable docker",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				// &kubernetes.NodeInCluster{Not: true},
				&DockerExist{Not: true},
			},
			Action:   new(EnableDocker),
			Parallel: false,
		}
		dockerLoginRegistry := &task.RemoteTask{
			Name:  "Login PrivateRegistry",
			Desc:  "Add auths to container runtime",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				// &kubernetes.NodeInCluster{Not: true},
				&DockerExist{},
				&PrivateRegistryAuth{},
			},
			Action:   new(DockerLoginRegistry),
			Parallel: false,
		}
		tasks = append(tasks, syncBinaries, generateDockerService, generateDockerConfig, enableDocker, dockerLoginRegistry,
			RestartCri, EditKubeletCri, RestartKubeletNode, UnCordonNode)
	}
	// Step 2b: target runtime is containerd — sync containerd and crictl,
	// write service/config/crictl files, enable containerd, then run the
	// same shared restart/uncordon tail.
	if kubeAction.KubeConf.Arg.Type == common.Conatinerd {
		syncContainerd := &task.RemoteTask{
			Name:  "SyncContainerd",
			Desc:  "Sync containerd binaries",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&ContainerdExist{Not: true},
			},
			Action:   new(SyncContainerd),
			Parallel: false,
		}
		syncCrictlBinaries := &task.RemoteTask{
			Name:  "SyncCrictlBinaries",
			Desc:  "Sync crictl binaries",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&CrictlExist{Not: true},
			},
			Action:   new(SyncCrictlBinaries),
			Parallel: false,
		}
		generateContainerdService := &task.RemoteTask{
			Name:  "GenerateContainerdService",
			Desc:  "Generate containerd service",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&ContainerdExist{Not: true},
			},
			Action: &action.Template{
				Template: templates.ContainerdService,
				Dst:      filepath.Join("/etc/systemd/system", templates.ContainerdService.Name()),
			},
			Parallel: false,
		}
		generateContainerdConfig := &task.RemoteTask{
			Name:  "GenerateContainerdConfig",
			Desc:  "Generate containerd config",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&ContainerdExist{Not: true},
			},
			Action: &action.Template{
				Template: templates.ContainerdConfig,
				Dst:      filepath.Join("/etc/containerd/", templates.ContainerdConfig.Name()),
				Data: util.Data{
					"Mirrors":            templates.Mirrors(kubeAction.KubeConf),
					"InsecureRegistries": kubeAction.KubeConf.Cluster.Registry.InsecureRegistries,
					"SandBoxImage":       images.GetImage(runtime, kubeAction.KubeConf, "pause").ImageName(),
					"Auths":              registry.DockerRegistryAuthEntries(kubeAction.KubeConf.Cluster.Registry.Auths),
					"DataRoot":           templates.DataRoot(kubeAction.KubeConf),
				},
			},
			Parallel: false,
		}
		generateCrictlConfig := &task.RemoteTask{
			Name:  "GenerateCrictlConfig",
			Desc:  "Generate crictl config",
			Hosts: []connector.Host{host},
			Prepare: &prepare.PrepareCollection{
				&ContainerdExist{Not: true},
			},
			Action: &action.Template{
				Template: templates.CrictlConfig,
				Dst:      filepath.Join("/etc/", templates.CrictlConfig.Name()),
				Data: util.Data{
					"Endpoint": kubeAction.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint,
				},
			},
			Parallel: false,
		}
		enableContainerd := &task.RemoteTask{
			Name:  "EnableContainerd",
			Desc:  "Enable containerd",
			Hosts: []connector.Host{host},
			// Prepare: &prepare.PrepareCollection{
			//	&ContainerdExist{Not: true},
			// },
			Action:   new(EnableContainerd),
			Parallel: false,
		}
		tasks = append(tasks, syncContainerd, syncCrictlBinaries, generateContainerdService, generateContainerdConfig,
			generateCrictlConfig, enableContainerd, RestartCri, EditKubeletCri, RestartKubeletNode, UnCordonNode)
	}
	// Execute the assembled tasks in order; the first failure aborts.
	for i := range tasks {
		t := tasks[i]
		t.Init(runtime, kubeAction.ModuleCache, kubeAction.PipelineCache)
		if res := t.Execute(); res.IsFailed() {
			return res.CombineErr()
		}
	}
	return nil
}
// MigrateSelfNodeCri is the action that runs the complete per-node CRI
// migration task sequence on the current remote host.
type MigrateSelfNodeCri struct {
	common.KubeAction
}

// Execute delegates to MigrateSelfNodeCriTasks and wraps any failure.
// The original used errors.Wrap(errors.WithStack(err), fmt.Sprintf(...))
// with a no-arg Sprintf and a dangling colon; Wrap alone already records
// a stack trace.
func (d *MigrateSelfNodeCri) Execute(runtime connector.Runtime) error {
	if err := MigrateSelfNodeCriTasks(runtime, d.KubeAction); err != nil {
		return errors.Wrap(err, "MigrateSelfNodeCriTasks failed")
	}
	return nil
}

View File

@ -308,3 +308,81 @@ func UninstallContainerd(m *UninstallContainerModule) []task.Interface {
disableContainerd,
}
}
// CriMigrateModule drives the per-node CRI migration for the node group
// selected by Arg.Role (master, worker, or all).
type CriMigrateModule struct {
	common.KubeModule
	// Skip disables the whole module when true.
	Skip bool
}

// IsSkip reports whether the pipeline should skip this module.
func (i *CriMigrateModule) IsSkip() bool {
	return i.Skip
}
// Init names the module and selects its task list by the requested node
// role; an unsupported role aborts via the logger.
func (p *CriMigrateModule) Init() {
	p.Name = "CriMigrateModule"
	p.Desc = "Cri Migrate manager"
	switch p.KubeConf.Arg.Role {
	case common.Worker:
		p.Tasks = MigrateWCri(p)
	case common.Master:
		p.Tasks = MigrateMCri(p)
	case "all":
		p.Tasks = MigrateACri(p)
	default:
		logger.Log.Fatalf("Unsupported Role: %s", strings.TrimSpace(p.KubeConf.Arg.Role))
	}
}
// MigrateWCri builds the task that migrates the CRI on worker nodes only,
// stores it on the module, and returns the task list.
func MigrateWCri(p *CriMigrateModule) []task.Interface {
	migrate := &task.RemoteTask{
		Name:     "MigrateToDocker",
		Desc:     "Migrate To Docker",
		Hosts:    p.Runtime.GetHostsByRole(common.Worker),
		Prepare:  new(common.OnlyWorker),
		Action:   new(MigrateSelfNodeCri),
		Parallel: false,
	}
	p.Tasks = []task.Interface{migrate}
	return p.Tasks
}
// MigrateMCri builds the task that migrates the CRI on master nodes only,
// stores it on the module, and returns the task list.
func MigrateMCri(p *CriMigrateModule) []task.Interface {
	migrate := &task.RemoteTask{
		Name:     "MigrateMasterToDocker",
		Desc:     "Migrate Master To Docker",
		Hosts:    p.Runtime.GetHostsByRole(common.Master),
		Prepare:  new(common.IsMaster),
		Action:   new(MigrateSelfNodeCri),
		Parallel: false,
	}
	p.Tasks = []task.Interface{migrate}
	return p.Tasks
}
// MigrateACri builds the task that migrates the CRI on every cluster
// node, stores it on the module, and returns the task list.
func MigrateACri(p *CriMigrateModule) []task.Interface {
	MigrateACri := &task.RemoteTask{
		// The original Name/Desc were copy-pasted from the master-only
		// task ("MigrateMasterToDocker"); use role-accurate labels.
		Name:     "MigrateAllNodesCri",
		Desc:     "Migrate All Nodes Cri",
		Hosts:    p.Runtime.GetHostsByRole(common.K8s),
		Action:   new(MigrateSelfNodeCri),
		Parallel: false,
	}
	p.Tasks = []task.Interface{
		MigrateACri,
	}
	return p.Tasks
}

View File

@ -0,0 +1,70 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipelines
import (
"fmt"
"github.com/kubesphere/kubekey/pkg/binaries"
"github.com/kubesphere/kubekey/pkg/bootstrap/confirm"
"github.com/kubesphere/kubekey/pkg/bootstrap/precheck"
"github.com/kubesphere/kubekey/pkg/common"
"github.com/kubesphere/kubekey/pkg/container"
"github.com/kubesphere/kubekey/pkg/core/module"
"github.com/kubesphere/kubekey/pkg/core/pipeline"
)
// MigrateCriPipeline assembles and starts the CRI migration pipeline:
// greeting/precheck, interactive confirmation, CRI binary download, and
// the per-node migration module.
func MigrateCriPipeline(runtime *common.KubeRuntime) error {
	// The stray debug fmt.Println was removed, and the pipeline name is
	// no longer the copy-pasted "MigrateContainerdPipeline".
	m := []module.Module{
		&precheck.GreetingsModule{},
		&confirm.MigrateCriConfirmModule{},
		&binaries.CriBinariesModule{},
		&container.CriMigrateModule{},
	}
	p := pipeline.Pipeline{
		Name:    "MigrateCriPipeline",
		Modules: m,
		Runtime: runtime,
	}
	return p.Start()
}
// MigrateCri builds a KubeRuntime from the CLI arguments and runs the
// CRI migration pipeline. downloadCmd is a format string with two %s
// slots (output path, then URL) used to fetch binaries.
func MigrateCri(args common.Argument, downloadCmd string) error {
	args.DownloadCommand = func(path, url string) string {
		// Bind the user-supplied template to a concrete path/url pair.
		return fmt.Sprintf(downloadCmd, path, url)
	}
	loaderType := common.AllInOne
	if args.FilePath != "" {
		loaderType = common.File
	}
	runtime, err := common.NewKubeRuntime(loaderType, args)
	if err != nil {
		// Errors were previously printed here and returned, producing
		// duplicate output; handle them at one layer only.
		return err
	}
	return MigrateCriPipeline(runtime)
}