change upgrade policy

Signed-off-by: pixiake <guofeng@yunify.com>

parent d7b0c64be3
commit d7be77b425

@@ -21,34 +21,27 @@ import (
     "github.com/spf13/cobra"
 )
 
-var (
-    clusterCfgFile string
-    kubernetes string
-    kubesphere bool
-    skipCheck bool
-)
-
 // clusterCmd represents the cluster command
 var clusterCmd = &cobra.Command{
     Use: "cluster",
     Short: "Create a Kubernetes or KubeSphere cluster",
     RunE: func(cmd *cobra.Command, args []string) error {
         var ksVersion string
-        if kubesphere && len(args) > 0 {
+        if opt.Kubesphere && len(args) > 0 {
             ksVersion = args[0]
         } else {
             ksVersion = ""
         }
-        logger := util.InitLogger(verbose)
-        return install.CreateCluster(clusterCfgFile, kubernetes, ksVersion, logger, kubesphere, verbose, skipCheck)
+        logger := util.InitLogger(opt.Verbose)
+        return install.CreateCluster(opt.ClusterCfgFile, opt.Kubernetes, ksVersion, logger, opt.Kubesphere, opt.Verbose, opt.SkipCheck)
     },
 }
 
 func init() {
     createCmd.AddCommand(clusterCmd)
 
-    clusterCmd.Flags().StringVarP(&clusterCfgFile, "file", "f", "", "Path to a configuration file")
-    clusterCmd.Flags().StringVarP(&kubernetes, "with-kubernetes", "", "v1.17.9", "Specify a supported version of kubernetes")
-    clusterCmd.Flags().BoolVarP(&kubesphere, "with-kubesphere", "", false, "Deploy a specific version of kubesphere (default v3.0.0)")
-    clusterCmd.Flags().BoolVarP(&skipCheck, "yes", "y", false, "Skip pre-check of the installation")
+    clusterCmd.Flags().StringVarP(&opt.ClusterCfgFile, "file", "f", "", "Path to a configuration file")
+    clusterCmd.Flags().StringVarP(&opt.Kubernetes, "with-kubernetes", "", "", "Specify a supported version of kubernetes")
+    clusterCmd.Flags().BoolVarP(&opt.Kubesphere, "with-kubesphere", "", false, "Deploy a specific version of kubesphere (default v3.0.0)")
+    clusterCmd.Flags().BoolVarP(&opt.SkipCheck, "yes", "y", false, "Skip pre-check of the installation")
 }

@@ -20,20 +20,18 @@ import (
     "github.com/spf13/cobra"
 )
 
-var addons, name, clusterCfgPath string
-
 // configCmd represents the config command
 var configCmd = &cobra.Command{
     Use: "config",
     Short: "Create cluster configuration file",
     RunE: func(cmd *cobra.Command, args []string) error {
         var ksVersion string
-        if kubesphere && len(args) > 0 {
+        if opt.Kubesphere && len(args) > 0 {
             ksVersion = args[0]
         } else {
             ksVersion = ""
         }
-        err := config.GenerateClusterObj(addons, kubernetes, ksVersion, name, clusterCfgPath, kubesphere)
+        err := config.GenerateClusterObj(opt.Addons, opt.Kubernetes, ksVersion, opt.Name, opt.Kubeconfig, opt.ClusterCfgPath, opt.Kubesphere, opt.FromCluster)
         if err != nil {
             return err
         }

@@ -43,9 +41,11 @@ var configCmd = &cobra.Command{
 
 func init() {
     createCmd.AddCommand(configCmd)
-    configCmd.Flags().StringVarP(&addons, "with-storage", "", "", "Add storage plugins")
-    configCmd.Flags().StringVarP(&name, "name", "", "config-sample", "Specify a name of cluster object")
-    configCmd.Flags().StringVarP(&clusterCfgPath, "file", "f", "", "Specify a configuration file path")
-    configCmd.Flags().StringVarP(&kubernetes, "with-kubernetes", "", "v1.17.8", "Specify a supported version of kubernetes")
-    configCmd.Flags().BoolVarP(&kubesphere, "with-kubesphere", "", false, "Deploy a specific version of kubesphere (default v3.0.0)")
+    configCmd.Flags().StringVarP(&opt.Addons, "with-storage", "", "", "Add storage plugins")
+    configCmd.Flags().StringVarP(&opt.Name, "name", "", "config-sample", "Specify a name of cluster object")
+    configCmd.Flags().StringVarP(&opt.ClusterCfgPath, "file", "f", "", "Specify a configuration file path")
+    configCmd.Flags().StringVarP(&opt.Kubernetes, "with-kubernetes", "", "", "Specify a supported version of kubernetes")
+    configCmd.Flags().BoolVarP(&opt.Kubesphere, "with-kubesphere", "", false, "Deploy a specific version of kubesphere (default v3.0.0)")
+    configCmd.Flags().BoolVarP(&opt.FromCluster, "from-cluster", "", false, "Create a configuration based on existing cluster")
+    configCmd.Flags().StringVarP(&opt.Kubeconfig, "kubeconfig", "", "", "Specify a kubeconfig file")
 }

@@ -26,13 +26,13 @@ var deleteCmd = &cobra.Command{
     Use: "delete",
     Short: "Delete a cluster",
     Run: func(cmd *cobra.Command, args []string) {
-        logger := util.InitLogger(verbose)
-        delete.ResetCluster(clusterCfgFile, logger, verbose)
+        logger := util.InitLogger(opt.Verbose)
+        delete.ResetCluster(opt.ClusterCfgFile, logger, opt.Verbose)
     },
 }
 
 func init() {
     rootCmd.AddCommand(deleteCmd)
 
-    deleteCmd.Flags().StringVarP(&clusterCfgFile, "config", "f", "", "Path to a configuration file")
+    deleteCmd.Flags().StringVarP(&opt.ClusterCfgFile, "config", "f", "", "Path to a configuration file")
 }

cmd/root.go

@@ -21,7 +21,22 @@ import (
     "os"
 )
 
-var verbose bool
+type Options struct {
+    Verbose bool
+    Addons string
+    Name string
+    ClusterCfgPath string
+    Kubeconfig string
+    FromCluster bool
+    ClusterCfgFile string
+    Kubernetes string
+    Kubesphere bool
+    SkipCheck bool
+}
+
+var (
+    opt Options
+)
 
 // rootCmd represents the base command when called without any subcommands
 var rootCmd = &cobra.Command{

@@ -46,8 +61,7 @@ func init() {
     // Here you will define your flags and configuration settings.
     // Cobra supports persistent flags, which, if defined here,
     // will be global for your application.
 
-    rootCmd.PersistentFlags().BoolVar(&verbose, "debug", true, "Print detailed information")
+    rootCmd.PersistentFlags().BoolVar(&opt.Verbose, "debug", true, "Print detailed information")
 
     // Cobra also supports local flags, which will only run
     // when this action is called directly.

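Note: the hunk above replaces scattered package-level flag variables with a single shared Options value that every subcommand binds its flags to. A minimal standalone sketch of that pattern, assuming only the cobra dependency; the command and flag names below are illustrative and not part of this commit:

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

// Options mirrors the idea of cmd/root.go: all flag values live in one struct.
type Options struct {
    Verbose        bool
    ClusterCfgFile string
}

var opt Options

func main() {
    rootCmd := &cobra.Command{Use: "example"}
    rootCmd.PersistentFlags().BoolVar(&opt.Verbose, "debug", true, "Print detailed information")

    showCmd := &cobra.Command{
        Use: "show",
        RunE: func(cmd *cobra.Command, args []string) error {
            // Every subcommand reads the same shared opt value.
            fmt.Printf("verbose=%v file=%q\n", opt.Verbose, opt.ClusterCfgFile)
            return nil
        },
    }
    showCmd.Flags().StringVarP(&opt.ClusterCfgFile, "file", "f", "", "Path to a configuration file")

    rootCmd.AddCommand(showCmd)
    _ = rootCmd.Execute()
}
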
@@ -26,13 +26,13 @@ var scaleCmd = &cobra.Command{
     Use: "scale",
     Short: "Scale a cluster according to the new nodes information from the specified configuration file",
     RunE: func(cmd *cobra.Command, args []string) error {
-        logger := util.InitLogger(verbose)
-        return scale.ScaleCluster(clusterCfgFile, "", "", logger, false, verbose, skipCheck)
+        logger := util.InitLogger(opt.Verbose)
+        return scale.ScaleCluster(opt.ClusterCfgFile, "", "", logger, false, opt.Verbose, opt.SkipCheck)
     },
 }
 
 func init() {
     rootCmd.AddCommand(scaleCmd)
-    scaleCmd.Flags().StringVarP(&clusterCfgFile, "file", "f", "", "Path to a configuration file")
-    scaleCmd.Flags().BoolVarP(&skipCheck, "yes", "y", false, "Skip pre-check of the installation")
+    scaleCmd.Flags().StringVarP(&opt.ClusterCfgFile, "file", "f", "", "Path to a configuration file")
+    scaleCmd.Flags().BoolVarP(&opt.SkipCheck, "yes", "y", false, "Skip pre-check of the installation")
 }

@@ -26,12 +26,20 @@ var upgradeCmd = &cobra.Command{
     Use: "upgrade",
     Short: "Upgrade your cluster smoothly to a newer version with this command",
     RunE: func(cmd *cobra.Command, args []string) error {
-        logger := util.InitLogger(verbose)
-        return upgrade.UpgradeCluster(clusterCfgFile, "", "", logger, false, verbose)
+        logger := util.InitLogger(opt.Verbose)
+        var ksVersion string
+        if opt.Kubesphere && len(args) > 0 {
+            ksVersion = args[0]
+        } else {
+            ksVersion = ""
+        }
+        return upgrade.UpgradeCluster(opt.ClusterCfgFile, opt.Kubernetes, ksVersion, logger, opt.Kubesphere, opt.Verbose)
     },
 }
 
 func init() {
     rootCmd.AddCommand(upgradeCmd)
-    upgradeCmd.Flags().StringVarP(&clusterCfgFile, "file", "f", "", "Path to a configuration file")
+    upgradeCmd.Flags().StringVarP(&opt.ClusterCfgFile, "file", "f", "", "Path to a configuration file")
+    upgradeCmd.Flags().StringVarP(&opt.Kubernetes, "with-kubernetes", "", "", "Specify a supported version of kubernetes")
+    upgradeCmd.Flags().BoolVarP(&opt.Kubesphere, "with-kubesphere", "", false, "Deploy a specific version of kubesphere (default v3.0.0)")
 }

go.mod

@@ -1,20 +1,29 @@
 module github.com/kubesphere/kubekey
 
-go 1.13
+go 1.14
 
 require (
+    github.com/imdario/mergo v0.3.10 // indirect
+    github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
     github.com/lithammer/dedent v1.1.0
-    github.com/mitchellh/mapstructure v1.3.1
+    github.com/mitchellh/mapstructure v1.3.3
     github.com/modood/table v0.0.0-20200225102042-88de94bb9876
     github.com/operator-framework/operator-sdk v0.19.0
     github.com/pkg/errors v0.9.1
     github.com/pkg/sftp v1.11.0
     github.com/sirupsen/logrus v1.6.0
     github.com/spf13/cobra v1.0.0
     github.com/spf13/pflag v1.0.5
-    github.com/stretchr/objx v0.1.1 // indirect
+    github.com/spf13/viper v1.4.0
     github.com/tmc/scp v0.0.0-20170824174625-f7b48647feef
-    golang.org/x/crypto v0.0.0-20200602180216-279210d13fed
+    golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899
     golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b
     gopkg.in/yaml.v2 v2.3.0
-    k8s.io/apimachinery v0.18.3
+    k8s.io/api v0.18.6
+    k8s.io/apimachinery v0.18.6
+    k8s.io/client-go v12.0.0+incompatible
+    k8s.io/utils v0.0.0-20200724153422-f32512634ab7 // indirect
     sigs.k8s.io/controller-runtime v0.6.1
 )
+
+replace k8s.io/client-go => k8s.io/client-go v0.18.0

@@ -5,25 +5,100 @@ import (
     "fmt"
     kubekeyapi "github.com/kubesphere/kubekey/pkg/apis/kubekey/v1alpha1"
     "github.com/kubesphere/kubekey/pkg/cluster/kubernetes/tmpl"
+    "github.com/kubesphere/kubekey/pkg/cluster/preinstall"
+    "github.com/kubesphere/kubekey/pkg/files"
     "github.com/kubesphere/kubekey/pkg/util"
     "github.com/kubesphere/kubekey/pkg/util/manager"
     "github.com/kubesphere/kubekey/pkg/util/ssh"
     "github.com/pkg/errors"
+    versionutil "k8s.io/apimachinery/pkg/util/version"
+    "os"
     "os/exec"
+    "sort"
     "strings"
+    "sync"
 )
 
-func UpgradeKubeMasters(mgr *manager.Manager) error {
-    mgr.Logger.Infoln("Upgrading kube masters")
-    return mgr.RunTaskOnMasterNodes(upgradeKubeMasters, false)
+var (
+    currentVersions = make(map[string]string)
+    currentVersionStr string
+    nextVersionStr string
+    mu sync.Mutex
+)
+
+func GetCurrentVersions(mgr *manager.Manager) error {
+    mgr.Logger.Infoln("Get current version")
+    return mgr.RunTaskOnK8sNodes(getCurrentVersion, true)
 }
 
-func upgradeKubeMasters(mgr *manager.Manager, node *kubekeyapi.HostCfg, _ ssh.Connection) error {
-    version, err := mgr.Runner.ExecuteCmd("/usr/local/bin/kubelet --version", 3, false)
+func getCurrentVersion(mgr *manager.Manager, node *kubekeyapi.HostCfg, _ ssh.Connection) error {
+    kubeletVersionInfo, err := mgr.Runner.ExecuteCmd("sudo -E /bin/sh -c \"/usr/local/bin/kubelet --version\"", 3, false)
     if err != nil {
         return errors.Wrap(err, "Failed to get current kubelet version")
     }
-    if strings.Split(version, " ")[1] != mgr.Cluster.Kubernetes.Version {
+    kubeletVersionStr := strings.Split(kubeletVersionInfo, " ")[1]
+    mu.Lock()
+    currentVersions[kubeletVersionStr] = kubeletVersionStr
+    if minVersion, err := getMinVersion(currentVersions); err != nil {
+        return err
+    } else {
+        currentVersions = make(map[string]string)
+        currentVersions[minVersion] = minVersion
+        currentVersionStr = fmt.Sprintf("v%s", minVersion)
+    }
+    mu.Unlock()
+
+    if node.IsMaster {
+        apiserverVersionStr, err := mgr.Runner.ExecuteCmd("sudo -E /bin/sh -c \"cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep 'image:' | cut -d ':' -f 3\"", 3, false)
+        if err != nil {
+            return errors.Wrap(err, "Failed to get current kube-apiserver version")
+        }
+        mu.Lock()
+        currentVersions[apiserverVersionStr] = apiserverVersionStr
+        if minVersion, err := getMinVersion(currentVersions); err != nil {
+            return err
+        } else {
+            currentVersions = make(map[string]string)
+            currentVersions[minVersion] = minVersion
+            currentVersionStr = fmt.Sprintf("v%s", minVersion)
+        }
+        mu.Unlock()
+    }
+
+    return nil
+}
+
+func getMinVersion(versionsMap map[string]string) (string, error) {
+    versionList := []*versionutil.Version{}
+
+    for version := range versionsMap {
+        if versionStr, err := versionutil.ParseSemantic(version); err == nil {
+            versionList = append(versionList, versionStr)
+        } else {
+            return "", err
+        }
+    }
+
+    if len(versionList) > 1 {
+        minVersion := versionList[0]
+        for _, version := range versionList {
+            if minVersion.AtLeast(version) {
+                minVersion = version
+            }
+        }
+
+        return minVersion.String(), nil
+    } else {
+        return versionList[0].String(), nil
+    }
+}
+
+func upgradeKubeMasters(mgr *manager.Manager, node *kubekeyapi.HostCfg) error {
+    kubeletVersion, err := mgr.Runner.ExecuteCmd("/usr/local/bin/kubelet --version", 3, false)
+    if err != nil {
+        return errors.Wrap(err, "Failed to get current kubelet version")
+    }
+    if strings.Split(kubeletVersion, " ")[1] != mgr.Cluster.Kubernetes.Version {
         if err := SyncKubeBinaries(mgr, node); err != nil {
             return err
         }

@@ -70,17 +145,12 @@ func upgradeKubeMasters(mgr *manager.Manager, node *kubekeyapi.HostCfg, _ ssh.Co
     return nil
 }
 
-func UpgradeKubeWorkers(mgr *manager.Manager) error {
-    mgr.Logger.Infoln("Upgrading kube workers")
-    return mgr.RunTaskOnWorkerNodes(upgradeKubeWorkers, false)
-}
-
-func upgradeKubeWorkers(mgr *manager.Manager, node *kubekeyapi.HostCfg, _ ssh.Connection) error {
-    version, err := mgr.Runner.ExecuteCmd("/usr/local/bin/kubelet --version", 3, false)
+func upgradeKubeWorkers(mgr *manager.Manager, node *kubekeyapi.HostCfg) error {
+    kubeletVersion, err := mgr.Runner.ExecuteCmd("/usr/local/bin/kubelet --version", 3, false)
     if err != nil {
         return errors.Wrap(err, "Failed to get current kubelet version")
     }
-    if strings.Split(version, " ")[1] != mgr.Cluster.Kubernetes.Version {
+    if strings.Split(kubeletVersion, " ")[1] != mgr.Cluster.Kubernetes.Version {
 
         if err := SyncKubeBinaries(mgr, node); err != nil {
             return err

@@ -97,3 +167,86 @@ func upgradeKubeWorkers(mgr *manager.Manager, node *kubekeyapi.HostCfg, _ ssh.Co
 
     return nil
 }
+
+func UpgradeKubeCluster(mgr *manager.Manager) error {
+    mgr.Logger.Infoln("Upgrading kube cluster")
+    targetVersionStr := mgr.Cluster.Kubernetes.Version
+    cmp, err := versionutil.MustParseSemantic(currentVersionStr).Compare(mgr.Cluster.Kubernetes.Version)
+    if err != nil {
+        return err
+    }
+    if cmp == 1 {
+        mgr.Logger.Warningln(fmt.Sprintf("The current version (%s) is greater than the target version (%s)", currentVersionStr, targetVersionStr))
+        os.Exit(0)
+    }
+Loop:
+    for {
+        if currentVersionStr != targetVersionStr {
+            currentVersion := versionutil.MustParseSemantic(currentVersionStr)
+            targetVersion := versionutil.MustParseSemantic(targetVersionStr)
+
+            var nextVersionMinor uint
+            if targetVersion.Minor() == currentVersion.Minor() {
+                nextVersionMinor = currentVersion.Minor()
+            } else {
+                nextVersionMinor = currentVersion.Minor() + 1
+            }
+
+            if nextVersionMinor == versionutil.MustParseSemantic(targetVersionStr).Minor() {
+                nextVersionStr = targetVersionStr
+            } else {
+                nextVersionPatchList := []int{}
+                for supportVersionStr := range files.FileSha256["kubeadm"]["amd64"] {
+                    supportVersion, err := versionutil.ParseSemantic(supportVersionStr)
+                    if err != nil {
+                        return err
+                    }
+                    if supportVersion.Minor() == nextVersionMinor {
+                        nextVersionPatchList = append(nextVersionPatchList, int(supportVersion.Patch()))
+                    }
+                }
+                sort.Ints(nextVersionPatchList)
+
+                nextVersion := currentVersion.WithMinor(nextVersionMinor)
+                nextVersion = nextVersion.WithPatch(uint(nextVersionPatchList[len(nextVersionPatchList)-1]))
+
+                nextVersionStr = fmt.Sprintf("v%s", nextVersion.String())
+            }
+
+            mgr.Cluster.Kubernetes.Version = nextVersionStr
+
+            mgr.Logger.Infoln(fmt.Sprintf("Start Upgrade: %s -> %s", currentVersionStr, nextVersionStr))
+
+            preinstall.Prepare(mgr)
+
+            if err := mgr.RunTaskOnK8sNodes(preinstall.PullImages, true); err != nil {
+                return err
+            }
+
+            if err := mgr.RunTaskOnK8sNodes(upgradeCluster, false); err != nil {
+                return err
+            }
+
+            currentVersionStr = nextVersionStr
+        } else {
+            break Loop
+        }
+    }
+
+    return nil
+}
+
+func upgradeCluster(mgr *manager.Manager, node *kubekeyapi.HostCfg, _ ssh.Connection) error {
+    if node.IsMaster {
+        if err := upgradeKubeMasters(mgr, node); err != nil {
+            return err
+        }
+    } else {
+        if err := upgradeKubeWorkers(mgr, node); err != nil {
+            return nil
+        }
+    }
+
+    return nil
+}

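The new upgrade policy in UpgradeKubeCluster never jumps more than one minor version per pass: it starts from the lowest version detected on the nodes, picks the newest supported patch release of each intermediate minor (taken from files.FileSha256), and lands directly on the target only once the target's minor release is reached. Below is a self-contained sketch of just that step calculation, using plain string parsing and a hypothetical supported-version list in place of the real versionutil and FileSha256 data; function names and the version list are illustrative only:

package main

import (
    "fmt"
    "sort"
    "strconv"
    "strings"
)

// parse splits "v1.16.13" into its minor and patch numbers (major is assumed to be 1).
func parse(v string) (minor, patch int) {
    parts := strings.Split(strings.TrimPrefix(v, "v"), ".")
    minor, _ = strconv.Atoi(parts[1])
    patch, _ = strconv.Atoi(parts[2])
    return minor, patch
}

// nextStep mimics the policy in UpgradeKubeCluster: move at most one minor
// version per iteration, picking the highest supported patch of the
// intermediate minor, and land exactly on the target once its minor is reached.
// It assumes at least one supported patch exists for every intermediate minor.
func nextStep(current, target string, supported []string) string {
    curMinor, _ := parse(current)
    tgtMinor, _ := parse(target)

    nextMinor := curMinor
    if tgtMinor != curMinor {
        nextMinor = curMinor + 1
    }
    if nextMinor == tgtMinor {
        return target
    }

    patches := []int{}
    for _, s := range supported {
        m, p := parse(s)
        if m == nextMinor {
            patches = append(patches, p)
        }
    }
    sort.Ints(patches)
    return fmt.Sprintf("v1.%d.%d", nextMinor, patches[len(patches)-1])
}

func main() {
    // Hypothetical list of versions that binaries are shipped for.
    supported := []string{"v1.15.12", "v1.16.10", "v1.16.13", "v1.17.9", "v1.18.6"}

    current, target := "v1.15.12", "v1.18.6"
    for current != target {
        next := nextStep(current, target, supported)
        // Prints: v1.15.12 -> v1.16.13, then v1.16.13 -> v1.17.9, then v1.17.9 -> v1.18.6
        fmt.Printf("%s -> %s\n", current, next)
        current = next
    }
}
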
@@ -12,10 +12,10 @@ import (
 func PrePullImages(mgr *manager.Manager) error {
     mgr.Logger.Infoln("Start to download images on all nodes")
 
-    return mgr.RunTaskOnAllNodes(prePullImages, true)
+    return mgr.RunTaskOnAllNodes(PullImages, true)
 }
 
-func prePullImages(mgr *manager.Manager, node *kubekeyapi.HostCfg, _ ssh.Connection) error {
+func PullImages(mgr *manager.Manager, node *kubekeyapi.HostCfg, _ ssh.Connection) error {
     i := images.Images{}
     i.Images = []images.Image{
         GetImage(mgr, "etcd"),

@@ -151,7 +151,15 @@ func GenerateClusterObjStr(opt *Options, storageNum int) (string, error) {
     })
 }
 
-func GenerateClusterObj(addons, k8sVersion, ksVersion, name, clusterCfgPath string, ksEnabled bool) error {
+func GenerateClusterObj(addons, k8sVersion, ksVersion, name, kubeconfig, clusterCfgPath string, ksEnabled, fromCluster bool) error {
+    if fromCluster {
+        err := GenerateConfigFromCluster(clusterCfgPath, kubeconfig, name, ksVersion, ksEnabled)
+        if err != nil {
+            return err
+        }
+        return nil
+    }
+
     opt := Options{}
     if name != "" {
         output := strings.Split(name, ".")

@@ -220,8 +228,8 @@ func GenerateClusterObj(addons, k8sVersion, ksVersion, name, clusterCfgPath stri
 
     if clusterCfgPath != "" {
         CheckConfigFileStatus(clusterCfgPath)
-        cmd := fmt.Sprintf("echo %s | base64 -d > %s", ClusterObjStrBase64, clusterCfgPath)
-        output, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
+        cmdStr := fmt.Sprintf("echo %s | base64 -d > %s", ClusterObjStrBase64, clusterCfgPath)
+        output, err := exec.Command("/bin/sh", "-c", cmdStr).CombinedOutput()
         if err != nil {
             return errors.Wrap(errors.WithStack(err), fmt.Sprintf("Failed to write config to %s: %s", clusterCfgPath, strings.TrimSpace(string(output))))
         }

@@ -44,7 +44,7 @@ func ParseClusterCfg(clusterCfgPath, k8sVersion, ksVersion string, ksEnabled boo
         }
         clusterCfg = AllinoneCfg(user, k8sVersion, ksVersion, ksEnabled, logger)
     } else {
-        cfg, err := ParseCfg(clusterCfgPath)
+        cfg, err := ParseCfg(clusterCfgPath, k8sVersion)
         if err != nil {
             return nil, err
         }

@@ -54,19 +54,21 @@ func ParseClusterCfg(clusterCfgPath, k8sVersion, ksVersion string, ksEnabled boo
     return clusterCfg, nil
 }
 
-func ParseCfg(clusterCfgPath string) (*kubekeyapi.Cluster, error) {
+func ParseCfg(clusterCfgPath, k8sVersion string) (*kubekeyapi.Cluster, error) {
     clusterCfg := kubekeyapi.Cluster{}
     fp, err := filepath.Abs(clusterCfgPath)
     if err != nil {
         return nil, errors.Wrap(err, "Failed to look up current directory")
     }
+    if len(k8sVersion) != 0 {
+        _ = exec.Command("/bin/sh", "-c", fmt.Sprintf("sed -i \"/version/s/\\:.*/\\: %s/g\" %s", k8sVersion, fp)).Run()
+    }
     file, err := os.Open(fp)
     if err != nil {
         return nil, errors.Wrap(err, "Unable to open the given cluster configuration file")
     }
     defer file.Close()
     b1 := bufio.NewReader(file)
 
     for {
         result := make(map[string]interface{})
         content, err := k8syaml.NewYAMLReader(b1).Read()

@@ -97,7 +99,7 @@ func ParseCfg(clusterCfgPath string) (*kubekeyapi.Cluster, error) {
     switch labels["version"] {
     case "v3.0.0":
         configMapBase64 = base64.StdEncoding.EncodeToString(content)
-        kubesphereYaml, err = kubesphere.GenerateKubeSphereYaml(repo, "v3.0.0-dev")
+        kubesphereYaml, err = kubesphere.GenerateKubeSphereYaml(repo, "latest")
         if err != nil {
             return nil, err
         }

@@ -161,6 +163,10 @@ func AllinoneCfg(user *user.User, k8sVersion, ksVersion string, ksEnabled bool,
         allinoneCfg.Spec.Kubernetes = kubekeyapi.Kubernetes{
             Version: k8sVersion,
         }
+    } else {
+        allinoneCfg.Spec.Kubernetes = kubekeyapi.Kubernetes{
+            Version: kubekeyapi.DefaultKubeVersion,
+        }
     }
     allinoneCfg.Spec.RoleGroups = kubekeyapi.RoleGroups{
         Etcd: []string{"ks-allinone"},

@@ -178,11 +184,11 @@ func AllinoneCfg(user *user.User, k8sVersion, ksVersion string, ksEnabled bool,
     switch strings.TrimSpace(ksVersion) {
     case "":
         configMapBase64 = base64.StdEncoding.EncodeToString([]byte(kubesphere.V3_0_0))
-        kubesphereYaml, _ = kubesphere.GenerateKubeSphereYaml("", "v3.0.0-dev")
+        kubesphereYaml, _ = kubesphere.GenerateKubeSphereYaml("", "latest")
         allinoneCfg.Spec.KubeSphere.Version = "v3.0.0"
     case "v3.0.0":
         configMapBase64 = base64.StdEncoding.EncodeToString([]byte(kubesphere.V3_0_0))
-        kubesphereYaml, _ = kubesphere.GenerateKubeSphereYaml("", "v3.0.0-dev")
+        kubesphereYaml, _ = kubesphere.GenerateKubeSphereYaml("", "latest")
         allinoneCfg.Spec.KubeSphere.Version = "v3.0.0"
     case "v2.1.1":
         configMapBase64 = base64.StdEncoding.EncodeToString([]byte(kubesphere.V2_1_1))

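When --with-kubernetes is set, ParseCfg now rewrites the version: line of the existing configuration file in place (via sed) before parsing it, so the flag overrides whatever version the file declares. A rough Go illustration of the same substitution, using regexp instead of shelling out; the helper name and sample YAML below are illustrative only:

package main

import (
    "fmt"
    "regexp"
    "strings"
)

// overrideVersion mirrors the in-place sed above: on every line containing
// "version", everything from the first colon onward is replaced with the
// requested Kubernetes version.
func overrideVersion(yaml, k8sVersion string) string {
    re := regexp.MustCompile(`:.*`)
    lines := strings.Split(yaml, "\n")
    for i, line := range lines {
        if strings.Contains(line, "version") {
            lines[i] = re.ReplaceAllString(line, ": "+k8sVersion)
        }
    }
    return strings.Join(lines, "\n")
}

func main() {
    sample := "kubernetes:\n  version: v1.17.9\n  imageRepo: kubesphere\n"
    fmt.Print(overrideVersion(sample, "v1.18.6"))
    // Output:
    // kubernetes:
    //   version: v1.18.6
    //   imageRepo: kubesphere
}
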
@@ -0,0 +1,300 @@
+/*
+Copyright 2020 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+    "bytes"
+    "context"
+    "encoding/base64"
+    "fmt"
+    kubekeyapi "github.com/kubesphere/kubekey/pkg/apis/kubekey/v1alpha1"
+    "github.com/kubesphere/kubekey/pkg/kubesphere"
+    "github.com/kubesphere/kubekey/pkg/util"
+    "github.com/lithammer/dedent"
+    "github.com/pkg/errors"
+    "github.com/spf13/viper"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/client-go/kubernetes"
+    "k8s.io/client-go/tools/clientcmd"
+    "net"
+    "os"
+    "os/exec"
+    "path/filepath"
+    "strings"
+    "text/template"
+)
+
+var (
+    ClusterCfgTempl = template.Must(template.New("ClusterCfg").Parse(
+        dedent.Dedent(`apiVersion: kubekey.kubesphere.io/v1alpha1
+kind: Cluster
+metadata:
+  name: {{ .Options.Name }}
+spec:
+  hosts:
+  {{- range .Options.Hosts }}
+  - {{ . }}
+  {{- end }}
+  roleGroups:
+    etcd: []
+    master:
+    {{- range .Options.MasterGroup }}
+    - {{ . }}
+    {{- end }}
+    worker:
+    {{- range .Options.WorkerGroup }}
+    - {{ . }}
+    {{- end }}
+  controlPlaneEndpoint:
+    domain: {{ .Options.ControlPlaneEndpointDomain }}
+    address: {{ .Options.ControlPlaneEndpointAddress }}
+    port: {{ .Options.ControlPlaneEndpointPort }}
+  kubernetes:
+    version: {{ .Options.KubeVersion }}
+    imageRepo: {{ .Options.ImageRepo }}
+    clusterName: {{ .Options.ClusterName }}
+    proxyMode: {{ .Options.ProxyMode }}
+    masqueradeAll: {{ .Options.MasqueradeAll }}
+    maxPods: {{ .Options.MaxPods }}
+    nodeCidrMaskSize: {{ .Options.NodeCidrMaskSize }}
+  network:
+    plugin: {{ .Options.NetworkPlugin }}
+    kubePodsCIDR: {{ .Options.PodNetworkCidr }}
+    kubeServiceCIDR: {{ .Options.ServiceNetworkCidr }}
+  registry:
+    privateRegistry: ""
+
+{{ if .Options.KubeSphereEnabled }}
+{{ .Options.KubeSphereConfigMap }}
+{{ end }}
+`)))
+)
+
+func GenerateClusterCfgStr(opt *OptionsCluster) (string, error) {
+    return util.Render(ClusterCfgTempl, util.Data{
+        "Options": opt,
+    })
+}
+
+type OptionsCluster struct {
+    Name string
+    Hosts []string
+    MasterGroup []string
+    WorkerGroup []string
+    KubeVersion string
+    ImageRepo string
+    ClusterName string
+    MasqueradeAll string
+    ProxyMode string
+    MaxPods string
+    NodeCidrMaskSize string
+    PodNetworkCidr string
+    ServiceNetworkCidr string
+    NetworkPlugin string
+    ControlPlaneEndpointDomain string
+    ControlPlaneEndpointAddress string
+    ControlPlaneEndpointPort string
+    KubeSphereConfigMap string
+    KubeSphereEnabled bool
+}
+
+func GetInfoFromCluster(config, name string) (*OptionsCluster, error) {
+    var kubeconfig string
+    if config != "" {
+        config, err := filepath.Abs(config)
+        if err != nil {
+            return nil, errors.Wrap(err, "Failed to look up current directory")
+        }
+        kubeconfig = config
+    } else {
+        kubeconfig = filepath.Join(homeDir(), ".kube", "config")
+    }
+    // use the current context in kubeconfig
+    configCluster, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+    if err != nil {
+        return nil, err
+    }
+    // create the clientset
+    clientset, err := kubernetes.NewForConfig(configCluster)
+    if err != nil {
+        return nil, err
+    }
+
+    nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+    if err != nil {
+        return nil, err
+    }
+
+    opt := OptionsCluster{}
+    if name != "" {
+        output := strings.Split(name, ".")
+        opt.Name = output[0]
+    } else {
+        opt.Name = "config-sample"
+    }
+
+    for _, node := range nodes.Items {
+        nodeCfg := kubekeyapi.HostCfg{}
+        nodeInfo, err := clientset.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
+        if err != nil {
+            return nil, err
+        }
+        for _, address := range nodeInfo.Status.Addresses {
+            if address.Type == "Hostname" {
+                nodeCfg.Name = address.Address
+            }
+            if address.Type == "InternalIP" {
+                nodeCfg.Address = address.Address
+                nodeCfg.InternalAddress = address.Address
+            }
+        }
+        if _, ok := nodeInfo.Labels["node-role.kubernetes.io/master"]; ok {
+            opt.MasterGroup = append(opt.MasterGroup, nodeCfg.Name)
+            if _, ok := nodeInfo.Labels["node-role.kubernetes.io/worker"]; ok {
+                opt.WorkerGroup = append(opt.WorkerGroup, nodeCfg.Name)
+            }
+        } else {
+            opt.WorkerGroup = append(opt.WorkerGroup, nodeCfg.Name)
+        }
+        nodeCfgStr := fmt.Sprintf("{name: %s, address: %s, internalAddress: %s}", nodeCfg.Name, nodeCfg.Address, nodeCfg.InternalAddress)
+        opt.Hosts = append(opt.Hosts, nodeCfgStr)
+
+        opt.MaxPods = nodeInfo.Status.Capacity.Pods().String()
+    }
+
+    kubeadmConfig, err := clientset.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "kubeadm-config", metav1.GetOptions{})
+    if err != nil {
+        return nil, err
+    }
+
+    viper.SetConfigType("yaml")
+    //fmt.Println(kubeadmConfig.Data["ClusterConfiguration"])
+    if err := viper.ReadConfig(bytes.NewBuffer([]byte(kubeadmConfig.Data["ClusterConfiguration"]))); err != nil {
+        return nil, err
+    }
+    opt.KubeVersion = viper.GetString("kubernetesVersion")
+    opt.ImageRepo = viper.GetString("imageRepository")
+    opt.ClusterName = viper.GetString("clusterName")
+    opt.PodNetworkCidr = viper.GetString("networking.podSubnet")
+    opt.ServiceNetworkCidr = viper.GetString("networking.serviceSubnet")
+    if viper.IsSet("controllerManager.extraArgs.node-cidr-mask-size") {
+        opt.NodeCidrMaskSize = viper.GetString("controllerManager.extraArgs.node-cidr-mask-size")
+    } else {
+        opt.NodeCidrMaskSize = "24"
+    }
+    if viper.IsSet("controlPlaneEndpoint") {
+        controlPlaneEndpointStr := viper.GetString("controlPlaneEndpoint")
+        strList := strings.Split(controlPlaneEndpointStr, ":")
+        opt.ControlPlaneEndpointPort = strList[len(strList)-1]
+        strList = strList[:len(strList)-1]
+        address := strings.Join(strList, ":")
+        ip := net.ParseIP(address)
+        if ip != nil {
+            opt.ControlPlaneEndpointAddress = address
+            opt.ControlPlaneEndpointDomain = "lb.kubesphere.local"
+        } else {
+            opt.ControlPlaneEndpointAddress = ""
+            opt.ControlPlaneEndpointDomain = address
+        }
+    }
+
+    pods, err := clientset.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
+    for _, pod := range pods.Items {
+        if strings.Contains(pod.Name, "calico") {
+            opt.NetworkPlugin = "calico"
+        }
+        if strings.Contains(pod.Name, "flannel") {
+            opt.NetworkPlugin = "flannel"
+        }
+    }
+
+    kubeProxyConfig, err := clientset.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "kube-proxy", metav1.GetOptions{})
+    if err != nil {
+        return nil, err
+    }
+
+    viper.SetConfigType("yaml")
+    if err := viper.ReadConfig(bytes.NewBuffer([]byte(kubeProxyConfig.Data["config.conf"]))); err != nil {
+        return nil, err
+    }
+
+    opt.MasqueradeAll = viper.GetString("iptables.masqueradeAll")
+    if viper.GetString("mode") == "ipvs" {
+        opt.ProxyMode = viper.GetString("mode")
+    } else {
+        opt.ProxyMode = "iptables"
+    }
+
+    return &opt, nil
+}
+
+func GenerateConfigFromCluster(cfgPath, kubeconfig, name, ksVersion string, ksEnabled bool) error {
+    opt, err := GetInfoFromCluster(kubeconfig, name)
+    if err != nil {
+        return err
+    }
+
+    if ksEnabled {
+        switch strings.TrimSpace(ksVersion) {
+        case "":
+            opt.KubeSphereConfigMap = kubesphere.V3_0_0
+        case "v3.0.0":
+            opt.KubeSphereConfigMap = kubesphere.V3_0_0
+        case "v2.1.1":
+            opt.KubeSphereConfigMap = kubesphere.V2_1_1
+        default:
+            return errors.New(fmt.Sprintf("Unsupported version: %s", strings.TrimSpace(ksVersion)))
+        }
+    }
+
+    opt.KubeSphereEnabled = ksEnabled
+
+    ClusterCfgStr, err := GenerateClusterCfgStr(opt)
+    if err != nil {
+        return errors.Wrap(err, "Faild to generate cluster config")
+    }
+    ClusterCfgStrBase64 := base64.StdEncoding.EncodeToString([]byte(ClusterCfgStr))
+
+    if cfgPath != "" {
+        CheckConfigFileStatus(cfgPath)
+        cmd := fmt.Sprintf("echo %s | base64 -d > %s", ClusterCfgStrBase64, cfgPath)
+        output, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
+        if err != nil {
+            return errors.Wrap(errors.WithStack(err), fmt.Sprintf("Failed to write config to %s: %s", cfgPath, strings.TrimSpace(string(output))))
+        }
+    } else {
+        currentDir, err := filepath.Abs(filepath.Dir(os.Args[0]))
+        if err != nil {
+            return errors.Wrap(err, "Failed to get current dir")
+        }
+        CheckConfigFileStatus(fmt.Sprintf("%s/%s.yaml", currentDir, opt.Name))
+        cmd := fmt.Sprintf("echo %s | base64 -d > %s/%s.yaml", ClusterCfgStrBase64, currentDir, opt.Name)
+        err1 := exec.Command("/bin/sh", "-c", cmd).Run()
+        if err1 != nil {
+            return err1
+        }
+    }
+
+    return nil
+}
+
+func homeDir() string {
+    if h := os.Getenv("HOME"); h != "" {
+        return h
+    }
+    return os.Getenv("USERPROFILE") // windows
+}

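GetInfoFromCluster recovers most settings by feeding the kubeadm-config and kube-proxy ConfigMap payloads to viper and reading dotted keys. A self-contained sketch of that parsing step against an inline sample (the YAML values below are made up, not taken from a real cluster):

package main

import (
    "bytes"
    "fmt"

    "github.com/spf13/viper"
)

func main() {
    // Stand-in for kubeadmConfig.Data["ClusterConfiguration"]; values are invented.
    clusterConfiguration := `
kubernetesVersion: v1.17.9
imageRepository: kubesphere
clusterName: cluster.local
networking:
  podSubnet: 10.233.64.0/18
  serviceSubnet: 10.233.0.0/18
`

    viper.SetConfigType("yaml")
    if err := viper.ReadConfig(bytes.NewBuffer([]byte(clusterConfiguration))); err != nil {
        panic(err)
    }

    // Nested keys are addressed with dotted paths, exactly as in GetInfoFromCluster.
    fmt.Println(viper.GetString("kubernetesVersion"))        // v1.17.9
    fmt.Println(viper.GetString("networking.podSubnet"))     // 10.233.64.0/18
    fmt.Println(viper.GetString("networking.serviceSubnet")) // 10.233.0.0/18
}
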
@@ -36,7 +36,7 @@ type KubeBinary struct {
 }
 
 var (
-    fileSha256 = map[string]map[string]map[string]string{
+    FileSha256 = map[string]map[string]map[string]string{
         kubeadm: {
             amd64: {
                 "v1.15.12": "e052bae41e731921a9197b4d078c30d33fac5861716dc275bfee4670addbac9b",

@@ -139,6 +139,6 @@ var (
 )
 
 func (binary *KubeBinary) GetSha256() string {
-    sha256 := fileSha256[binary.Name][binary.Arch][binary.Version]
+    sha256 := FileSha256[binary.Name][binary.Arch][binary.Version]
     return sha256
 }

@@ -415,7 +415,7 @@ spec:
 
 func GenerateKubeSphereYaml(repo, version string) (string, error) {
     if repo == "" {
-        if strings.Contains(version, "v3.0.0") {
+        if strings.Contains(version, "latest") {
             repo = "kubespheredev"
         } else {
             repo = "kubesphere"

@@ -120,7 +120,7 @@ func DeployRBDProvisioner(mgr *manager.Manager) error {
         return errors.Wrap(errors.WithStack(err1), "Failed to generate rbd-provisioner manifests")
     }
 
-    _, err2 := mgr.Runner.ExecuteCmd("sudo -E /bin/sh \"/usr/local/bin/kubectl apply -f /etc/kubernetes/addons/rbd-provisioner.yaml -n kube-system\"", 5, true)
+    _, err2 := mgr.Runner.ExecuteCmd("sudo -E /bin/sh -c \"/usr/local/bin/kubectl apply -f /etc/kubernetes/addons/rbd-provisioner.yaml -n kube-system\"", 5, true)
     if err2 != nil {
         return errors.Wrap(errors.WithStack(err2), "Failed to deploy rbd-provisioner.yaml")
     }

@@ -0,0 +1,16 @@
+/*
+Copyright 2020 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package upgrade

@@ -0,0 +1,69 @@
+/*
+Copyright 2020 The KubeSphere Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package upgrade
+
+import (
+    "bufio"
+    "fmt"
+    kubekeyapi "github.com/kubesphere/kubekey/pkg/apis/kubekey/v1alpha1"
+    "github.com/kubesphere/kubekey/pkg/util/manager"
+    "github.com/kubesphere/kubekey/pkg/util/ssh"
+    "os"
+    "strings"
+)
+
+func GetClusterInfo(mgr *manager.Manager) error {
+    return mgr.RunTaskOnMasterNodes(getClusterInfo, true)
+}
+
+func getClusterInfo(mgr *manager.Manager, node *kubekeyapi.HostCfg, _ ssh.Connection) error {
+    if mgr.Runner.Index == 0 {
+        componentstatus, err := mgr.Runner.ExecuteCmd("sudo -E /bin/bash -c \"/usr/local/bin/kubectl get componentstatus\"", 2, false)
+        if err != nil {
+            return err
+        }
+        fmt.Println("Cluster components status:")
+        fmt.Println(componentstatus + "\n")
+        nodestatus, err := mgr.Runner.ExecuteCmd("sudo -E /bin/sh -c \"/usr/local/bin/kubectl get node -o wide\"", 2, false)
+        if err != nil {
+            return err
+        }
+        fmt.Println("Cluster nodes status:")
+        fmt.Println(nodestatus + "\n\n")
+
+        reader := bufio.NewReader(os.Stdin)
+    Loop:
+        for {
+            fmt.Printf("Continue upgrading cluster? [yes/no]: ")
+            input, err := reader.ReadString('\n')
+            if err != nil {
+                mgr.Logger.Fatal(err)
+            }
+            input = strings.TrimSpace(input)
+
+            switch input {
+            case "yes":
+                break Loop
+            case "no":
+                os.Exit(0)
+            default:
+                continue
+            }
+        }
+    }
+    return nil
+}

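Before any node is touched, the new GetClusterInfo task prints component and node status and waits for an explicit answer from the operator. A minimal standalone sketch of that confirmation loop, with the SSH runner and status output left out; the helper name is illustrative only:

package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

// confirm keeps asking until the user types exactly "yes" or "no",
// mirroring the loop in getClusterInfo.
func confirm(prompt string) bool {
    reader := bufio.NewReader(os.Stdin)
    for {
        fmt.Printf("%s [yes/no]: ", prompt)
        input, err := reader.ReadString('\n')
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        switch strings.TrimSpace(input) {
        case "yes":
            return true
        case "no":
            return false
        }
    }
}

func main() {
    if !confirm("Continue upgrading cluster?") {
        os.Exit(0)
    }
    fmt.Println("continuing...")
}
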
@@ -19,8 +19,8 @@ package upgrade
 import (
     "fmt"
     "github.com/kubesphere/kubekey/pkg/cluster/kubernetes"
-    "github.com/kubesphere/kubekey/pkg/cluster/preinstall"
     "github.com/kubesphere/kubekey/pkg/config"
+    "github.com/kubesphere/kubekey/pkg/kubesphere"
     "github.com/kubesphere/kubekey/pkg/util"
     "github.com/kubesphere/kubekey/pkg/util/executor"
     "github.com/kubesphere/kubekey/pkg/util/manager"

@@ -49,11 +49,12 @@ func UpgradeCluster(clusterCfgFile, k8sVersion, ksVersion string, logger *log.Lo
 
 func ExecTasks(mgr *manager.Manager) error {
     scaleTasks := []manager.Task{
-        {Task: preinstall.Precheck, ErrMsg: "Failed to precheck"},
-        {Task: preinstall.InitOS, ErrMsg: "Failed to download kube binaries"},
-        {Task: preinstall.PrePullImages, ErrMsg: "Failed to pre-pull images"},
-        {Task: kubernetes.UpgradeKubeMasters, ErrMsg: "Failed to upgrade kube masters"},
-        {Task: kubernetes.UpgradeKubeWorkers, ErrMsg: "Failed to upgrade kube workers"},
+        {Task: GetClusterInfo, ErrMsg: "Failed to get cluster info"},
+        //{Task: preinstall.InitOS, ErrMsg: "Failed to download kube binaries"},
+        //{Task: preinstall.PrePullImages, ErrMsg: "Failed to pre-pull images"},
+        {Task: kubernetes.GetCurrentVersions, ErrMsg: "Failed to get current version"},
+        {Task: kubernetes.UpgradeKubeCluster, ErrMsg: "Failed to upgrade kube cluster"},
+        {Task: kubesphere.DeployKubeSphere, ErrMsg: "Failed to upgrade kubesphere"},
     }
 
     for _, step := range scaleTasks {

@@ -62,7 +63,17 @@ func ExecTasks(mgr *manager.Manager) error {
         }
     }
 
-    mgr.Logger.Infoln("Congradulations! Upgrade cluster is successful.")
+    if mgr.KsEnable {
+        mgr.Logger.Infoln(`Upgrading is complete.
+
+Please check the result using the command:
+
+       kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
+
+`)
+    } else {
+        mgr.Logger.Infoln("Congradulations! Upgrade cluster is successful.")
+    }
 
     return nil
 }

@@ -82,13 +82,9 @@ type Data map[string]interface{}
 func Render(tmpl *template.Template, variables map[string]interface{}) (string, error) {
 
     var buf strings.Builder
-    //buf.WriteString(`set -xeu pipefail`)
-    //buf.WriteString("\n\n")
-    //buf.WriteString(`export "PATH=$PATH:/sbin:/usr/local/bin:/opt/bin"`)
-    //buf.WriteString("\n\n")
 
     if err := tmpl.Execute(&buf, variables); err != nil {
-        return "", errors.Wrap(err, "Failed to render cmd or script template")
+        return "", errors.Wrap(err, "Failed to render template")
     }
     return buf.String(), nil
 }