separation of install and scale

pixiake 2020-07-04 23:10:43 +08:00
parent 06bfb14786
commit aebf0c456a
3 changed files with 47 additions and 92 deletions

@@ -16,7 +16,7 @@ limitations under the License.
package cmd
import (
"github.com/kubesphere/kubekey/pkg/install"
"github.com/kubesphere/kubekey/pkg/scale"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/spf13/cobra"
)
@@ -27,7 +27,7 @@ var scaleCmd = &cobra.Command{
Short: "Scale a cluster according to the new nodes information from the specified configuration file",
RunE: func(cmd *cobra.Command, args []string) error {
logger := util.InitLogger(verbose)
- return install.CreateCluster(clusterCfgFile, "", "", logger, false, verbose)
+ return scale.ScaleCluster(clusterCfgFile, "", "", logger, false, verbose)
},
}
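
With this change, scaling no longer reuses the install path: install.CreateCluster is swapped for the dedicated scale.ScaleCluster entry point defined below. As a minimal sketch, the new function can also be driven directly; the config path and flag values here are illustrative stand-ins, not part of this commit:

package main

import (
	"github.com/kubesphere/kubekey/pkg/scale"
	"github.com/kubesphere/kubekey/pkg/util"
)

func main() {
	// verbose=true mirrors the command's verbose flag.
	logger := util.InitLogger(true)
	// Empty k8sVersion/ksVersion fall back to defaults during config
	// parsing; ksEnabled=false skips the KubeSphere-specific steps.
	if err := scale.ScaleCluster("config-sample.yaml", "", "", logger, false, true); err != nil {
		logger.Fatal(err)
	}
}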

@@ -1,84 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scale
import (
"fmt"
kubekeyapi "github.com/kubesphere/kubekey/pkg/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/util/manager"
ssh "github.com/kubesphere/kubekey/pkg/util/ssh"
log "github.com/sirupsen/logrus"
)
type Executor struct {
cluster *kubekeyapi.ClusterSpec
logger *log.Logger
Verbose bool
}
func NewExecutor(cluster *kubekeyapi.ClusterSpec, logger *log.Logger, verbose bool) *Executor {
return &Executor{
cluster: cluster,
logger: logger,
Verbose: verbose,
}
}
func (executor *Executor) Execute() error {
mgr, err := executor.createManager()
if err != nil {
return err
}
return ExecTasks(mgr)
}
func (executor *Executor) createManager() (*manager.Manager, error) {
mgr := &manager.Manager{}
hostGroups := executor.cluster.GroupHosts()
mgr.AllNodes = hostGroups.All
mgr.EtcdNodes = hostGroups.Etcd
mgr.MasterNodes = hostGroups.Master
mgr.WorkerNodes = hostGroups.Worker
mgr.K8sNodes = hostGroups.K8s
mgr.Cluster = executor.cluster
mgr.ClusterHosts = GenerateHosts(hostGroups, executor.cluster)
mgr.Connector = ssh.NewConnector()
mgr.Logger = executor.logger
mgr.Verbose = executor.Verbose
return mgr, nil
}
func GenerateHosts(hostGroups *kubekeyapi.HostGroups, cfg *kubekeyapi.ClusterSpec) []string {
var lbHost string
hostsList := []string{}
if cfg.ControlPlaneEndpoint.Address != "" {
lbHost = fmt.Sprintf("%s %s", cfg.ControlPlaneEndpoint.Address, cfg.ControlPlaneEndpoint.Domain)
} else {
lbHost = fmt.Sprintf("%s %s", hostGroups.Master[0].InternalAddress, cfg.ControlPlaneEndpoint.Domain)
}
for _, host := range cfg.Hosts {
if host.Name != "" {
hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", host.InternalAddress, host.Name, cfg.Kubernetes.ClusterName, host.Name))
}
}
hostsList = append(hostsList, lbHost)
return hostsList
}
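
For reference, the removed GenerateHosts built /etc/hosts-style entries for every named host plus one for the control-plane endpoint. With hypothetical hosts node1 and node2 (internal addresses 192.168.0.2 and 192.168.0.3), a cluster name of cluster.local, a control-plane domain of lb.kubesphere.local, and no dedicated endpoint address, it would return roughly:

192.168.0.2 node1.cluster.local node1
192.168.0.3 node2.cluster.local node2
192.168.0.2 lb.kubesphere.local

With the endpoint address left empty, the domain falls back to the first master's internal address, as the else branch above shows.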

@@ -17,30 +17,69 @@ limitations under the License.
package scale
import (
"fmt"
"github.com/kubesphere/kubekey/pkg/cluster/etcd"
"github.com/kubesphere/kubekey/pkg/cluster/kubernetes"
"github.com/kubesphere/kubekey/pkg/cluster/preinstall"
"github.com/kubesphere/kubekey/pkg/config"
"github.com/kubesphere/kubekey/pkg/container-engine/docker"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/kubesphere/kubekey/pkg/util/executor"
"github.com/kubesphere/kubekey/pkg/util/manager"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"os"
"path/filepath"
)
func ScaleCluster(clusterCfgFile, k8sVersion, ksVersion string, logger *log.Logger, ksEnabled, verbose bool) error {
currentDir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return errors.Wrap(err, "Faild to get current dir")
}
if err := util.CreateDir(fmt.Sprintf("%s/kubekey", currentDir)); err != nil {
return errors.Wrap(err, "Failed to create work dir")
}
cfg, err := config.ParseClusterCfg(clusterCfgFile, k8sVersion, ksVersion, ksEnabled, logger)
if err != nil {
return errors.Wrap(err, "Failed to download cluster config")
}
return Execute(executor.NewExecutor(&cfg.Spec, logger, verbose))
}
func ExecTasks(mgr *manager.Manager) error {
scaleTasks := []manager.Task{
{Task: preinstall.Precheck, ErrMsg: "Failed to precheck"},
{Task: preinstall.InitOS, ErrMsg: "Failed to init OS"},
{Task: docker.InstallerDocker, ErrMsg: "Failed to install docker"},
{Task: preinstall.PrePullImages, ErrMsg: "Failed to pre-pull images"},
{Task: etcd.GenerateEtcdCerts, ErrMsg: "Failed to generate etcd certs"},
{Task: etcd.SyncEtcdCertsToMaster, ErrMsg: "Failed to sync etcd certs"},
{Task: etcd.GenerateEtcdService, ErrMsg: "Failed to create etcd service"},
{Task: etcd.SetupEtcdCluster, ErrMsg: "Failed to start etcd cluster"},
{Task: etcd.RefreshEtcdConfig, ErrMsg: "Failed to refresh etcd configuration"},
{Task: kubernetes.GetClusterStatus, ErrMsg: "Failed to get cluster status"},
{Task: kubernetes.SyncKubeBinaries, ErrMsg: "Failed to sync kube binaries"},
//{Task: kubernetes.ConfigureKubeletService, ErrMsg: "Failed to sync kube binaries"},
//{Task: kubernetes.GetJoinNodesCmd, ErrMsg: "Failed to get join cmd"},
{Task: kubernetes.JoinNodesToCluster, ErrMsg: "Failed to join node"},
}
- for _, task := range scaleTasks {
- if err := task.Run(mgr); err != nil {
- return errors.Wrap(err, task.ErrMsg)
+ for _, step := range scaleTasks {
+ if err := step.Run(mgr); err != nil {
+ return errors.Wrap(err, step.ErrMsg)
}
}
mgr.Logger.Infoln("Cluster scaling is successful.")
mgr.Logger.Infoln("Congradulations! Scaling cluster is successful.")
return nil
}
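
The loop above relies on manager.Task pairing a step function with its error message. A hedged sketch of the shape these call sites imply (the real type lives in pkg/util/manager and may carry extra fields):

// Sketch only: inferred from how Task is used in this file.
type Task struct {
	Task   func(*Manager) error // the step to run against the cluster
	ErrMsg string               // context used to wrap a failure
}

// Run executes the wrapped step with the shared Manager.
func (t *Task) Run(mgr *Manager) error {
	return t.Task(mgr)
}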
func Execute(executor *executor.Executor) error {
mgr, err := executor.CreateManager()
if err != nil {
return err
}
return ExecTasks(mgr)
}
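
Execute mirrors the Executor.Execute method deleted above, but it builds the Manager through the shared pkg/util/executor package rather than a scale-local copy, so install and scale now construct their managers through one code path and differ only in the task list they run.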