mirror of https://github.com/kubesphere/kubekey.git
synced 2025-12-26 01:22:51 +00:00

refactor the cluster struct

This commit is contained in:
parent 54c86145a8
commit cc2b19c83e
@@ -11,20 +11,20 @@ func NewCmdCreateCluster() *cobra.Command {
 		clusterCfgFile string
 		addons         string
 		pkgDir         string
-		Verbose        bool
+		verbose        bool
 	)
 	var clusterCmd = &cobra.Command{
 		Use:   "cluster",
 		Short: "Create Kubernetes Cluster",
 		RunE: func(cmd *cobra.Command, args []string) error {
-			logger := util.InitLogger(Verbose)
-			return install.CreateCluster(clusterCfgFile, logger, addons, pkgDir, Verbose)
+			logger := util.InitLogger(verbose)
+			return install.CreateCluster(clusterCfgFile, logger, addons, pkgDir, verbose)
 		},
 	}

 	clusterCmd.Flags().StringVarP(&clusterCfgFile, "config", "f", "", "cluster info config")
 	clusterCmd.Flags().StringVarP(&addons, "add", "", "", "add plugins")
 	clusterCmd.Flags().StringVarP(&pkgDir, "pkg", "", "", "release package (offline)")
-	clusterCmd.Flags().BoolVarP(&Verbose, "debug", "", true, "debug info")
+	clusterCmd.Flags().BoolVarP(&verbose, "debug", "", true, "debug info")
 	return clusterCmd
 }
@@ -1,17 +1,7 @@
 package create

 import (
-	"bufio"
-	"fmt"
-	kubekeyapi "github.com/kubesphere/kubekey/pkg/apis/kubekey/v1alpha1"
-	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
-	"gopkg.in/yaml.v2"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
 )

 func NewCmdCreateCfg() *cobra.Command {
@@ -33,212 +23,212 @@ func NewCmdCreateCfg() *cobra.Command {
 	return clusterCfgCmd
 }

-func getConfig(reader *bufio.Reader, text, def string) (string, error) {
-	for {
-		if def == "" {
-			fmt.Printf("[+] %s [%s]: ", text, "none")
-		} else {
-			fmt.Printf("[+] %s [%s]: ", text, def)
-		}
-		input, err := reader.ReadString('\n')
-		if err != nil {
-			return "", err
-		}
-		input = strings.TrimSpace(input)
-
-		if input != "" {
-			return input, nil
-		}
-		return def, nil
-	}
-}
-
-func writeConfig(cluster *kubekeyapi.K2ClusterSpec, configFile string, print bool) error {
-	yamlConfig, err := yaml.Marshal(*cluster)
-	if err != nil {
-		return err
-	}
-	log.Infof("Deploying cluster info file: %s", configFile)
-
-	configString := fmt.Sprintf("%s", string(yamlConfig))
-	if print {
-		fmt.Printf("%s", configString)
-		//return nil
-	}
-	return ioutil.WriteFile(configFile, []byte(configString), 0640)
-}
-
-func clusterConfig() error {
-	clusterCfg := kubekeyapi.K2ClusterSpec{}
-	reader := bufio.NewReader(os.Stdin)
-
-	// Get number of hosts
-	numberOfHostsString, err := getConfig(reader, "Number of Hosts", "1")
-	if err != nil {
-		return err
-	}
-	numberOfHostsInt, err := strconv.Atoi(numberOfHostsString)
-	if err != nil {
-		return err
-	}
-
-	//sshKeyPath, err := getConfig(reader, "Cluster Level SSH Private Key Path", "~/.ssh-bak/id_rsa")
-	//if err != nil {
-	//	return err
-	//}
-	//clusterCfg.SSHKeyPath = sshKeyPath
-	// Get Hosts config
-	masterNumber := 0
-	clusterCfg.Hosts = make([]kubekeyapi.HostCfg, 0)
-	for i := 0; i < numberOfHostsInt; i++ {
-		hostCfg, isMaster, err := getHostConfig(reader, i)
-		if err != nil {
-			return err
-		}
-		clusterCfg.Hosts = append(clusterCfg.Hosts, *hostCfg)
-		if isMaster == true {
-			masterNumber = masterNumber + 1
-		}
-	}
-	if masterNumber > 1 {
-		lbCfg := kubekeyapi.LBKubeApiserverCfg{}
-		address, err := getConfig(reader, fmt.Sprintf("Address of LoadBalancer for KubeApiserver"), "")
-		if err != nil {
-			return err
-		}
-		lbCfg.Address = address
-
-		port, err := getConfig(reader, fmt.Sprintf("Port of LoadBalancer for KubeApiserver"), kubekeyapi.DefaultLBPort)
-		if err != nil {
-			return err
-		}
-		lbCfg.Port = port
-
-		domain, err := getConfig(reader, fmt.Sprintf("Address of LoadBalancer for KubeApiserver"), kubekeyapi.DefaultLBDomain)
-		if err != nil {
-			return err
-		}
-		lbCfg.Domain = domain
-
-		clusterCfg.LBKubeApiserver = lbCfg
-	}
-	// Get Kubernetes version
-	version, err := getConfig(reader, fmt.Sprintf("Kubernetes Version"), kubekeyapi.DefaultKubeVersion)
-	if err != nil {
-		return err
-	}
-	clusterCfg.KubeCluster.Version = version
-
-	// Get Kubernetes images repo
-	repo, err := getConfig(reader, fmt.Sprintf("Kubernetes Images Repo"), kubekeyapi.DefaultKubeImageRepo)
-	if err != nil {
-		return err
-	}
-	clusterCfg.KubeCluster.ImageRepo = repo
-
-	// Get Kubernetes cluster name
-	clusterName, err := getConfig(reader, fmt.Sprintf("Kubernetes Cluster Name"), kubekeyapi.DefaultClusterName)
-	if err != nil {
-		return err
-	}
-	clusterCfg.KubeCluster.ClusterName = clusterName
-
-	// Get Network config
-	networkConfig, err := getNetworkConfig(reader)
-	if err != nil {
-		return err
-	}
-	clusterCfg.Network = *networkConfig
-
-	dir, _ := os.Executable()
-	exPath := filepath.Dir(dir)
-	configFile := fmt.Sprintf("%s/%s", exPath, "cluster-info.yaml")
-	return writeConfig(&clusterCfg, configFile, true)
-}
-
-func getHostConfig(reader *bufio.Reader, index int) (*kubekeyapi.HostCfg, bool, error) {
-	isMaster := false
-	host := kubekeyapi.HostCfg{}
-	address, err := getConfig(reader, fmt.Sprintf("SSH Address of host (%d)", index+1), "")
-	if err != nil {
-		return nil, false, err
-	}
-	host.SSHAddress = address
-
-	port, err := getConfig(reader, fmt.Sprintf("SSH Port of host (%s)", address), kubekeyapi.DefaultSSHPort)
-	if err != nil {
-		return nil, false, err
-	}
-	host.Port = port
-
-	sshUser, err := getConfig(reader, fmt.Sprintf("SSH User of host (%s)", address), "root")
-	if err != nil {
-		return nil, false, err
-	}
-	host.User = sshUser
-
-	password, err := getConfig(reader, fmt.Sprintf("SSH Password of host (%s)", address), "")
-	if err != nil {
-		return nil, false, err
-	}
-	host.Password = password
-
-	hostRole, err := getConfig(reader, fmt.Sprintf("What's host (%s) role? (0: etcd, 1: master, 2: worker)", address), "012")
-	if err != nil {
-		return nil, false, err
-	}
-
-	if strings.Contains(hostRole, "0") {
-		host.Role = append(host.Role, kubekeyapi.ETCDRole)
-	}
-	if strings.Contains(hostRole, "1") {
-		host.Role = append(host.Role, kubekeyapi.MasterRole)
-		isMaster = true
-	}
-	if strings.Contains(hostRole, "2") {
-		host.Role = append(host.Role, kubekeyapi.WorkerRole)
-	}
-
-	hostnameOverride, err := getConfig(reader, fmt.Sprintf("Override Hostname of host (%s)", address), "")
-	if err != nil {
-		return nil, false, err
-	}
-	host.HostName = hostnameOverride
-
-	internalAddress, err := getConfig(reader, fmt.Sprintf("Internal IP of host (%s)", address), "")
-	if err != nil {
-		return nil, false, err
-	}
-	host.InternalAddress = internalAddress
-
-	return &host, isMaster, nil
-}
-
-func getNetworkConfig(reader *bufio.Reader) (*kubekeyapi.NetworkConfig, error) {
-	networkConfig := kubekeyapi.NetworkConfig{}
-
-	networkPlugin, err := getConfig(reader, "Network Plugin Type (0: calico, 1: flannel)", "0")
-	if err != nil {
-		return nil, err
-	}
-	networkPluginInt, _ := strconv.Atoi(networkPlugin)
-	if networkPluginInt == 0 {
-		networkConfig.Plugin = "calico"
-	}
-	if networkPluginInt == 1 {
-		networkConfig.Plugin = "flannel"
-	}
-
-	podsCIDR, err := getConfig(reader, "Specify range of IP addresses for the pod network.", kubekeyapi.DefaultPodsCIDR)
-	if err != nil {
-		return nil, err
-	}
-	networkConfig.KubePodsCIDR = podsCIDR
-
-	serviceCIDR, err := getConfig(reader, "Use alternative range of IP address for service VIPs.", kubekeyapi.DefaultServiceCIDR)
-	if err != nil {
-		return nil, err
-	}
-	networkConfig.KubeServiceCIDR = serviceCIDR
-	return &networkConfig, nil
-}
+//func getConfig(reader *bufio.Reader, text, def string) (string, error) {
+//	for {
+//		if def == "" {
+//			fmt.Printf("[+] %s [%s]: ", text, "none")
+//		} else {
+//			fmt.Printf("[+] %s [%s]: ", text, def)
+//		}
+//		input, err := reader.ReadString('\n')
+//		if err != nil {
+//			return "", err
+//		}
+//		input = strings.TrimSpace(input)
+//
+//		if input != "" {
+//			return input, nil
+//		}
+//		return def, nil
+//	}
+//}
+//
+//func writeConfig(cluster *kubekeyapi.K2ClusterSpec, configFile string, print bool) error {
+//	yamlConfig, err := yaml.Marshal(*cluster)
+//	if err != nil {
+//		return err
+//	}
+//	log.Infof("Deploying cluster info file: %s", configFile)
+//
+//	configString := fmt.Sprintf("%s", string(yamlConfig))
+//	if print {
+//		fmt.Printf("%s", configString)
+//		//return nil
+//	}
+//	return ioutil.WriteFile(configFile, []byte(configString), 0640)
+//}
+//
+//func clusterConfig() error {
+//	clusterCfg := kubekeyapi.K2ClusterSpec{}
+//	reader := bufio.NewReader(os.Stdin)
+//
+//	// Get number of hosts
+//	numberOfHostsString, err := getConfig(reader, "Number of Hosts", "1")
+//	if err != nil {
+//		return err
+//	}
+//	numberOfHostsInt, err := strconv.Atoi(numberOfHostsString)
+//	if err != nil {
+//		return err
+//	}
+//
+//	//sshKeyPath, err := getConfig(reader, "Cluster Level SSH Private Key Path", "~/.ssh-bak/id_rsa")
+//	//if err != nil {
+//	//	return err
+//	//}
+//	//clusterCfg.SSHKeyPath = sshKeyPath
+//	// Get Hosts config
+//	masterNumber := 0
+//	clusterCfg.Hosts = make([]kubekeyapi.HostCfg, 0)
+//	for i := 0; i < numberOfHostsInt; i++ {
+//		hostCfg, isMaster, err := getHostConfig(reader, i)
+//		if err != nil {
+//			return err
+//		}
+//		clusterCfg.Hosts = append(clusterCfg.Hosts, *hostCfg)
+//		if isMaster == true {
+//			masterNumber = masterNumber + 1
+//		}
+//	}
+//	if masterNumber > 1 {
+//		lbCfg := kubekeyapi.LBKubeApiserverCfg{}
+//		address, err := getConfig(reader, fmt.Sprintf("Address of LoadBalancer for KubeApiserver"), "")
+//		if err != nil {
+//			return err
+//		}
+//		lbCfg.Address = address
+//
+//		port, err := getConfig(reader, fmt.Sprintf("Port of LoadBalancer for KubeApiserver"), kubekeyapi.DefaultLBPort)
+//		if err != nil {
+//			return err
+//		}
+//		lbCfg.Port = port
+//
+//		domain, err := getConfig(reader, fmt.Sprintf("Address of LoadBalancer for KubeApiserver"), kubekeyapi.DefaultLBDomain)
+//		if err != nil {
+//			return err
+//		}
+//		lbCfg.Domain = domain
+//
+//		clusterCfg.LBKubeApiserver = lbCfg
+//	}
+//	// Get Kubernetes version
+//	version, err := getConfig(reader, fmt.Sprintf("Kubernetes Version"), kubekeyapi.DefaultKubeVersion)
+//	if err != nil {
+//		return err
+//	}
+//	clusterCfg.KubeCluster.Version = version
+//
+//	// Get Kubernetes images repo
+//	repo, err := getConfig(reader, fmt.Sprintf("Kubernetes Images Repo"), kubekeyapi.DefaultKubeImageRepo)
+//	if err != nil {
+//		return err
+//	}
+//	clusterCfg.KubeCluster.ImageRepo = repo
+//
+//	// Get Kubernetes cluster name
+//	clusterName, err := getConfig(reader, fmt.Sprintf("Kubernetes Cluster Name"), kubekeyapi.DefaultClusterName)
+//	if err != nil {
+//		return err
+//	}
+//	clusterCfg.KubeCluster.ClusterName = clusterName
+//
+//	// Get Network config
+//	networkConfig, err := getNetworkConfig(reader)
+//	if err != nil {
+//		return err
+//	}
+//	clusterCfg.Network = *networkConfig
+//
+//	dir, _ := os.Executable()
+//	exPath := filepath.Dir(dir)
+//	configFile := fmt.Sprintf("%s/%s", exPath, "cluster-info.yaml")
+//	return writeConfig(&clusterCfg, configFile, true)
+//}
+//
+//func getHostConfig(reader *bufio.Reader, index int) (*kubekeyapi.HostCfg, bool, error) {
+//	isMaster := false
+//	host := kubekeyapi.HostCfg{}
+//	address, err := getConfig(reader, fmt.Sprintf("SSH Address of host (%d)", index+1), "")
+//	if err != nil {
+//		return nil, false, err
+//	}
+//	host.SSHAddress = address
+//
+//	port, err := getConfig(reader, fmt.Sprintf("SSH Port of host (%s)", address), kubekeyapi.DefaultSSHPort)
+//	if err != nil {
+//		return nil, false, err
+//	}
+//	host.Port = port
+//
+//	sshUser, err := getConfig(reader, fmt.Sprintf("SSH User of host (%s)", address), "root")
+//	if err != nil {
+//		return nil, false, err
+//	}
+//	host.User = sshUser
+//
+//	password, err := getConfig(reader, fmt.Sprintf("SSH Password of host (%s)", address), "")
+//	if err != nil {
+//		return nil, false, err
+//	}
+//	host.Password = password
+//
+//	hostRole, err := getConfig(reader, fmt.Sprintf("What's host (%s) role? (0: etcd, 1: master, 2: worker)", address), "012")
+//	if err != nil {
+//		return nil, false, err
+//	}
+//
+//	if strings.Contains(hostRole, "0") {
+//		host.Role = append(host.Role, kubekeyapi.ETCDRole)
+//	}
+//	if strings.Contains(hostRole, "1") {
+//		host.Role = append(host.Role, kubekeyapi.MasterRole)
+//		isMaster = true
+//	}
+//	if strings.Contains(hostRole, "2") {
+//		host.Role = append(host.Role, kubekeyapi.WorkerRole)
+//	}
+//
+//	hostnameOverride, err := getConfig(reader, fmt.Sprintf("Override Hostname of host (%s)", address), "")
+//	if err != nil {
+//		return nil, false, err
+//	}
+//	host.HostName = hostnameOverride
+//
+//	internalAddress, err := getConfig(reader, fmt.Sprintf("Internal IP of host (%s)", address), "")
+//	if err != nil {
+//		return nil, false, err
+//	}
+//	host.InternalAddress = internalAddress
+//
+//	return &host, isMaster, nil
+//}
+//
+//func getNetworkConfig(reader *bufio.Reader) (*kubekeyapi.NetworkConfig, error) {
+//	networkConfig := kubekeyapi.NetworkConfig{}
+//
+//	networkPlugin, err := getConfig(reader, "Network Plugin Type (0: calico, 1: flannel)", "0")
+//	if err != nil {
+//		return nil, err
+//	}
+//	networkPluginInt, _ := strconv.Atoi(networkPlugin)
+//	if networkPluginInt == 0 {
+//		networkConfig.Plugin = "calico"
+//	}
+//	if networkPluginInt == 1 {
+//		networkConfig.Plugin = "flannel"
+//	}
+//
+//	podsCIDR, err := getConfig(reader, "Specify range of IP addresses for the pod network.", kubekeyapi.DefaultPodsCIDR)
+//	if err != nil {
+//		return nil, err
+//	}
+//	networkConfig.KubePodsCIDR = podsCIDR
+//
+//	serviceCIDR, err := getConfig(reader, "Use alternative range of IP address for service VIPs.", kubekeyapi.DefaultServiceCIDR)
+//	if err != nil {
+//		return nil, err
+//	}
+//	networkConfig.KubeServiceCIDR = serviceCIDR
+//	return &networkConfig, nil
+//}
cmd/reset.go
@@ -10,20 +10,22 @@ import (
 func NewCmdResetCluster() *cobra.Command {
 	var (
 		clusterCfgFile string
+		verbose        bool
 	)
 	var clusterCmd = &cobra.Command{
 		Use:   "reset",
 		Short: "Reset Kubernetes Cluster",
 		Run: func(cmd *cobra.Command, args []string) {
-			logger := util.InitLogger(true)
-			resetCluster(clusterCfgFile, logger)
+			logger := util.InitLogger(verbose)
+			resetCluster(clusterCfgFile, logger, verbose)
 		},
 	}

 	clusterCmd.Flags().StringVarP(&clusterCfgFile, "config", "f", "", "")
+	clusterCmd.Flags().BoolVarP(&verbose, "debug", "", true, "")
 	return clusterCmd
 }

-func resetCluster(clusterCfgFile string, logger *log.Logger) {
-	reset.ResetCluster(clusterCfgFile, logger)
+func resetCluster(clusterCfgFile string, logger *log.Logger, verbose bool) {
+	reset.ResetCluster(clusterCfgFile, logger, verbose)
 }
@@ -10,19 +10,19 @@ func NewCmdScaleCluster() *cobra.Command {
 	var (
 		clusterCfgFile string
 		pkgDir         string
-		Verbose        bool
+		verbose        bool
 	)
 	var clusterCmd = &cobra.Command{
 		Use:   "scale",
 		Short: "Scale cluster",
 		RunE: func(cmd *cobra.Command, args []string) error {
-			logger := util.InitLogger(Verbose)
+			logger := util.InitLogger(verbose)
 			return scale.ScaleCluster(clusterCfgFile, logger, pkgDir, Verbose)
 		},
 	}

 	clusterCmd.Flags().StringVarP(&clusterCfgFile, "config", "f", "", "")
 	clusterCmd.Flags().StringVarP(&pkgDir, "pkg", "", "", "")
-	clusterCmd.Flags().BoolVarP(&Verbose, "debug", "", true, "")
+	clusterCmd.Flags().BoolVarP(&verbose, "debug", "", true, "")
 	return clusterCmd
 }
@@ -1,22 +1,23 @@
 ```yaml
-apiVersion: kubekey.io/v1alpha1
-kind: K2Cluster
+apiVersion: kubekey.kubesphere.io/v1alpha1
+kind: Cluster
 metadata:
-  name: demo
+  name: example
 spec:
   hosts:
-  - hostName: node1
-    sshAddress: 172.16.0.2
-    internalAddress: 172.16.0.2
-    port: "22"
-    user: ubuntu
-    password: Qcloud@123
-    sshKeyPath: ""
-    role:
-      - etcd
-      - master
-      - worker
-  lbKubeapiserver:
+  - {name: node1, address: 172.16.0.2, internalAddress: 172.16.0.2, user: ubuntu, password: Qcloud@123}
+  - {name: node2, address: 172.16.0.3, internalAddress: 172.16.0.3, password: Qcloud@123}
+  - {name: node3, address: 172.16.0.4, internalAddress: 172.16.0.4, privateKeyPath: "~/.ssh/id_rsa"}
+  roleGroups:
+    etcd:
+    - node1
+    master:
+    - node1
+    - node[2:10]
+    worker:
+    - node1
+    - node[10:100]
+  controlPlaneEndpoint:
     domain: lb.kubesphere.local
     address: ""
     port: "6443"
@@ -26,8 +27,8 @@ spec:
     clusterName: cluster.local
   network:
     plugin: calico
-    kube_pods_cidr: 10.233.64.0/18
-    kube_service_cidr: 10.233.0.0/18
+    podNetworkCidr: 10.233.64.0/18
+    serviceNetworkCidr: 10.233.0.0/18
   registry:
     registryMirrors: []
     insecureRegistries: []
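The `node[2:10]` and `node[10:100]` entries under `roleGroups` are range shorthand that this commit expands into individual host names (node2 through node10, and so on) via the `getHostsRange` helper shown further down. A minimal standalone sketch of that expansion logic, assuming the same `[start:end]` syntax (the function and variable names here are illustrative, not from the repository):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// expandHostRange mirrors the getHostsRange helper added in this commit:
// "node[2:4]" expands to ["node2", "node3", "node4"].
func expandHostRange(rangeStr string) []string {
	r := regexp.MustCompile(`\[(\d+):(\d+)\]`)
	m := r.FindStringSubmatch(rangeStr)
	if m == nil {
		return []string{rangeStr} // plain host name, no range marker
	}
	prefix := strings.Split(rangeStr, m[0])[0]
	start, _ := strconv.Atoi(m[1])
	end, _ := strconv.Atoi(m[2])
	hosts := make([]string, 0, end-start+1)
	for i := start; i <= end; i++ {
		hosts = append(hosts, fmt.Sprintf("%s%d", prefix, i))
	}
	return hosts
}

func main() {
	fmt.Println(expandHostRange("node[2:10]")) // [node2 node3 ... node10]
}
```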
@@ -7,8 +7,6 @@ import (
 )

 func main() {
-	//klog.InitFlags(nil)
-	//pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
 	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)

 	pflag.Set("logtostderr", "true")
@@ -4,6 +4,9 @@ import (
 	"fmt"
 	"github.com/kubesphere/kubekey/pkg/util"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"regexp"
+	"strconv"
+	"strings"
 )

 // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
@@ -14,15 +17,16 @@ type K2ClusterSpec struct {
 	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
 	// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
 	// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
-	Hosts           []HostCfg          `yaml:"hosts" json:"hosts,omitempty"`
-	LBKubeApiserver LBKubeApiserverCfg `yaml:"lbKubeapiserver" json:"lbKubeapiserver,omitempty"`
-	KubeCluster     KubeCluster        `yaml:"kubeCluster" json:"kubeCluster,omitempty"`
-	Network         NetworkConfig      `yaml:"network" json:"network,omitempty"`
-	Registry        RegistryConfig     `yaml:"registry" json:"registry,omitempty"`
-	Plugins         PluginsCfg         `yaml:"plugins" json:"plugins,omitempty"`
+	Hosts                []HostCfg            `yaml:"hosts" json:"hosts,omitempty"`
+	RoleGroups           RoleGroups           `yaml:"roleGroups" json:"roleGroups,omitempty"`
+	ControlPlaneEndpoint ControlPlaneEndpoint `yaml:"controlPlaneEndpoint" json:"controlPlaneEndpoint,omitempty"`
+	Kubernetes           Kubernetes           `yaml:"kubernetes" json:"kubernetes,omitempty"`
+	Network              NetworkConfig        `yaml:"network" json:"network,omitempty"`
+	Registry             RegistryConfig       `yaml:"registry" json:"registry,omitempty"`
+	Plugins              PluginsCfg           `yaml:"plugins" json:"plugins,omitempty"`
 }

-type KubeCluster struct {
+type Kubernetes struct {
 	Version     string `yaml:"version" json:"version,omitempty"`
 	ImageRepo   string `yaml:"imageRepo" json:"imageRepo,omitempty"`
 	ClusterName string `yaml:"clusterName" json:"clusterName,omitempty"`
@@ -62,23 +66,32 @@ type K2ClusterList struct {
 //}

 type HostCfg struct {
-	HostName        string   `yaml:"hostName" json:"hostName,omitempty"`
-	SSHAddress      string   `yaml:"sshAddress" json:"sshAddress,omitempty"`
-	InternalAddress string   `yaml:"internalAddress" json:"internalAddress,omitempty"`
-	Port            string   `yaml:"port" json:"port,omitempty"`
-	User            string   `yaml:"user" json:"user,omitempty"`
-	Password        string   `yaml:"password" json:"password,omitempty"`
-	SSHKeyPath      string   `yaml:"sshKeyPath" json:"sshKeyPath,omitempty"`
-	Role            []string `yaml:"role" json:"role,omitempty" norman:"type=array[enum],options=etcd|master|worker|client"`
-	ID              int      `json:"-"`
-	IsEtcd          bool     `json:"-"`
-	IsMaster        bool     `json:"-"`
-	IsWorker        bool     `json:"-"`
-	IsClient        bool     `json:"-"`
+	Name            string `json:"name,omitempty"`
+	Address         string `json:"address,omitempty"`
+	InternalAddress string `json:"internalAddress,omitempty"`
+	Port            string `json:"port,omitempty"`
+	User            string `json:"user,omitempty"`
+	Password        string `json:"password,omitempty"`
+	PrivateKeyPath  string `json:"privateKeyPath,omitempty"`
+	ID              int    `json:"-"`
+	IsEtcd          bool   `json:"-"`
+	IsMaster        bool   `json:"-"`
+	IsWorker        bool   `json:"-"`
+	IsClient        bool   `json:"-"`
 }

-type Hosts struct {
-	Hosts []HostCfg
+type RoleGroups struct {
+	Etcd   []string `yaml:"etcd" json:"etcd,omitempty"`
+	Master []string `yaml:"master" json:"master,omitempty"`
+	Worker []string `yaml:"worker" json:"worker,omitempty"`
+}
+
+type HostGroups struct {
+	All    []HostCfg
+	Etcd   []HostCfg
+	Master []HostCfg
+	Worker []HostCfg
+	K8s    []HostCfg
 }

 type NetworkConfig struct {
@@ -87,7 +100,7 @@ type NetworkConfig struct {
 	KubeServiceCIDR string `yaml:"kube_service_cidr" json:"kube_service_cidr,omitempty"`
 }

-type LBKubeApiserverCfg struct {
+type ControlPlaneEndpoint struct {
 	Domain  string `yaml:"domain" json:"domain,omitempty"`
 	Address string `yaml:"address" json:"address,omitempty"`
 	Port    string `yaml:"port" json:"port,omitempty"`
@@ -106,42 +119,21 @@ type ExternalEtcd struct {
 	KeyFile string
 }

-func (cfg *K2ClusterSpec) GenerateHosts() []string {
-	var lbHost string
-	hostsList := []string{}
-
-	_, _, masters, _, _, _ := cfg.GroupHosts()
-	if cfg.LBKubeApiserver.Address != "" {
-		lbHost = fmt.Sprintf("%s %s", cfg.LBKubeApiserver.Address, cfg.LBKubeApiserver.Domain)
-	} else {
-		lbHost = fmt.Sprintf("%s %s", masters.Hosts[0].InternalAddress, DefaultLBDomain)
-	}
-
-	for _, host := range cfg.Hosts {
-		if host.HostName != "" {
-			hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", host.InternalAddress, host.HostName, cfg.KubeCluster.ClusterName, host.HostName))
-		}
-	}
-
-	hostsList = append(hostsList, lbHost)
-	return hostsList
-}
-
 func (cfg *K2ClusterSpec) GenerateCertSANs() []string {
-	clusterSvc := fmt.Sprintf("kubernetes.default.svc.%s", cfg.KubeCluster.ClusterName)
+	clusterSvc := fmt.Sprintf("kubernetes.default.svc.%s", cfg.Kubernetes.ClusterName)
 	defaultCertSANs := []string{"kubernetes", "kubernetes.default", "kubernetes.default.svc", clusterSvc, "localhost", "127.0.0.1"}
 	extraCertSANs := []string{}

-	extraCertSANs = append(extraCertSANs, cfg.LBKubeApiserver.Domain)
-	extraCertSANs = append(extraCertSANs, cfg.LBKubeApiserver.Address)
+	extraCertSANs = append(extraCertSANs, cfg.ControlPlaneEndpoint.Domain)
+	extraCertSANs = append(extraCertSANs, cfg.ControlPlaneEndpoint.Address)

 	for _, host := range cfg.Hosts {
-		extraCertSANs = append(extraCertSANs, host.HostName)
-		extraCertSANs = append(extraCertSANs, fmt.Sprintf("%s.%s", host.HostName, cfg.KubeCluster.ClusterName))
-		if host.SSHAddress != cfg.LBKubeApiserver.Address {
-			extraCertSANs = append(extraCertSANs, host.SSHAddress)
+		extraCertSANs = append(extraCertSANs, host.Name)
+		extraCertSANs = append(extraCertSANs, fmt.Sprintf("%s.%s", host.Name, cfg.Kubernetes.ClusterName))
+		if host.Address != cfg.ControlPlaneEndpoint.Address {
+			extraCertSANs = append(extraCertSANs, host.Address)
 		}
-		if host.InternalAddress != host.SSHAddress && host.InternalAddress != cfg.LBKubeApiserver.Address {
+		if host.InternalAddress != host.Address && host.InternalAddress != cfg.ControlPlaneEndpoint.Address {
 			extraCertSANs = append(extraCertSANs, host.InternalAddress)
 		}
 	}
@@ -152,36 +144,91 @@ func (cfg *K2ClusterSpec) GenerateCertSANs() []string {
 	return defaultCertSANs
 }

-func (cfg *K2ClusterSpec) GroupHosts() (*Hosts, *Hosts, *Hosts, *Hosts, *Hosts, *Hosts) {
-	allHosts := Hosts{}
-	etcdHosts := Hosts{}
-	masterHosts := Hosts{}
-	workerHosts := Hosts{}
-	k8sHosts := Hosts{}
-	clientHost := Hosts{}
-
-	for _, host := range cfg.Hosts {
-		//clusterNode := HostCfg{}
-		if host.IsEtcd {
-			etcdHosts.Hosts = append(etcdHosts.Hosts, host)
-		}
-		if host.IsMaster {
-			masterHosts.Hosts = append(masterHosts.Hosts, host)
-		}
-		if host.IsWorker {
-			workerHosts.Hosts = append(workerHosts.Hosts, host)
-		}
-		if host.IsMaster || host.IsWorker {
-			k8sHosts.Hosts = append(k8sHosts.Hosts, host)
-		}
-		if host.IsClient {
-			clientHost.Hosts = append(clientHost.Hosts, host)
-		}
-		allHosts.Hosts = append(allHosts.Hosts, host)
-	}
-	return &allHosts, &etcdHosts, &masterHosts, &workerHosts, &k8sHosts, &clientHost
+func (cfg *K2ClusterSpec) GroupHosts() *HostGroups {
+	clusterHostsGroups := HostGroups{}
+	etcdGroup, masterGroup, workerGroup := cfg.ParseRolesList()
+	for _, host := range cfg.Hosts {
+		for _, hostName := range etcdGroup {
+			if host.Name == hostName {
+				host.IsEtcd = true
+				clusterHostsGroups.Etcd = append(clusterHostsGroups.Etcd, host)
+				break
+			}
+		}
+
+		for _, hostName := range masterGroup {
+			if host.Name == hostName {
+				host.IsMaster = true
+				clusterHostsGroups.Master = append(clusterHostsGroups.Master, host)
+				break
+			}
+		}
+
+		for _, hostName := range workerGroup {
+			if host.Name == hostName {
+				host.IsWorker = true
+				clusterHostsGroups.Worker = append(clusterHostsGroups.Worker, host)
+				break
+			}
+		}
+
+		if host.IsMaster || host.IsWorker {
+			clusterHostsGroups.K8s = append(clusterHostsGroups.K8s, host)
+		}
+
+		clusterHostsGroups.All = append(clusterHostsGroups.All, host)
+	}
+
+	return &clusterHostsGroups
 }

 func (cfg *K2ClusterSpec) ClusterIP() string {
 	return util.ParseIp(cfg.Network.KubeServiceCIDR)[2]
 }
+
+func (cfg *K2ClusterSpec) ParseRolesList() ([]string, []string, []string) {
+	etcdGroupList := []string{}
+	masterGroupList := []string{}
+	workerGroupList := []string{}
+
+	for _, host := range cfg.RoleGroups.Etcd {
+		if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
+			etcdGroupList = append(etcdGroupList, getHostsRange(host)...)
+		} else {
+			etcdGroupList = append(etcdGroupList, host)
+		}
+
+	}
+
+	for _, host := range cfg.RoleGroups.Master {
+		if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
+			masterGroupList = append(masterGroupList, getHostsRange(host)...)
+		} else {
+			masterGroupList = append(masterGroupList, host)
+		}
+	}
+
+	for _, host := range cfg.RoleGroups.Worker {
+		if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
+			workerGroupList = append(workerGroupList, getHostsRange(host)...)
+		} else {
+			workerGroupList = append(workerGroupList, host)
+		}
+	}
+	return etcdGroupList, masterGroupList, workerGroupList
+}
+
+func getHostsRange(rangeStr string) []string {
+	hostRangeList := []string{}
+	r := regexp.MustCompile(`\[(\d+)\:(\d+)\]`)
+	nameSuffix := r.FindStringSubmatch(rangeStr)
+	namePrefix := strings.Split(rangeStr, nameSuffix[0])[0]
+	nameSuffixStart, _ := strconv.Atoi(nameSuffix[1])
+	nameSuffixEnd, _ := strconv.Atoi(nameSuffix[2])
+	for i := nameSuffixStart; i <= nameSuffixEnd; i++ {
+		hostRangeList = append(hostRangeList, fmt.Sprintf("%s%d", namePrefix, i))
+	}
+	return hostRangeList
+}
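ParseRolesList expands the roleGroups lists (including the range shorthand) into flat name lists, and GroupHosts then sorts each HostCfg into the HostGroups buckets by matching names. A rough usage sketch, assuming it sits in the same v1alpha1 package as the types above (the host values are made up):

```go
package v1alpha1

import "testing"

func TestGroupHostsSketch(t *testing.T) {
	cfg := K2ClusterSpec{
		Hosts: []HostCfg{
			{Name: "node1", InternalAddress: "172.16.0.2"},
			{Name: "node2", InternalAddress: "172.16.0.3"},
		},
		RoleGroups: RoleGroups{
			Etcd:   []string{"node1"},
			Master: []string{"node1"},
			Worker: []string{"node[1:2]"}, // range shorthand: node1, node2
		},
	}

	groups := cfg.GroupHosts()
	// node1 lands in Etcd, Master, Worker and K8s; node2 in Worker and K8s;
	// All always carries every host.
	if len(groups.Etcd) != 1 || len(groups.Master) != 1 ||
		len(groups.Worker) != 2 || len(groups.K8s) != 2 || len(groups.All) != 2 {
		t.Fatalf("unexpected grouping: %+v", groups)
	}
}
```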
@@ -8,6 +8,43 @@ import (
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CephRBD) DeepCopyInto(out *CephRBD) {
+	*out = *in
+	if in.Monitors != nil {
+		in, out := &in.Monitors, &out.Monitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephRBD.
+func (in *CephRBD) DeepCopy() *CephRBD {
+	if in == nil {
+		return nil
+	}
+	out := new(CephRBD)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneEndpoint) DeepCopyInto(out *ControlPlaneEndpoint) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneEndpoint.
+func (in *ControlPlaneEndpoint) DeepCopy() *ControlPlaneEndpoint {
+	if in == nil {
+		return nil
+	}
+	out := new(ControlPlaneEndpoint)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) {
 	*out = *in
@@ -29,14 +66,25 @@ func (in *ExternalEtcd) DeepCopy() *ExternalEtcd {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlusterFS) DeepCopyInto(out *GlusterFS) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterFS.
+func (in *GlusterFS) DeepCopy() *GlusterFS {
+	if in == nil {
+		return nil
+	}
+	out := new(GlusterFS)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *HostCfg) DeepCopyInto(out *HostCfg) {
 	*out = *in
 	if in.Role != nil {
 		in, out := &in.Role, &out.Role
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
 	return
 }
@@ -51,24 +99,42 @@ func (in *HostCfg) DeepCopy() *HostCfg {
 }

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Hosts) DeepCopyInto(out *Hosts) {
+func (in *HostGroups) DeepCopyInto(out *HostGroups) {
 	*out = *in
-	if in.Hosts != nil {
-		in, out := &in.Hosts, &out.Hosts
+	if in.All != nil {
+		in, out := &in.All, &out.All
 		*out = make([]HostCfg, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
+		copy(*out, *in)
+	}
+	if in.Etcd != nil {
+		in, out := &in.Etcd, &out.Etcd
+		*out = make([]HostCfg, len(*in))
+		copy(*out, *in)
+	}
+	if in.Master != nil {
+		in, out := &in.Master, &out.Master
+		*out = make([]HostCfg, len(*in))
+		copy(*out, *in)
+	}
+	if in.Worker != nil {
+		in, out := &in.Worker, &out.Worker
+		*out = make([]HostCfg, len(*in))
+		copy(*out, *in)
+	}
+	if in.K8s != nil {
+		in, out := &in.K8s, &out.K8s
+		*out = make([]HostCfg, len(*in))
+		copy(*out, *in)
 	}
 	return
 }

-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hosts.
-func (in *Hosts) DeepCopy() *Hosts {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostGroups.
+func (in *HostGroups) DeepCopy() *HostGroups {
 	if in == nil {
 		return nil
 	}
-	out := new(Hosts)
+	out := new(HostGroups)
 	in.DeepCopyInto(out)
 	return out
 }
@@ -140,14 +206,14 @@ func (in *K2ClusterSpec) DeepCopyInto(out *K2ClusterSpec) {
 	if in.Hosts != nil {
 		in, out := &in.Hosts, &out.Hosts
 		*out = make([]HostCfg, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
+		copy(*out, *in)
 	}
-	out.LBKubeApiserver = in.LBKubeApiserver
-	out.KubeCluster = in.KubeCluster
+	in.RoleGroups.DeepCopyInto(&out.RoleGroups)
+	out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+	out.Kubernetes = in.Kubernetes
 	out.Network = in.Network
 	in.Registry.DeepCopyInto(&out.Registry)
 	in.Plugins.DeepCopyInto(&out.Plugins)
 	return
 }
@@ -178,33 +244,33 @@ func (in *K2ClusterStatus) DeepCopy() *K2ClusterStatus {
 }

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *KubeCluster) DeepCopyInto(out *KubeCluster) {
+func (in *Kubernetes) DeepCopyInto(out *Kubernetes) {
 	*out = *in
 	return
 }

-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeCluster.
-func (in *KubeCluster) DeepCopy() *KubeCluster {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kubernetes.
+func (in *Kubernetes) DeepCopy() *Kubernetes {
 	if in == nil {
 		return nil
 	}
-	out := new(KubeCluster)
+	out := new(Kubernetes)
 	in.DeepCopyInto(out)
 	return out
 }

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LBKubeApiserverCfg) DeepCopyInto(out *LBKubeApiserverCfg) {
+func (in *LocalVolume) DeepCopyInto(out *LocalVolume) {
 	*out = *in
 	return
 }

-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBKubeApiserverCfg.
-func (in *LBKubeApiserverCfg) DeepCopy() *LBKubeApiserverCfg {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolume.
+func (in *LocalVolume) DeepCopy() *LocalVolume {
 	if in == nil {
 		return nil
 	}
-	out := new(LBKubeApiserverCfg)
+	out := new(LocalVolume)
 	in.DeepCopyInto(out)
 	return out
 }
@@ -225,6 +291,42 @@ func (in *NetworkConfig) DeepCopy() *NetworkConfig {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NfsClient) DeepCopyInto(out *NfsClient) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NfsClient.
+func (in *NfsClient) DeepCopy() *NfsClient {
+	if in == nil {
+		return nil
+	}
+	out := new(NfsClient)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PluginsCfg) DeepCopyInto(out *PluginsCfg) {
+	*out = *in
+	out.LocalVolume = in.LocalVolume
+	out.NfsClient = in.NfsClient
+	out.GlusterFS = in.GlusterFS
+	in.CephRBD.DeepCopyInto(&out.CephRBD)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginsCfg.
+func (in *PluginsCfg) DeepCopy() *PluginsCfg {
+	if in == nil {
+		return nil
+	}
+	out := new(PluginsCfg)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RegistryConfig) DeepCopyInto(out *RegistryConfig) {
 	*out = *in
@@ -250,3 +352,34 @@ func (in *RegistryConfig) DeepCopy() *RegistryConfig {
 	in.DeepCopyInto(out)
 	return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleGroups) DeepCopyInto(out *RoleGroups) {
+	*out = *in
+	if in.Etcd != nil {
+		in, out := &in.Etcd, &out.Etcd
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Master != nil {
+		in, out := &in.Master, &out.Master
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Worker != nil {
+		in, out := &in.Worker, &out.Worker
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleGroups.
+func (in *RoleGroups) DeepCopy() *RoleGroups {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleGroups)
+	in.DeepCopyInto(out)
+	return out
+}
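These generated helpers exist because plain struct assignment aliases slice fields: copying a RoleGroups by value shares the same backing arrays. DeepCopy gives the copy independent storage. A small package-internal sketch of the difference:

```go
in := RoleGroups{Master: []string{"node1"}}

shallow := in         // struct copy: Master still points at in's backing array
deep := in.DeepCopy() // fresh slices via DeepCopyInto

shallow.Master[0] = "changed"
// in.Master[0] is now "changed" too, while deep.Master[0] is still "node1".
```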
@@ -66,7 +66,7 @@ func generateCerts(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.Connection) error {
 		certsContent[cert] = certsBase64
 	}

-	for i := 1; i <= len(mgr.EtcdNodes.Hosts)-1; i++ {
+	for i := 1; i <= len(mgr.EtcdNodes)-1; i++ {
 		certsStr <- certsContent
 	}

@@ -88,15 +88,15 @@ func generateCertsFiles(mgr *manager.Manager) []string {
 	var certsList []string
 	certsList = append(certsList, "ca.pem")
 	certsList = append(certsList, "ca-key.pem")
-	for _, host := range mgr.EtcdNodes.Hosts {
-		certsList = append(certsList, fmt.Sprintf("admin-%s.pem", host.HostName))
-		certsList = append(certsList, fmt.Sprintf("admin-%s-key.pem", host.HostName))
-		certsList = append(certsList, fmt.Sprintf("member-%s.pem", host.HostName))
-		certsList = append(certsList, fmt.Sprintf("member-%s-key.pem", host.HostName))
+	for _, host := range mgr.EtcdNodes {
+		certsList = append(certsList, fmt.Sprintf("admin-%s.pem", host.Name))
+		certsList = append(certsList, fmt.Sprintf("admin-%s-key.pem", host.Name))
+		certsList = append(certsList, fmt.Sprintf("member-%s.pem", host.Name))
+		certsList = append(certsList, fmt.Sprintf("member-%s-key.pem", host.Name))
 	}
-	for _, host := range mgr.MasterNodes.Hosts {
-		certsList = append(certsList, fmt.Sprintf("node-%s.pem", host.HostName))
-		certsList = append(certsList, fmt.Sprintf("node-%s-key.pem", host.HostName))
+	for _, host := range mgr.MasterNodes {
+		certsList = append(certsList, fmt.Sprintf("node-%s.pem", host.Name))
+		certsList = append(certsList, fmt.Sprintf("node-%s-key.pem", host.Name))
 	}
 	return certsList
 }
@@ -170,10 +170,10 @@ func generateEtcdService(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.Connection) error {
 	}

 	addrList := []string{}
-	for _, host := range mgr.EtcdNodes.Hosts {
+	for _, host := range mgr.EtcdNodes {
 		addrList = append(addrList, fmt.Sprintf("https://%s:2379", host.InternalAddress))
 	}
-	checkHealthCmd := fmt.Sprintf("sudo -E /bin/sh -c \"export ETCDCTL_API=2;export ETCDCTL_CERT_FILE='/etc/ssl/etcd/ssl/admin-%s.pem';export ETCDCTL_KEY_FILE='/etc/ssl/etcd/ssl/admin-%s-key.pem';export ETCDCTL_CA_FILE='/etc/ssl/etcd/ssl/ca.pem';%s/etcdctl --endpoints=%s cluster-health | grep -q 'cluster is healthy'\"", node.HostName, node.HostName, etcdBinDir, strings.Join(addrList, ","))
+	checkHealthCmd := fmt.Sprintf("sudo -E /bin/sh -c \"export ETCDCTL_API=2;export ETCDCTL_CERT_FILE='/etc/ssl/etcd/ssl/admin-%s.pem';export ETCDCTL_KEY_FILE='/etc/ssl/etcd/ssl/admin-%s-key.pem';export ETCDCTL_CA_FILE='/etc/ssl/etcd/ssl/ca.pem';%s/etcdctl --endpoints=%s cluster-health | grep -q 'cluster is healthy'\"", node.Name, node.Name, etcdBinDir, strings.Join(addrList, ","))
 	if mgr.Runner.Index == 0 {
 		for i := 20; i > 0; i-- {
 			_, err := mgr.Runner.RunCmd(checkHealthCmd)
@@ -164,14 +164,14 @@ func GenerateEtcdSslCfg(cfg *kubekeyapi.K2ClusterSpec) (string, error) {
 	dnsList := []string{"localhost", "etcd.kube-system.svc.cluster.local", "etcd.kube-system.svc", "etcd.kube-system", "etcd"}
 	ipList := []string{"127.0.0.1"}

-	if cfg.LBKubeApiserver.Domain == "" {
+	if cfg.ControlPlaneEndpoint.Domain == "" {
 		dnsList = append(dnsList, kubekeyapi.DefaultLBDomain)
 	} else {
-		dnsList = append(dnsList, cfg.LBKubeApiserver.Domain)
+		dnsList = append(dnsList, cfg.ControlPlaneEndpoint.Domain)
 	}

 	for _, host := range cfg.Hosts {
-		dnsList = append(dnsList, host.HostName)
+		dnsList = append(dnsList, host.Name)
 		ipList = append(ipList, host.InternalAddress)
 	}

@@ -186,11 +186,11 @@ func GenerateEtcdSslScript(mgr *manager.Manager) (string, error) {
 	var hosts []string
 	//_, etcdNodes, masterNodes, _, _ , _:= cfg.GroupHosts()

-	for _, host := range mgr.EtcdNodes.Hosts {
-		masters = append(masters, host.HostName)
+	for _, host := range mgr.EtcdNodes {
+		masters = append(masters, host.Name)
 	}
-	for _, host := range mgr.MasterNodes.Hosts {
-		hosts = append(hosts, host.HostName)
+	for _, host := range mgr.MasterNodes {
+		hosts = append(hosts, host.Name)
 	}
 	return util.Render(EtcdSslTempl, util.Data{
 		"Masters": strings.Join(masters, " "),
@@ -102,7 +102,7 @@ func GenerateEtcdService(mgr *manager.Manager, index int) (string, error) {

 func GenerateEtcdEnv(mgr *manager.Manager, node *kubekeyapi.HostCfg, index int) (string, error) {
 	endpoints := []string{}
-	for index, host := range mgr.EtcdNodes.Hosts {
+	for index, host := range mgr.EtcdNodes {
 		endpoints = append(endpoints, fmt.Sprintf("etcd%d=https://%s:2380", index+1, host.InternalAddress))
 	}

@@ -110,7 +110,7 @@ func GenerateEtcdEnv(mgr *manager.Manager, node *kubekeyapi.HostCfg, index int) (string, error) {
 		"Tag":       kubekeyapi.DefaultEtcdVersion,
 		"Name":      fmt.Sprintf("etcd%d", index+1),
 		"Ip":        node.InternalAddress,
-		"Hostname":  node.HostName,
+		"Hostname":  node.Name,
 		"Endpoints": strings.Join(endpoints, ","),
 	})
 }
@@ -83,7 +83,7 @@ func GetKubeConfig(mgr *manager.Manager) error {

 func removeMasterTaint(mgr *manager.Manager, node *kubekeyapi.HostCfg) error {
 	if node.IsWorker {
-		removeMasterTaintCmd := fmt.Sprintf("/usr/local/bin/kubectl taint nodes %s node-role.kubernetes.io/master=:NoSchedule-", node.HostName)
+		removeMasterTaintCmd := fmt.Sprintf("/usr/local/bin/kubectl taint nodes %s node-role.kubernetes.io/master=:NoSchedule-", node.Name)
 		_, err := mgr.Runner.RunCmd(removeMasterTaintCmd)
 		if err != nil {
 			return errors.Wrap(errors.WithStack(err), "failed to remove master taint")
@@ -94,7 +94,7 @@ func removeMasterTaint(mgr *manager.Manager, node *kubekeyapi.HostCfg) error {

 func addWorkerLabel(mgr *manager.Manager, node *kubekeyapi.HostCfg) error {
 	if node.IsWorker {
-		addWorkerLabelCmd := fmt.Sprintf("/usr/local/bin/kubectl label node %s node-role.kubernetes.io/worker=", node.HostName)
+		addWorkerLabelCmd := fmt.Sprintf("/usr/local/bin/kubectl label node %s node-role.kubernetes.io/worker=", node.Name)
 		out, err := mgr.Runner.RunCmd(addWorkerLabelCmd)
 		if err != nil && !strings.Contains(out, "already") {
 			return errors.Wrap(errors.WithStack(err), "failed to add worker label")
@@ -184,7 +184,7 @@ func JoinNodesToCluster(mgr *manager.Manager) error {
 }

 func joinNodesToCluster(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.Connection) error {
-	if !strings.Contains(clusterInfo, node.HostName) && !strings.Contains(clusterInfo, node.InternalAddress) {
+	if !strings.Contains(clusterInfo, node.Name) && !strings.Contains(clusterInfo, node.InternalAddress) {
 		if node.IsMaster {
 			err := addMaster(mgr)
 			if err != nil {
@@ -27,9 +27,9 @@ func syncKubeBinaries(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.Connection) error {

 	filepath := fmt.Sprintf("%s/%s", currentDir, kubekeyapi.DefaultPreDir)

-	kubeadm := fmt.Sprintf("kubeadm-%s", mgr.Cluster.KubeCluster.Version)
-	kubelet := fmt.Sprintf("kubelet-%s", mgr.Cluster.KubeCluster.Version)
-	kubectl := fmt.Sprintf("kubectl-%s", mgr.Cluster.KubeCluster.Version)
+	kubeadm := fmt.Sprintf("kubeadm-%s", mgr.Cluster.Kubernetes.Version)
+	kubelet := fmt.Sprintf("kubelet-%s", mgr.Cluster.Kubernetes.Version)
+	kubectl := fmt.Sprintf("kubectl-%s", mgr.Cluster.Kubernetes.Version)
 	helm := fmt.Sprintf("helm-%s", kubekeyapi.DefaultHelmVersion)
 	kubecni := fmt.Sprintf("cni-plugins-linux-%s-%s.tgz", kubekeyapi.DefaultArch, kubekeyapi.DefaultCniVersion)
 	binaryList := []string{kubeadm, kubelet, kubectl, helm, kubecni}
@@ -119,25 +119,25 @@ func GenerateKubeadmCfg(mgr *manager.Manager) (string, error) {
 	var endpointsList []string
 	var caFile, certFile, keyFile string

-	for _, host := range mgr.EtcdNodes.Hosts {
+	for _, host := range mgr.EtcdNodes {
 		endpoint := fmt.Sprintf("https://%s:%s", host.InternalAddress, kubekeyapi.DefaultEtcdPort)
 		endpointsList = append(endpointsList, endpoint)
 	}
 	externalEtcd.Endpoints = endpointsList

 	caFile = "/etc/ssl/etcd/ssl/ca.pem"
-	certFile = fmt.Sprintf("/etc/ssl/etcd/ssl/admin-%s.pem", mgr.EtcdNodes.Hosts[0].HostName)
-	keyFile = fmt.Sprintf("/etc/ssl/etcd/ssl/admin-%s-key.pem", mgr.EtcdNodes.Hosts[0].HostName)
+	certFile = fmt.Sprintf("/etc/ssl/etcd/ssl/admin-%s.pem", mgr.EtcdNodes[0].Name)
+	keyFile = fmt.Sprintf("/etc/ssl/etcd/ssl/admin-%s-key.pem", mgr.EtcdNodes[0].Name)

 	externalEtcd.CaFile = caFile
 	externalEtcd.CertFile = certFile
 	externalEtcd.KeyFile = keyFile

 	return util.Render(KubeadmCfgTempl, util.Data{
-		"ImageRepo":            mgr.Cluster.KubeCluster.ImageRepo,
-		"Version":              mgr.Cluster.KubeCluster.Version,
-		"ClusterName":          mgr.Cluster.KubeCluster.ClusterName,
-		"ControlPlaneEndpoint": fmt.Sprintf("%s:%s", mgr.Cluster.LBKubeApiserver.Address, mgr.Cluster.LBKubeApiserver.Port),
+		"ImageRepo":            mgr.Cluster.Kubernetes.ImageRepo,
+		"Version":              mgr.Cluster.Kubernetes.Version,
+		"ClusterName":          mgr.Cluster.Kubernetes.ClusterName,
+		"ControlPlaneEndpoint": fmt.Sprintf("%s:%s", mgr.Cluster.ControlPlaneEndpoint.Address, mgr.Cluster.ControlPlaneEndpoint.Port),
 		"PodSubnet":            mgr.Cluster.Network.KubePodsCIDR,
 		"ServiceSubnet":        mgr.Cluster.Network.KubeServiceCIDR,
 		"CertSANs":             mgr.Cluster.GenerateCertSANs(),
@@ -23,12 +23,12 @@ func initOsOnNode(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.Connection) error {
 		return errors.Wrap(errors.WithStack(err), "failed to init operating system")
 	}

-	_, err1 := mgr.Runner.RunCmd(fmt.Sprintf("sudo -E /bin/sh -c \"hostnamectl set-hostname %s && sed -i '/^127.0.1.1/s/.*/127.0.1.1 %s/g' /etc/hosts\"", node.HostName, node.HostName))
+	_, err1 := mgr.Runner.RunCmd(fmt.Sprintf("sudo -E /bin/sh -c \"hostnamectl set-hostname %s && sed -i '/^127.0.1.1/s/.*/127.0.1.1 %s/g' /etc/hosts\"", node.Name, node.Name))
 	if err1 != nil {
 		return errors.Wrap(errors.WithStack(err1), "failed to override hostname")
 	}

-	initOsScript, err2 := tmpl.InitOsScript(mgr.Cluster)
+	initOsScript, err2 := tmpl.InitOsScript(mgr)
 	if err2 != nil {
 		return err2
 	}
@@ -12,7 +12,7 @@ import (
 )

 func FilesDownloadHttp(cfg *kubekeyapi.K2ClusterSpec, filepath string, logger *log.Logger) error {
-	kubeVersion := cfg.KubeCluster.Version
+	kubeVersion := cfg.Kubernetes.Version

 	kubeadmUrl := fmt.Sprintf("https://kubernetes-release.pek3b.qingstor.com/release/%s/bin/linux/%s/kubeadm", kubeVersion, kubekeyapi.DefaultArch)
 	kubeletUrl := fmt.Sprintf("https://kubernetes-release.pek3b.qingstor.com/release/%s/bin/linux/%s/kubelet", kubeVersion, kubekeyapi.DefaultArch)
@@ -1,8 +1,8 @@
 package tmpl

 import (
-	kubekeyapi "github.com/kubesphere/kubekey/pkg/apis/kubekey/v1alpha1"
 	"github.com/kubesphere/kubekey/pkg/util"
+	"github.com/kubesphere/kubekey/pkg/util/manager"
 	"github.com/lithammer/dedent"
 	"text/template"
 )
@@ -72,9 +72,8 @@ cat >>/etc/hosts<<EOF
 EOF
 `)))

-func InitOsScript(cfg *kubekeyapi.K2ClusterSpec) (string, error) {
-	hostlist := cfg.GenerateHosts()
+func InitOsScript(mgr *manager.Manager) (string, error) {
 	return util.Render(initOsScriptTmpl, util.Data{
-		"Hosts": hostlist,
+		"Hosts": mgr.ClusterHosts,
 	})
 }
@@ -55,19 +55,19 @@ func SetDefaultK2ClusterSpec(cfg *kubekeyapi.K2ClusterSpec) kubekeyapi.K2ClusterSpec {
 	clusterCfg := kubekeyapi.K2ClusterSpec{}

 	clusterCfg.Hosts = SetDefaultHostsCfg(cfg)
-	clusterCfg.LBKubeApiserver = SetDefaultLBCfg(cfg)
+	clusterCfg.ControlPlaneEndpoint = SetDefaultLBCfg(cfg)
 	clusterCfg.Network = SetDefaultNetworkCfg(cfg)
-	clusterCfg.KubeCluster = SetDefaultClusterCfg(cfg)
+	clusterCfg.Kubernetes = SetDefaultClusterCfg(cfg)
 	clusterCfg.Registry = cfg.Registry
 	clusterCfg.Plugins = cfg.Plugins
-	if cfg.KubeCluster.ImageRepo == "" {
-		clusterCfg.KubeCluster.ImageRepo = kubekeyapi.DefaultKubeImageRepo
+	if cfg.Kubernetes.ImageRepo == "" {
+		clusterCfg.Kubernetes.ImageRepo = kubekeyapi.DefaultKubeImageRepo
 	}
-	if cfg.KubeCluster.ClusterName == "" {
-		clusterCfg.KubeCluster.ClusterName = kubekeyapi.DefaultClusterName
+	if cfg.Kubernetes.ClusterName == "" {
+		clusterCfg.Kubernetes.ClusterName = kubekeyapi.DefaultClusterName
 	}
-	if cfg.KubeCluster.Version == "" {
-		clusterCfg.KubeCluster.Version = kubekeyapi.DefaultKubeVersion
+	if cfg.Kubernetes.Version == "" {
+		clusterCfg.Kubernetes.Version = kubekeyapi.DefaultKubeVersion
 	}
 	return clusterCfg
 }
@@ -77,15 +77,14 @@ func SetDefaultHostsCfg(cfg *kubekeyapi.K2ClusterSpec) []kubekeyapi.HostCfg {
 	if len(cfg.Hosts) == 0 {
 		return nil
 	}
-	clinetNum := 0
 	for index, host := range cfg.Hosts {
 		host.ID = index

-		if len(host.SSHAddress) == 0 && len(host.InternalAddress) > 0 {
-			host.SSHAddress = host.InternalAddress
+		if len(host.Address) == 0 && len(host.InternalAddress) > 0 {
+			host.Address = host.InternalAddress
 		}
-		if len(host.InternalAddress) == 0 && len(host.SSHAddress) > 0 {
-			host.InternalAddress = host.SSHAddress
+		if len(host.InternalAddress) == 0 && len(host.Address) > 0 {
+			host.InternalAddress = host.Address
 		}
 		if host.User == "" {
 			host.User = "root"
@@ -94,65 +93,31 @@ func SetDefaultHostsCfg(cfg *kubekeyapi.K2ClusterSpec) []kubekeyapi.HostCfg {
 			host.Port = strconv.Itoa(22)
 		}

-		for _, role := range host.Role {
-			if role == "etcd" {
-				host.IsEtcd = true
-			}
-			if role == "master" {
-				host.IsMaster = true
-			}
-			if role == "worker" {
-				host.IsWorker = true
-			}
-			if role == "client" {
-				clinetNum++
-			}
-		}
 		hostscfg = append(hostscfg, host)
 	}

-	if clinetNum == 0 {
-		for index := range hostscfg {
-			if hostscfg[index].IsMaster {
-				hostscfg[index].IsClient = true
-				hostscfg[index].Role = append(hostscfg[index].Role, "client")
-				break
-			}
-		}
-	}
 	return hostscfg
 }

-func SetDefaultLBCfg(cfg *kubekeyapi.K2ClusterSpec) kubekeyapi.LBKubeApiserverCfg {
+func SetDefaultLBCfg(cfg *kubekeyapi.K2ClusterSpec) kubekeyapi.ControlPlaneEndpoint {
 	masterHosts := []kubekeyapi.HostCfg{}
 	hosts := SetDefaultHostsCfg(cfg)
 	for _, host := range hosts {
-		for _, role := range host.Role {
-			if role == "etcd" {
-				host.IsEtcd = true
-			}
-			if role == "master" {
-				host.IsMaster = true
-			}
-			if role == "worker" {
-				host.IsWorker = true
-			}
-		}
 		if host.IsMaster {
 			masterHosts = append(masterHosts, host)
 		}
 	}

-	if cfg.LBKubeApiserver.Address == "" {
-		cfg.LBKubeApiserver.Address = masterHosts[0].InternalAddress
+	if cfg.ControlPlaneEndpoint.Address == "" {
+		cfg.ControlPlaneEndpoint.Address = masterHosts[0].InternalAddress
 	}
-	if cfg.LBKubeApiserver.Domain == "" {
-		cfg.LBKubeApiserver.Domain = kubekeyapi.DefaultLBDomain
+	if cfg.ControlPlaneEndpoint.Domain == "" {
+		cfg.ControlPlaneEndpoint.Domain = kubekeyapi.DefaultLBDomain
 	}
-	if cfg.LBKubeApiserver.Port == "" {
-		cfg.LBKubeApiserver.Port = kubekeyapi.DefaultLBPort
+	if cfg.ControlPlaneEndpoint.Port == "" {
+		cfg.ControlPlaneEndpoint.Port = kubekeyapi.DefaultLBPort
 	}
-	defaultLbCfg := cfg.LBKubeApiserver
+	defaultLbCfg := cfg.ControlPlaneEndpoint
 	return defaultLbCfg
 }

return defaultNetworkCfg
}

func SetDefaultClusterCfg(cfg *kubekeyapi.K2ClusterSpec) kubekeyapi.KubeCluster {
if cfg.KubeCluster.Version == "" {
cfg.KubeCluster.Version = kubekeyapi.DefaultKubeVersion
func SetDefaultClusterCfg(cfg *kubekeyapi.K2ClusterSpec) kubekeyapi.Kubernetes {
if cfg.Kubernetes.Version == "" {
cfg.Kubernetes.Version = kubekeyapi.DefaultKubeVersion
}
if cfg.KubeCluster.ImageRepo == "" {
cfg.KubeCluster.ImageRepo = kubekeyapi.DefaultKubeImageRepo
if cfg.Kubernetes.ImageRepo == "" {
cfg.Kubernetes.ImageRepo = kubekeyapi.DefaultKubeImageRepo
}
if cfg.KubeCluster.ClusterName == "" {
cfg.KubeCluster.ClusterName = kubekeyapi.DefaultClusterName
if cfg.Kubernetes.ClusterName == "" {
cfg.Kubernetes.ClusterName = kubekeyapi.DefaultClusterName
}

defaultClusterCfg := cfg.KubeCluster
defaultClusterCfg := cfg.Kubernetes

return defaultClusterCfg
}
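The KubeCluster → Kubernetes rename touches every cfg.KubeCluster access in this file, but only three fields are ever defaulted. An assumed minimal shape of the renamed type (the real definition lives in pkg/apis/kubekey/v1alpha1; the yaml tags are a guess):

    // Assumed sketch of kubekeyapi.Kubernetes, limited to the defaulted fields.
    type Kubernetes struct {
        Version     string `yaml:"version"`
        ImageRepo   string `yaml:"imageRepo"`
        ClusterName string `yaml:"clusterName"`
    }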
@ -198,14 +163,13 @@ func AllinoneHost(user *user.User) kubekeyapi.K2Cluster {
}

allinoneCfg.Spec.Hosts = append(allinoneCfg.Spec.Hosts, kubekeyapi.HostCfg{
HostName: "ks-allinone",
SSHAddress: "",
Name: "ks-allinone",
Address: "",
InternalAddress: util.LocalIP(),
Port: "",
User: user.Name,
Password: "",
SSHKeyPath: fmt.Sprintf("%s/.ssh/id_rsa", user.HomeDir),
Role: []string{"master", "worker", "etcd"},
PrivateKeyPath: fmt.Sprintf("%s/.ssh/id_rsa", user.HomeDir),
})
return allinoneCfg
}
@ -49,10 +49,10 @@ func preDownloadImages(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.
imagesList := []Image{
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "etcd", Tag: kubekeyapi.DefaultEtcdVersion, Group: "etcd", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "pause", Tag: "3.1", Group: "k8s", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "kube-apiserver", Tag: mgr.Cluster.KubeCluster.Version, Group: "master", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "kube-controller-manager", Tag: mgr.Cluster.KubeCluster.Version, Group: "master", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "kube-scheduler", Tag: mgr.Cluster.KubeCluster.Version, Group: "master", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "kube-proxy", Tag: mgr.Cluster.KubeCluster.Version, Group: "k8s", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "kube-apiserver", Tag: mgr.Cluster.Kubernetes.Version, Group: "master", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "kube-controller-manager", Tag: mgr.Cluster.Kubernetes.Version, Group: "master", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "kube-scheduler", Tag: mgr.Cluster.Kubernetes.Version, Group: "master", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, kubekeyapi.DefaultKubeImageRepo), Repo: "kube-proxy", Tag: mgr.Cluster.Kubernetes.Version, Group: "k8s", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, "coredns"), Repo: "coredns", Tag: "1.6.0", Group: "k8s", Enable: true},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, "calico"), Repo: "kube-controllers", Tag: kubekeyapi.DefaultCalicoVersion, Group: "k8s", Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "calico")},
{Prefix: GetImagePrefix(mgr.Cluster.Registry.PrivateRegistry, "calico"), Repo: "cni", Tag: kubekeyapi.DefaultCalicoVersion, Group: "k8s", Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "calico")},
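Each entry above composes a pull reference from Prefix + Repo + Tag, and the core-component tags now come from mgr.Cluster.Kubernetes.Version instead of KubeCluster.Version. GetImagePrefix and NewImage are real helpers in this package, but their bodies are not shown in the diff; the following is a hypothetical reconstruction for illustration only:

    package main

    import "fmt"

    type Image struct {
        Prefix string
        Repo   string
        Tag    string
    }

    // NewImage assembles the full pull reference (assumed implementation).
    func (i Image) NewImage() string {
        return fmt.Sprintf("%s%s:%s", i.Prefix, i.Repo, i.Tag)
    }

    // getImagePrefix prefers a private registry when one is configured
    // (assumed implementation of GetImagePrefix).
    func getImagePrefix(privateRegistry, repo string) string {
        if privateRegistry != "" {
            return fmt.Sprintf("%s/%s/", privateRegistry, repo)
        }
        return fmt.Sprintf("%s/", repo)
    }

    func main() {
        img := Image{
            Prefix: getImagePrefix("", "kubesphere"), // "kubesphere" stands in for DefaultKubeImageRepo
            Repo:   "kube-apiserver",
            Tag:    "v1.17.4", // stands in for mgr.Cluster.Kubernetes.Version
        }
        fmt.Println(img.NewImage()) // kubesphere/kube-apiserver:v1.17.4
    }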
@ -64,21 +64,21 @@ func preDownloadImages(mgr *manager.Manager, node *kubekeyapi.HostCfg, conn ssh.
for _, image := range imagesList {

if node.IsMaster && image.Group == "master" && image.Enable {
fmt.Printf("[%s] Downloading image: %s\n", node.HostName, image.NewImage())
fmt.Printf("[%s] Downloading image: %s\n", node.Name, image.NewImage())
_, err := mgr.Runner.RunCmd(fmt.Sprintf("sudo -E docker pull %s", image.NewImage()))
if err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("failed to download image: %s", image.NewImage()))
}
}
if (node.IsMaster || node.IsWorker) && image.Group == "k8s" && image.Enable {
fmt.Printf("[%s] Downloading image: %s\n", node.HostName, image.NewImage())
fmt.Printf("[%s] Downloading image: %s\n", node.Name, image.NewImage())
_, err := mgr.Runner.RunCmd(fmt.Sprintf("sudo -E docker pull %s", image.NewImage()))
if err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("failed to download image: %s", image.NewImage()))
}
}
if node.IsEtcd && image.Group == "etcd" && image.Enable {
fmt.Printf("[%s] Downloading image: %s\n", node.HostName, image.NewImage())
fmt.Printf("[%s] Downloading image: %s\n", node.Name, image.NewImage())
_, err := mgr.Runner.RunCmd(fmt.Sprintf("sudo -E docker pull %s", image.NewImage()))
if err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("failed to download image: %s", image.NewImage()))
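The three branches above differ only in which node roles qualify for an image group; the pull-and-wrap-error body is identical. A possible consolidation, sketched by the editor and not part of this commit:

    package images

    // shouldPull reports whether a node with the given roles needs an image
    // in the given group, matching the three conditions in the loop above.
    func shouldPull(isMaster, isWorker, isEtcd bool, group string) bool {
        switch group {
        case "master":
            return isMaster
        case "k8s":
            return isMaster || isWorker
        case "etcd":
            return isEtcd
        default:
            return false
        }
    }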
@ -1,6 +1,7 @@
package install

import (
"fmt"
kubekeyapi "github.com/kubesphere/kubekey/pkg/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/util/manager"
"github.com/kubesphere/kubekey/pkg/util/ssh"
@ -31,17 +32,37 @@ func (executor *Executor) Execute() error {
func (executor *Executor) createManager() (*manager.Manager, error) {
mgr := &manager.Manager{}
allNodes, etcdNodes, masterNodes, workerNodes, k8sNodes, clientNode := executor.cluster.GroupHosts()
mgr.AllNodes = allNodes
mgr.EtcdNodes = etcdNodes
mgr.MasterNodes = masterNodes
mgr.WorkerNodes = workerNodes
mgr.K8sNodes = k8sNodes
mgr.ClientNode = clientNode
hostGroups := executor.cluster.GroupHosts()
mgr.AllNodes = hostGroups.All
mgr.EtcdNodes = hostGroups.Etcd
mgr.MasterNodes = hostGroups.Master
mgr.WorkerNodes = hostGroups.Worker
mgr.K8sNodes = hostGroups.K8s
mgr.Cluster = executor.cluster
mgr.ClusterHosts = GenerateHosts(hostGroups, executor.cluster)
mgr.Connector = ssh.NewConnector()
mgr.Logger = executor.logger
mgr.Verbose = executor.Verbose

return mgr, nil
}

func GenerateHosts(hostGroups *kubekeyapi.HostGroups, cfg *kubekeyapi.K2ClusterSpec) []string {
var lbHost string
hostsList := []string{}

if cfg.ControlPlaneEndpoint.Address != "" {
lbHost = fmt.Sprintf("%s %s", cfg.ControlPlaneEndpoint.Address, cfg.ControlPlaneEndpoint.Domain)
} else {
lbHost = fmt.Sprintf("%s %s", hostGroups.Master[0].InternalAddress, cfg.ControlPlaneEndpoint.Domain)
}

for _, host := range cfg.Hosts {
if host.Name != "" {
hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", host.InternalAddress, host.Name, cfg.Kubernetes.ClusterName, host.Name))
}
}

hostsList = append(hostsList, lbHost)
return hostsList
}
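This hunk shows the refactor's central move: GroupHosts no longer returns six values but a single *kubekeyapi.HostGroups, and the new GenerateHosts precomputes /etc/hosts-style entries once per run. The field names below are taken from the hostGroups accesses above; Client is an assumption, since Manager.ClientNode still exists but is not assigned in this hunk:

    // Assumed shape of the new kubekeyapi.HostGroups.
    type HostGroups struct {
        All    []HostCfg
        Etcd   []HostCfg
        Master []HostCfg
        Worker []HostCfg
        K8s    []HostCfg
        Client []HostCfg // assumed; not referenced in this hunk
    }

For a small cluster named cluster.local with no explicit control-plane address, GenerateHosts would yield entries along these lines (addresses and domain illustrative):

    192.168.0.2 master1.cluster.local master1
    192.168.0.3 worker1.cluster.local worker1
    192.168.0.2 lb.kubesphere.local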
@ -1,6 +1,7 @@
package reset

import (
"fmt"
kubekeyapi "github.com/kubesphere/kubekey/pkg/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/util/manager"
"github.com/kubesphere/kubekey/pkg/util/ssh"
@ -10,12 +11,14 @@ import (
type Executor struct {
cluster *kubekeyapi.K2ClusterSpec
logger *log.Logger
Verbose bool
}

func NewExecutor(cluster *kubekeyapi.K2ClusterSpec, logger *log.Logger) *Executor {
func NewExecutor(cluster *kubekeyapi.K2ClusterSpec, logger *log.Logger, verbose bool) *Executor {
return &Executor{
cluster: cluster,
logger: logger,
Verbose: verbose,
}
}
@ -29,17 +32,37 @@ func (executor *Executor) Execute() error {
func (executor *Executor) createManager() (*manager.Manager, error) {
mgr := &manager.Manager{}
allNodes, etcdNodes, masterNodes, workerNodes, k8sNodes, clientNode := executor.cluster.GroupHosts()
mgr.AllNodes = allNodes
mgr.EtcdNodes = etcdNodes
mgr.MasterNodes = masterNodes
mgr.WorkerNodes = workerNodes
mgr.K8sNodes = k8sNodes
mgr.ClientNode = clientNode
hostGroups := executor.cluster.GroupHosts()
mgr.AllNodes = hostGroups.All
mgr.EtcdNodes = hostGroups.Etcd
mgr.MasterNodes = hostGroups.Master
mgr.WorkerNodes = hostGroups.Worker
mgr.K8sNodes = hostGroups.K8s
mgr.Cluster = executor.cluster
mgr.ClusterHosts = GenerateHosts(hostGroups, executor.cluster)
mgr.Connector = ssh.NewConnector()
mgr.Logger = executor.logger
mgr.Verbose = true
mgr.Verbose = executor.Verbose

return mgr, nil
}

func GenerateHosts(hostGroups *kubekeyapi.HostGroups, cfg *kubekeyapi.K2ClusterSpec) []string {
var lbHost string
hostsList := []string{}

if cfg.ControlPlaneEndpoint.Address != "" {
lbHost = fmt.Sprintf("%s %s", cfg.ControlPlaneEndpoint.Address, cfg.ControlPlaneEndpoint.Domain)
} else {
lbHost = fmt.Sprintf("%s %s", hostGroups.Master[0].InternalAddress, cfg.ControlPlaneEndpoint.Domain)
}

for _, host := range cfg.Hosts {
if host.Name != "" {
hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", host.InternalAddress, host.Name, cfg.Kubernetes.ClusterName, host.Name))
}
}

hostsList = append(hostsList, lbHost)
return hostsList
}
@ -11,7 +11,7 @@ import (
log "github.com/sirupsen/logrus"
)

func ResetCluster(clusterCfgFile string, logger *log.Logger) error {
func ResetCluster(clusterCfgFile string, logger *log.Logger, verbose bool) error {
cfg, err := config.ParseClusterCfg(clusterCfgFile, logger)
if err != nil {
return errors.Wrap(err, "failed to download cluster config")
@ -22,7 +22,7 @@ func ResetCluster(clusterCfgFile string, logger *log.Logger) error {
if err := preinstall.Prepare(&cfg.Spec, logger); err != nil {
return errors.Wrap(err, "failed to load kube binarys")
}
return NewExecutor(&cfg.Spec, logger).Execute()
return NewExecutor(&cfg.Spec, logger, verbose).Execute()
}

func ExecTasks(mgr *manager.Manager) error {
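With these two hunks the CLI's --debug flag is threaded end to end instead of verbosity being hard-coded in createManager. A condensed, hypothetical trace of the call chain (runReset is not a real function in the repo; util.InitLogger and ResetCluster are, with the signatures shown in the diff):

    // Hypothetical cmd-layer wiring.
    func runReset(clusterCfgFile string, verbose bool) error {
        logger := util.InitLogger(verbose)
        // ResetCluster -> NewExecutor(&cfg.Spec, logger, verbose).Execute()
        //              -> createManager: mgr.Verbose = executor.Verbose
        return reset.ResetCluster(clusterCfgFile, logger, verbose)
    }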
@ -1,6 +1,7 @@
package scale

import (
"fmt"
kubekeyapi "github.com/kubesphere/kubekey/pkg/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/util/manager"
ssh "github.com/kubesphere/kubekey/pkg/util/ssh"
@ -31,17 +32,37 @@ func (executor *Executor) Execute() error {
func (executor *Executor) createManager() (*manager.Manager, error) {
mgr := &manager.Manager{}
allNodes, etcdNodes, masterNodes, workerNodes, k8sNodes, clientNode := executor.cluster.GroupHosts()
mgr.AllNodes = allNodes
mgr.EtcdNodes = etcdNodes
mgr.MasterNodes = masterNodes
mgr.WorkerNodes = workerNodes
mgr.K8sNodes = k8sNodes
mgr.ClientNode = clientNode
hostGroups := executor.cluster.GroupHosts()
mgr.AllNodes = hostGroups.All
mgr.EtcdNodes = hostGroups.Etcd
mgr.MasterNodes = hostGroups.Master
mgr.WorkerNodes = hostGroups.Worker
mgr.K8sNodes = hostGroups.K8s
mgr.Cluster = executor.cluster
mgr.ClusterHosts = GenerateHosts(hostGroups, executor.cluster)
mgr.Connector = ssh.NewConnector()
mgr.Logger = executor.logger
mgr.Verbose = executor.Verbose

return mgr, nil
}

func GenerateHosts(hostGroups *kubekeyapi.HostGroups, cfg *kubekeyapi.K2ClusterSpec) []string {
var lbHost string
hostsList := []string{}

if cfg.ControlPlaneEndpoint.Address != "" {
lbHost = fmt.Sprintf("%s %s", cfg.ControlPlaneEndpoint.Address, cfg.ControlPlaneEndpoint.Domain)
} else {
lbHost = fmt.Sprintf("%s %s", hostGroups.Master[0].InternalAddress, cfg.ControlPlaneEndpoint.Domain)
}

for _, host := range cfg.Hosts {
if host.Name != "" {
hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", host.InternalAddress, host.Name, cfg.Kubernetes.ClusterName, host.Name))
}
}

hostsList = append(hostsList, lbHost)
return hostsList
}
@ -13,20 +13,21 @@ import (
)

type Manager struct {
Cluster *kubekeyapi.K2ClusterSpec
Logger logrus.FieldLogger
Connector *ssh2.Dialer
Runner *runner.Runner
AllNodes *kubekeyapi.Hosts
EtcdNodes *kubekeyapi.Hosts
MasterNodes *kubekeyapi.Hosts
WorkerNodes *kubekeyapi.Hosts
K8sNodes *kubekeyapi.Hosts
ClientNode *kubekeyapi.Hosts
WorkDir string
JoinCommand string
JoinToken string
Verbose bool
Cluster *kubekeyapi.K2ClusterSpec
Logger logrus.FieldLogger
Connector *ssh2.Dialer
Runner *runner.Runner
AllNodes []kubekeyapi.HostCfg
EtcdNodes []kubekeyapi.HostCfg
MasterNodes []kubekeyapi.HostCfg
WorkerNodes []kubekeyapi.HostCfg
K8sNodes []kubekeyapi.HostCfg
ClientNode []kubekeyapi.HostCfg
ClusterHosts []string
WorkDir string
JoinCommand string
JoinToken string
Verbose bool
}

func (mgr *Manager) KubeadmVerboseFlag() string {
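The node groups drop the *kubekeyapi.Hosts wrapper in favor of plain []kubekeyapi.HostCfg slices, and Manager gains ClusterHosts to carry the precomputed hosts-file entries. The payoff shows up in the RunTaskOn* hunks below, where the extra .Hosts indirection disappears at every call site:

    // Before: the wrapper had to be unwrapped at each call.
    mgr.RunTaskOnNodes(mgr.AllNodes.Hosts, task, parallel)
    // After: the slice is passed directly.
    mgr.RunTaskOnNodes(mgr.AllNodes, task, parallel)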
@ -29,12 +29,12 @@ func (mgr *Manager) runTask(node *kubekeyapi.HostCfg, task NodeTask, prefixed bo
// because we want to re-use it for future task)
conn, err = mgr.Connector.Connect(*node)
if err != nil {
return errors.Wrapf(err, "failed to connect to %s", node.SSHAddress)
return errors.Wrapf(err, "failed to connect to %s", node.Address)
}

prefix := ""
if prefixed {
prefix = fmt.Sprintf("[%s] ", node.HostName)
prefix = fmt.Sprintf("[%s] ", node.Name)
}

mgr.Runner = &runner.Runner{
@ -76,7 +76,7 @@ func (mgr *Manager) RunTaskOnNodes(nodes []kubekeyapi.HostCfg, task NodeTask, pa
for i := range nodes {
mgrTask := mgr.Clone()
mgrTask.Logger = mgrTask.Logger.WithField("node", nodes[i].SSHAddress)
mgrTask.Logger = mgrTask.Logger.WithField("node", nodes[i].Address)

if parallel {
ccons <- struct{}{}
@ -107,42 +107,42 @@ func (mgr *Manager) RunTaskOnNodes(nodes []kubekeyapi.HostCfg, task NodeTask, pa
}

func (mgr *Manager) RunTaskOnAllNodes(task NodeTask, parallel bool) error {
if err := mgr.RunTaskOnNodes(mgr.AllNodes.Hosts, task, parallel); err != nil {
if err := mgr.RunTaskOnNodes(mgr.AllNodes, task, parallel); err != nil {
return err
}
return nil
}

func (mgr *Manager) RunTaskOnEtcdNodes(task NodeTask, parallel bool) error {
if err := mgr.RunTaskOnNodes(mgr.EtcdNodes.Hosts, task, parallel); err != nil {
if err := mgr.RunTaskOnNodes(mgr.EtcdNodes, task, parallel); err != nil {
return err
}
return nil
}

func (mgr *Manager) RunTaskOnMasterNodes(task NodeTask, parallel bool) error {
if err := mgr.RunTaskOnNodes(mgr.MasterNodes.Hosts, task, parallel); err != nil {
if err := mgr.RunTaskOnNodes(mgr.MasterNodes, task, parallel); err != nil {
return err
}
return nil
}

func (mgr *Manager) RunTaskOnWorkerNodes(task NodeTask, parallel bool) error {
if err := mgr.RunTaskOnNodes(mgr.WorkerNodes.Hosts, task, parallel); err != nil {
if err := mgr.RunTaskOnNodes(mgr.WorkerNodes, task, parallel); err != nil {
return err
}
return nil
}

func (mgr *Manager) RunTaskOnK8sNodes(task NodeTask, parallel bool) error {
if err := mgr.RunTaskOnNodes(mgr.K8sNodes.Hosts, task, parallel); err != nil {
if err := mgr.RunTaskOnNodes(mgr.K8sNodes, task, parallel); err != nil {
return err
}
return nil
}

func (mgr *Manager) RunTaskOnClientNode(task NodeTask, parallel bool) error {
if err := mgr.RunTaskOnNodes(mgr.ClientNode.Hosts, task, parallel); err != nil {
if err := mgr.RunTaskOnNodes(mgr.ClientNode, task, parallel); err != nil {
return err
}
return nil
@ -39,7 +39,7 @@ func (r *Runner) RunCmd(cmd string) (string, error) {
if output != "" {
if strings.Contains(cmd, "base64") && strings.Contains(cmd, "--wrap=0") || strings.Contains(cmd, "make-ssl-etcd.sh") || strings.Contains(cmd, "docker-install.sh") || strings.Contains(cmd, "docker pull") {
} else {
fmt.Printf("[%s %s] MSG:\n", r.Host.HostName, r.Host.SSHAddress)
fmt.Printf("[%s %s] MSG:\n", r.Host.Name, r.Host.Address)
fmt.Println(output)
}
}
@ -55,12 +55,12 @@ func (r *Runner) ScpFile(src, dst string) error {
err := r.Conn.Scp(src, dst)
if err != nil {
if r.Verbose {
fmt.Printf("push %s to %s:%s Failed\n", src, r.Host.SSHAddress, dst)
fmt.Printf("push %s to %s:%s Failed\n", src, r.Host.Address, dst)
return err
}
} else {
if r.Verbose {
fmt.Printf("push %s to %s:%s Done\n", src, r.Host.SSHAddress, dst)
fmt.Printf("push %s to %s:%s Done\n", src, r.Host.Address, dst)
}
}
return nil
@ -52,9 +52,9 @@ func (dialer *Dialer) Connect(host kubekeyapi.HostCfg) (Connection, error) {
opts := SSHCfg{
Username: host.User,
Port: port,
Address: host.SSHAddress,
Address: host.Address,
Password: host.Password,
KeyFile: host.SSHKeyPath,
KeyFile: host.PrivateKeyPath,
//AgentSocket: host.SSHAgentSocket,
Timeout: 10 * time.Second,
//Bastion: host.Bastion,