Support customizing the roleGroups field

Signed-off-by: 24sama <leo@kubesphere.io>
24sama 2022-01-18 17:57:00 +08:00
parent 301611c1f0
commit f37e29f32d
13 changed files with 154 additions and 379 deletions
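
The heart of the change: ClusterSpec.RoleGroups becomes a plain map[string][]string, so role groups are no longer limited to the four fixed struct fields (etcd / master / worker / registry). A minimal sketch of what the new type admits; the gpu-worker key is purely illustrative, not something this commit defines:

package main

import "fmt"

func main() {
	// RoleGroups is now map[string][]string, so arbitrary role keys are accepted.
	roleGroups := map[string][]string{
		"etcd":          {"node1"},
		"control-plane": {"node1"}, // new alias for "master" introduced by this commit
		"worker":        {"node1", "node2"},
		"gpu-worker":    {"node2"}, // illustrative custom group
	}
	for role, hosts := range roleGroups {
		fmt.Printf("%s: %v\n", role, hosts)
	}
}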


@@ -18,6 +18,7 @@ package v1alpha2
import (
"fmt"
"github.com/kubesphere/kubekey/pkg/core/connector"
"regexp"
"strconv"
"strings"
@@ -39,7 +40,7 @@ type ClusterSpec struct {
// Foo is an example field of Cluster. Edit Cluster_types.go to remove/update
Hosts []HostCfg `yaml:"hosts" json:"hosts,omitempty"`
RoleGroups RoleGroups `yaml:"roleGroups" json:"roleGroups,omitempty"`
RoleGroups map[string][]string `yaml:"roleGroups" json:"roleGroups,omitempty"`
ControlPlaneEndpoint ControlPlaneEndpoint `yaml:"controlPlaneEndpoint" json:"controlPlaneEndpoint,omitempty"`
System System `yaml:"system" json:"system,omitempty"`
Kubernetes Kubernetes `yaml:"kubernetes" json:"kubernetes,omitempty"`
@@ -148,31 +149,8 @@ type HostCfg struct {
PrivateKeyPath string `yaml:"privateKeyPath,omitempty" json:"privateKeyPath,omitempty"`
Arch string `yaml:"arch,omitempty" json:"arch,omitempty"`
Labels map[string]string `yaml:"labels,omitempty" json:"labels,omitempty"`
ID string `yaml:"id,omitempty" json:"id,omitempty"`
Index int `json:"-"`
IsEtcd bool `json:"-"`
IsMaster bool `json:"-"`
IsWorker bool `json:"-"`
IsRegistry bool `json:"-"`
}
// RoleGroups defines the grouping of role for hosts (etcd / master / worker / registry).
type RoleGroups struct {
Etcd []string `yaml:"etcd" json:"etcd,omitempty"`
Master []string `yaml:"master" json:"master,omitempty"`
Worker []string `yaml:"worker" json:"worker,omitempty"`
Registry []string `yaml:"registry" json:"registry,omitempty"`
}
// HostGroups defines the grouping of hosts for cluster (all / etcd / master / worker / k8s).
type HostGroups struct {
All []HostCfg
Etcd []HostCfg
Master []HostCfg
Worker []HostCfg
K8s []HostCfg
Registry []HostCfg
Labels map[string]string `yaml:"labels,omitempty" json:"labels,omitempty"`
ID string `yaml:"id,omitempty" json:"id,omitempty"`
}
// ControlPlaneEndpoint defines the control plane endpoint information for cluster.
@@ -246,99 +224,49 @@ func (cfg *ClusterSpec) GenerateCertSANs() []string {
}
// GroupHosts is used to group hosts according to the configuration file.
func (cfg *ClusterSpec) GroupHosts() (*HostGroups, error) {
clusterHostsGroups := HostGroups{}
hostList := map[string]string{}
for _, host := range cfg.Hosts {
hostList[host.Name] = host.Name
func (cfg *ClusterSpec) GroupHosts() (map[string][]*connector.BaseHost, error) {
hostMap := make(map[string]*connector.BaseHost)
for _, hostCfg := range cfg.Hosts {
host := toHosts(hostCfg)
hostMap[host.Name] = host
}
etcdGroup, masterGroup, workerGroup, registryGroup, err := cfg.ParseRolesList(hostList)
roleGroups, err := cfg.ParseRolesList(hostMap)
if err != nil {
return nil, err
}
for index, host := range cfg.Hosts {
host.Index = index
if len(etcdGroup) > 0 {
for _, hostName := range etcdGroup {
if host.Name == hostName {
host.IsEtcd = true
break
}
}
}
if len(masterGroup) > 0 {
for _, hostName := range masterGroup {
if host.Name == hostName {
host.IsMaster = true
break
}
}
}
if len(workerGroup) > 0 {
for _, hostName := range workerGroup {
if hostName != "" && host.Name == hostName {
host.IsWorker = true
break
}
}
}
if len(registryGroup) > 0 {
for _, hostName := range registryGroup {
if hostName != "" && host.Name == hostName {
host.IsRegistry = true
}
}
}
if host.IsEtcd {
clusterHostsGroups.Etcd = append(clusterHostsGroups.Etcd, host)
}
if host.IsMaster {
clusterHostsGroups.Master = append(clusterHostsGroups.Master, host)
}
if host.IsWorker {
clusterHostsGroups.Worker = append(clusterHostsGroups.Worker, host)
}
if host.IsMaster || host.IsWorker {
clusterHostsGroups.K8s = append(clusterHostsGroups.K8s, host)
}
if host.IsRegistry {
clusterHostsGroups.Registry = append(clusterHostsGroups.Registry, host)
}
clusterHostsGroups.All = append(clusterHostsGroups.All, host)
}
// Check whether the parameters under roleGroups are correct
if len(masterGroup) == 0 {
logger.Log.Fatal(errors.New("The number of master cannot be 0"))
if len(roleGroups[Master]) == 0 && len(roleGroups[ControlPlane]) == 0 {
logger.Log.Fatal(errors.New("The number of master/control-plane cannot be 0"))
}
if len(etcdGroup) == 0 {
if len(roleGroups[Etcd]) == 0 {
logger.Log.Fatal(errors.New("The number of etcd cannot be 0"))
}
if len(registryGroup) > 1 {
if len(roleGroups[Registry]) > 1 {
logger.Log.Fatal(errors.New("The number of registry node cannot be greater than 1."))
}
if len(masterGroup) != len(clusterHostsGroups.Master) {
return nil, errors.New("Incorrect nodeName under roleGroups/master in the configuration file")
}
if len(etcdGroup) != len(clusterHostsGroups.Etcd) {
return nil, errors.New("Incorrect nodeName under roleGroups/etcd in the configuration file")
}
if len(workerGroup) != len(clusterHostsGroups.Worker) {
return nil, errors.New("Incorrect nodeName under roleGroups/work in the configuration file")
}
if len(registryGroup) != len(clusterHostsGroups.Registry) {
return nil, errors.New("Incorrect nodeName under roleGroups/registry in the configuration file")
for _, host := range roleGroups[ControlPlane] {
host.SetRole(Master)
roleGroups[Master] = append(roleGroups[Master], host)
}
return &clusterHostsGroups, nil
return roleGroups, nil
}
func toHosts(cfg HostCfg) *connector.BaseHost {
host := connector.NewHost()
host.Name = cfg.Name
host.Address = cfg.Address
host.InternalAddress = cfg.InternalAddress
host.Port = cfg.Port
host.User = cfg.User
host.Password = cfg.Password
host.PrivateKey = cfg.PrivateKey
host.PrivateKeyPath = cfg.PrivateKeyPath
host.Arch = cfg.Arch
return host
}
// ClusterIP is used to get the kube-apiserver service address inside the cluster.
@@ -361,60 +289,44 @@ func (cfg *ClusterSpec) ClusterDNS() string {
}
// ParseRolesList is used to parse the host grouping list.
func (cfg *ClusterSpec) ParseRolesList(hostList map[string]string) ([]string, []string, []string, []string, error) {
etcdGroupList := make([]string, 0)
masterGroupList := make([]string, 0)
workerGroupList := make([]string, 0)
registryGroupList := make([]string, 0)
for _, host := range cfg.RoleGroups.Etcd {
if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
etcdGroupList = append(etcdGroupList, getHostsRange(host, hostList, "etcd")...)
} else {
if err := hostVerify(hostList, host, "etcd"); err != nil {
logger.Log.Fatal(err)
func (cfg *ClusterSpec) ParseRolesList(hostMap map[string]*connector.BaseHost) (map[string][]*connector.BaseHost, error) {
roleGroupLists := make(map[string][]*connector.BaseHost)
for role, hosts := range cfg.RoleGroups {
roleGroup := make([]string, 0)
for _, host := range hosts {
h := make([]string, 0)
if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
rangeHosts := getHostsRange(host, hostMap, role)
h = append(h, rangeHosts...)
} else {
if err := hostVerify(hostMap, host, role); err != nil {
logger.Log.Fatal(err)
}
h = append(h, host)
}
roleGroup = append(roleGroup, h...)
for _, hostName := range h {
if h, ok := hostMap[hostName]; ok {
roleGroupAppend(roleGroupLists, role, h)
} else {
return roleGroupLists, fmt.Errorf("incorrect nodeName under roleGroups/%s in the configuration file", role)
}
}
etcdGroupList = append(etcdGroupList, host)
}
}
for _, host := range cfg.RoleGroups.Master {
if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
masterGroupList = append(masterGroupList, getHostsRange(host, hostList, "master")...)
} else {
if err := hostVerify(hostList, host, "master"); err != nil {
logger.Log.Fatal(err)
}
masterGroupList = append(masterGroupList, host)
}
}
for _, host := range cfg.RoleGroups.Worker {
if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
workerGroupList = append(workerGroupList, getHostsRange(host, hostList, "worker")...)
} else {
if err := hostVerify(hostList, host, "worker"); err != nil {
logger.Log.Fatal(err)
}
workerGroupList = append(workerGroupList, host)
}
}
for _, host := range cfg.RoleGroups.Registry {
if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
registryGroupList = append(registryGroupList, getHostsRange(host, hostList, "registry")...)
} else {
if err := hostVerify(hostList, host, "registry"); err != nil {
logger.Log.Fatal(err)
}
registryGroupList = append(registryGroupList, host)
}
}
return etcdGroupList, masterGroupList, workerGroupList, registryGroupList, nil
return roleGroupLists, nil
}
func getHostsRange(rangeStr string, hostList map[string]string, group string) []string {
func roleGroupAppend(roleGroupLists map[string][]*connector.BaseHost, role string, host *connector.BaseHost) {
host.SetRole(role)
r := roleGroupLists[role]
r = append(r, host)
roleGroupLists[role] = r
}
func getHostsRange(rangeStr string, hostMap map[string]*connector.BaseHost, group string) []string {
hostRangeList := make([]string, 0)
r := regexp.MustCompile(`\[(\d+)\:(\d+)\]`)
nameSuffix := r.FindStringSubmatch(rangeStr)
@@ -422,7 +334,7 @@ func getHostsRange(rangeStr string, hostList map[string]string, group string) []
nameSuffixStart, _ := strconv.Atoi(nameSuffix[1])
nameSuffixEnd, _ := strconv.Atoi(nameSuffix[2])
for i := nameSuffixStart; i <= nameSuffixEnd; i++ {
if err := hostVerify(hostList, fmt.Sprintf("%s%d", namePrefix, i), group); err != nil {
if err := hostVerify(hostMap, fmt.Sprintf("%s%d", namePrefix, i), group); err != nil {
logger.Log.Fatal(err)
}
hostRangeList = append(hostRangeList, fmt.Sprintf("%s%d", namePrefix, i))
@@ -430,8 +342,8 @@ func getHostsRange(rangeStr string, hostList map[string]string, group string) []
return hostRangeList
}
func hostVerify(hostList map[string]string, hostName string, group string) error {
if _, ok := hostList[hostName]; !ok {
func hostVerify(hostMap map[string]*connector.BaseHost, hostName string, group string) error {
if _, ok := hostMap[hostName]; !ok {
return fmt.Errorf("[%s] is in [%s] group, but not in hosts list", hostName, group)
}
return nil
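
A worked example of the node[1:3] range syntax handled by getHostsRange above, mirroring its \[(\d+)\:(\d+)\] pattern. Deriving the prefix by cutting the string at the first "[" is an assumption here, since the namePrefix line falls outside the hunk shown:

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// expandRange mirrors the getHostsRange logic above for inputs like "node[1:3]".
func expandRange(rangeStr string) []string {
	r := regexp.MustCompile(`\[(\d+)\:(\d+)\]`)
	m := r.FindStringSubmatch(rangeStr)
	if m == nil {
		return []string{rangeStr}
	}
	prefix := rangeStr[:strings.Index(rangeStr, "[")] // assumed prefix derivation
	start, _ := strconv.Atoi(m[1])
	end, _ := strconv.Atoi(m[2])
	var hosts []string
	for i := start; i <= end; i++ {
		hosts = append(hosts, fmt.Sprintf("%s%d", prefix, i))
	}
	return hosts
}

func main() {
	fmt.Println(expandRange("node[1:3]")) // [node1 node2 node3]
}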


@@ -18,6 +18,7 @@ package v1alpha2
import (
"fmt"
"github.com/kubesphere/kubekey/pkg/core/connector"
"github.com/kubesphere/kubekey/pkg/core/util"
"os"
"strings"
@@ -63,8 +64,10 @@ const (
DefaultIsulaEndpoint = "unix:///var/run/isulad.sock"
Etcd = "etcd"
Master = "master"
ControlPlane = "control-plane"
Worker = "worker"
K8s = "k8s"
Registry = "registry"
DefaultEtcdBackupDir = "/var/backups/kube_etcd"
DefaultEtcdBackupPeriod = 30
DefaultKeepBackNumber = 5
@@ -84,16 +87,16 @@ const (
Haproxy = "haproxy"
)
func (cfg *ClusterSpec) SetDefaultClusterSpec(incluster bool) (*ClusterSpec, *HostGroups, error) {
func (cfg *ClusterSpec) SetDefaultClusterSpec(incluster bool) (*ClusterSpec, map[string][]*connector.BaseHost, error) {
clusterCfg := ClusterSpec{}
clusterCfg.Hosts = SetDefaultHostsCfg(cfg)
clusterCfg.RoleGroups = cfg.RoleGroups
hostGroups, err := clusterCfg.GroupHosts()
roleGroups, err := clusterCfg.GroupHosts()
if err != nil {
return nil, nil, err
}
clusterCfg.ControlPlaneEndpoint = SetDefaultLBCfg(cfg, hostGroups.Master, incluster)
clusterCfg.ControlPlaneEndpoint = SetDefaultLBCfg(cfg, roleGroups[Master], incluster)
clusterCfg.Network = SetDefaultNetworkCfg(cfg)
clusterCfg.System = cfg.System
clusterCfg.Kubernetes = SetDefaultClusterCfg(cfg)
@@ -116,7 +119,7 @@ func (cfg *ClusterSpec) SetDefaultClusterSpec(incluster bool) (*ClusterSpec, *Ho
if cfg.Kubernetes.ProxyMode == "" {
clusterCfg.Kubernetes.ProxyMode = DefaultProxyMode
}
return &clusterCfg, hostGroups, nil
return &clusterCfg, roleGroups, nil
}
func SetDefaultHostsCfg(cfg *ClusterSpec) []HostCfg {
@@ -155,7 +158,7 @@ func SetDefaultHostsCfg(cfg *ClusterSpec) []HostCfg {
return hostCfg
}
func SetDefaultLBCfg(cfg *ClusterSpec, masterGroup []HostCfg, incluster bool) ControlPlaneEndpoint {
func SetDefaultLBCfg(cfg *ClusterSpec, masterGroup []*connector.BaseHost, incluster bool) ControlPlaneEndpoint {
if !incluster {
// The environment is detected as non-HA, so the LB address does not need to be provided
if len(masterGroup) == 1 && cfg.ControlPlaneEndpoint.Address != "" {
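
SetDefaultLBCfg now receives the []*connector.BaseHost group taken from roleGroups[Master]; configs that only declare control-plane still satisfy it, because GroupHosts (earlier in this commit) appends every control-plane host to the master group. A sketch of that aliasing, with strings standing in for *connector.BaseHost:

package main

import "fmt"

const (
	Master       = "master"
	ControlPlane = "control-plane"
)

func main() {
	// Strings stand in for *connector.BaseHost values here.
	roleGroups := map[string][]string{
		ControlPlane: {"node1"},
	}
	// Mirrors the loop at the end of GroupHosts: every control-plane host is
	// also appended to the master group, so roleGroups[Master] stays non-empty.
	roleGroups[Master] = append(roleGroups[Master], roleGroups[ControlPlane]...)
	fmt.Println(roleGroups[Master]) // [node1]
}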


@@ -160,7 +160,21 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.RoleGroups.DeepCopyInto(&out.RoleGroups)
if in.RoleGroups != nil {
in, out := &in.RoleGroups, &out.RoleGroups
*out = make(map[string][]string, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
in.System.DeepCopyInto(&out.System)
in.Kubernetes.DeepCopyInto(&out.Kubernetes)
@@ -490,63 +504,6 @@ func (in *HostCfg) DeepCopy() *HostCfg {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostGroups) DeepCopyInto(out *HostGroups) {
*out = *in
if in.All != nil {
in, out := &in.All, &out.All
*out = make([]HostCfg, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Etcd != nil {
in, out := &in.Etcd, &out.Etcd
*out = make([]HostCfg, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Master != nil {
in, out := &in.Master, &out.Master
*out = make([]HostCfg, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Worker != nil {
in, out := &in.Worker, &out.Worker
*out = make([]HostCfg, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.K8s != nil {
in, out := &in.K8s, &out.K8s
*out = make([]HostCfg, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Registry != nil {
in, out := &in.Registry, &out.Registry
*out = make([]HostCfg, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostGroups.
func (in *HostGroups) DeepCopy() *HostGroups {
if in == nil {
return nil
}
out := new(HostGroups)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Iso) DeepCopyInto(out *Iso) {
*out = *in
@@ -931,41 +888,6 @@ func (in *Repository) DeepCopy() *Repository {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleGroups) DeepCopyInto(out *RoleGroups) {
*out = *in
if in.Etcd != nil {
in, out := &in.Etcd, &out.Etcd
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Master != nil {
in, out := &in.Master, &out.Master
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Worker != nil {
in, out := &in.Worker, &out.Worker
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Registry != nil {
in, out := &in.Registry, &out.Registry
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleGroups.
func (in *RoleGroups) DeepCopy() *RoleGroups {
if in == nil {
return nil
}
out := new(RoleGroups)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Sources) DeepCopyInto(out *Sources) {
*out = *in
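
The generated DeepCopyInto above now copies RoleGroups as a map of slices rather than four fixed fields. A self-contained sketch of the same copy semantics, useful for checking that no backing arrays are shared:

package main

import "fmt"

// deepCopyRoleGroups copies a map[string][]string the way the generated
// DeepCopyInto above does: a fresh map, and a fresh backing array per slice.
func deepCopyRoleGroups(in map[string][]string) map[string][]string {
	if in == nil {
		return nil
	}
	out := make(map[string][]string, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil
			continue
		}
		cp := make([]string, len(val))
		copy(cp, val)
		out[key] = cp
	}
	return out
}

func main() {
	orig := map[string][]string{"worker": {"node1"}}
	cp := deepCopyRoleGroups(orig)
	cp["worker"][0] = "changed"
	fmt.Println(orig["worker"][0]) // node1: the original is untouched
}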


@@ -616,25 +616,10 @@ spec:
type: string
type: object
roleGroups:
description: RoleGroups defines the grouping of role for hosts (etcd
/ master / worker / registry).
properties:
etcd:
items:
type: string
type: array
master:
items:
type: string
type: array
registry:
items:
type: string
type: array
worker:
items:
type: string
type: array
additionalProperties:
items:
type: string
type: array
type: object
system:
description: System defines the system config for each node in cluster.


@@ -255,7 +255,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
for _, node := range nodeList.Items {
currentNodes[node.Name] = node.Name
}
for _, etcd := range cluster.Spec.RoleGroups.Etcd {
for _, etcd := range cluster.Spec.RoleGroups["etcd"] {
if _, ok := currentNodes[etcd]; !ok {
currentNodes[etcd] = etcd
}
@@ -296,10 +296,10 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
}
cluster.Spec.Hosts = newHosts
cluster.Spec.RoleGroups = kubekeyv1alpha2.RoleGroups{
Etcd: newEtcd,
Master: newMaster,
Worker: newWorker,
cluster.Spec.RoleGroups = map[string][]string{
"master": newMaster,
"etcd": newEtcd,
"worker": newWorker,
}
if err := r.Update(ctx, cluster); err != nil {
@@ -770,7 +770,7 @@ func currentClusterDiff(r *ClusterReconciler, ctx context.Context, c *kubekeyv1a
}
} else {
if kubeErr.IsNotFound(err) {
for _, etcdHostName := range c.Spec.RoleGroups.Etcd {
for _, etcdHostName := range c.Spec.RoleGroups["etcd"] {
etcdSpecMap[etcdHostName] = true
}
} else {
@@ -946,7 +946,7 @@ func otherClusterDiff(r *ClusterReconciler, ctx context.Context, c *kubekeyv1alp
}
} else {
if kubeErr.IsNotFound(err) {
for _, etcdHostName := range c.Spec.RoleGroups.Etcd {
for _, etcdHostName := range c.Spec.RoleGroups["etcd"] {
etcdSpecMap[etcdHostName] = true
}
} else {
@@ -956,6 +956,7 @@ func otherClusterDiff(r *ClusterReconciler, ctx context.Context, c *kubekeyv1alp
// add etcd node info to current hosts map
for _, host := range allSpecHostsMap {
_, nameOk := etcdSpecMap[host.Name]
_, ipOk := etcdSpecMap[host.Address]
if !nameOk && !ipOk {
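
etcdSpecMap is filled from cluster.Spec.RoleGroups["etcd"], whose entries may be node names or, judging by the Address check above, IP addresses; a host joins the current hosts map only when neither key matches. A small sketch of that check:

package main

import "fmt"

func main() {
	// Keys mimic etcdSpecMap: entries may be node names or addresses.
	etcdSpec := map[string]bool{"node1": true, "172.16.0.2": true}
	type host struct{ Name, Address string }
	h := host{Name: "node3", Address: "172.16.0.4"}
	_, nameOk := etcdSpec[h.Name]
	_, ipOk := etcdSpec[h.Address]
	if !nameOk && !ipOk {
		fmt.Println("add", h.Name, "to the current hosts map") // neither key matched
	}
}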


@@ -5,8 +5,8 @@ metadata:
name: sample
spec:
hosts:
- {name: node1, address: 172.16.0.2, internalAddress: 172.16.0.2, port: 8022, user: ubuntu, password: Qcloud@123} # Assume that the default port for SSH is 22. Otherwise, add the port number after the IP address. If you install Kubernetes on ARM, add "arch: arm64". For example, {...user: ubuntu, password: Qcloud@123, arch: arm64}.
- {name: node2, address: 172.16.0.3, internalAddress: 172.16.0.3, password: Qcloud@123} # For default root user.
- {name: node1, address: 172.16.0.2, internalAddress: 172.16.0.2, port: 8022, user: ubuntu, password: "Qcloud@123"} # Assume that the default port for SSH is 22. Otherwise, add the port number after the IP address. If you install Kubernetes on ARM, add "arch: arm64". For example, {...user: ubuntu, password: Qcloud@123, arch: arm64}.
- {name: node2, address: 172.16.0.3, internalAddress: 172.16.0.3, password: "Qcloud@123"} # For default root user.
- {name: node3, address: 172.16.0.4, internalAddress: 172.16.0.4, privateKeyPath: "~/.ssh/id_rsa"} # For password-less login with SSH keys.
roleGroups:
etcd:


@@ -29,12 +29,11 @@ type KubeAction struct {
func (k *KubeAction) AutoAssert(runtime connector.Runtime) {
kubeRuntime := runtime.(*KubeRuntime)
conf := &KubeConf{
ClusterHosts: kubeRuntime.ClusterHosts,
Cluster: kubeRuntime.Cluster,
ClusterName: kubeRuntime.ClusterName,
Kubeconfig: kubeRuntime.Kubeconfig,
ClientSet: kubeRuntime.ClientSet,
Arg: kubeRuntime.Arg,
Cluster: kubeRuntime.Cluster,
ClusterName: kubeRuntime.ClusterName,
Kubeconfig: kubeRuntime.Kubeconfig,
ClientSet: kubeRuntime.ClientSet,
Arg: kubeRuntime.Arg,
}
k.KubeConf = conf


@@ -43,12 +43,11 @@ func (k *KubeModule) IsSkip() bool {
func (k *KubeModule) AutoAssert() {
kubeRuntime := k.Runtime.(*KubeRuntime)
conf := &KubeConf{
ClusterHosts: kubeRuntime.ClusterHosts,
ClusterName: kubeRuntime.ClusterName,
Cluster: kubeRuntime.Cluster,
Kubeconfig: kubeRuntime.Kubeconfig,
ClientSet: kubeRuntime.ClientSet,
Arg: kubeRuntime.Arg,
ClusterName: kubeRuntime.ClusterName,
Cluster: kubeRuntime.Cluster,
Kubeconfig: kubeRuntime.Kubeconfig,
ClientSet: kubeRuntime.ClientSet,
Arg: kubeRuntime.Arg,
}
k.KubeConf = conf
@@ -62,12 +61,11 @@ type KubeCustomModule struct {
func (k *KubeCustomModule) AutoAssert() {
kubeRuntime := k.Runtime.(*KubeRuntime)
conf := &KubeConf{
ClusterHosts: kubeRuntime.ClusterHosts,
ClusterName: kubeRuntime.ClusterName,
Cluster: kubeRuntime.Cluster,
Kubeconfig: kubeRuntime.Kubeconfig,
ClientSet: kubeRuntime.ClientSet,
Arg: kubeRuntime.Arg,
ClusterName: kubeRuntime.ClusterName,
Cluster: kubeRuntime.Cluster,
Kubeconfig: kubeRuntime.Kubeconfig,
ClientSet: kubeRuntime.ClientSet,
Arg: kubeRuntime.Arg,
}
k.KubeConf = conf


@@ -29,11 +29,10 @@ type KubePrepare struct {
func (k *KubePrepare) AutoAssert(runtime connector.Runtime) {
kubeRuntime := runtime.(*KubeRuntime)
conf := &KubeConf{
ClusterHosts: kubeRuntime.ClusterHosts,
Cluster: kubeRuntime.Cluster,
Kubeconfig: kubeRuntime.Kubeconfig,
ClientSet: kubeRuntime.ClientSet,
Arg: kubeRuntime.Arg,
Cluster: kubeRuntime.Cluster,
Kubeconfig: kubeRuntime.Kubeconfig,
ClientSet: kubeRuntime.ClientSet,
Arg: kubeRuntime.Arg,
}
k.KubeConf = conf


@@ -17,7 +17,6 @@
package common
import (
"fmt"
kubekeyapiv1alpha2 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha2"
kubekeyclientset "github.com/kubesphere/kubekey/clients/clientset/versioned"
"github.com/kubesphere/kubekey/pkg/core/connector"
@@ -25,12 +24,11 @@ import (
type KubeRuntime struct {
connector.BaseRuntime
ClusterHosts []string
ClusterName string
Cluster *kubekeyapiv1alpha2.ClusterSpec
Kubeconfig string
ClientSet *kubekeyclientset.Clientset
Arg Argument
ClusterName string
Cluster *kubekeyapiv1alpha2.ClusterSpec
Kubeconfig string
ClientSet *kubekeyclientset.Clientset
Arg Argument
}
type Argument struct {
@@ -65,40 +63,32 @@ func NewKubeRuntime(flag string, arg Argument) (*KubeRuntime, error) {
}
clusterSpec := &cluster.Spec
defaultCluster, hostGroups, err := clusterSpec.SetDefaultClusterSpec(arg.InCluster)
defaultCluster, roleGroups, err := clusterSpec.SetDefaultClusterSpec(arg.InCluster)
if err != nil {
return nil, err
}
base := connector.NewBaseRuntime(cluster.Name, connector.NewDialer(), arg.Debug, arg.IgnoreErr)
for _, v := range hostGroups.All {
host := ToHosts(v)
if v.IsMaster {
host.SetRole(Master)
hostSet := make(map[string]struct{})
for _, role := range roleGroups {
for _, host := range role {
if host.IsRole(Master) || host.IsRole(Worker) {
host.SetRole(K8s)
}
if _, ok := hostSet[host.GetName()]; !ok {
hostSet[host.GetName()] = struct{}{}
base.AppendHost(host)
base.AppendRoleMap(host)
}
}
if v.IsWorker {
host.SetRole(Worker)
}
if v.IsEtcd {
host.SetRole(ETCD)
}
if v.IsMaster || v.IsWorker {
host.SetRole(K8s)
}
if v.IsRegistry {
host.SetRole(Registry)
}
base.AppendHost(host)
base.AppendRoleMap(host)
}
arg.KsEnable = defaultCluster.KubeSphere.Enabled
arg.KsVersion = defaultCluster.KubeSphere.Version
r := &KubeRuntime{
ClusterHosts: generateHosts(hostGroups, defaultCluster),
Cluster: defaultCluster,
ClusterName: cluster.Name,
Arg: arg,
Cluster: defaultCluster,
ClusterName: cluster.Name,
Arg: arg,
}
r.BaseRuntime = base
@@ -110,37 +100,3 @@ func (k *KubeRuntime) Copy() connector.Runtime {
runtime := *k
return &runtime
}
func ToHosts(cfg kubekeyapiv1alpha2.HostCfg) *connector.BaseHost {
host := connector.NewHost()
host.Name = cfg.Name
host.Address = cfg.Address
host.InternalAddress = cfg.InternalAddress
host.Port = cfg.Port
host.User = cfg.User
host.Password = cfg.Password
host.PrivateKey = cfg.PrivateKey
host.PrivateKeyPath = cfg.PrivateKeyPath
host.Arch = cfg.Arch
return host
}
func generateHosts(hostGroups *kubekeyapiv1alpha2.HostGroups, cfg *kubekeyapiv1alpha2.ClusterSpec) []string {
var lbHost string
var hostsList []string
if cfg.ControlPlaneEndpoint.Address != "" {
lbHost = fmt.Sprintf("%s %s", cfg.ControlPlaneEndpoint.Address, cfg.ControlPlaneEndpoint.Domain)
} else {
lbHost = fmt.Sprintf("%s %s", hostGroups.Master[0].InternalAddress, cfg.ControlPlaneEndpoint.Domain)
}
for _, host := range cfg.Hosts {
if host.Name != "" {
hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", host.InternalAddress, host.Name, cfg.Kubernetes.DNSDomain, host.Name))
}
}
hostsList = append(hostsList, lbHost)
return hostsList
}
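
Because a host may now appear under several role groups, NewKubeRuntime above dedupes by name before registering hosts with the runtime. A minimal sketch of that hostSet pattern, with strings in place of *connector.BaseHost:

package main

import "fmt"

func main() {
	roleGroups := map[string][]string{
		"master": {"node1"},
		"etcd":   {"node1"},
		"worker": {"node1", "node2"},
	}
	// Mirrors the hostSet loop in NewKubeRuntime: each host is appended once,
	// no matter how many role groups list it.
	hostSet := make(map[string]struct{})
	var hosts []string
	for _, group := range roleGroups {
		for _, name := range group {
			if _, ok := hostSet[name]; !ok {
				hostSet[name] = struct{}{}
				hosts = append(hosts, name)
			}
		}
	}
	fmt.Println(len(hosts)) // 2: node1 and node2
}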


@@ -100,11 +100,11 @@ func (d *DefaultLoader) Load() (*kubekeyapiv1alpha2.Cluster, error) {
Arch: runtime.GOARCH,
})
allInOne.Spec.RoleGroups = kubekeyapiv1alpha2.RoleGroups{
Etcd: []string{hostname},
Master: []string{hostname},
Worker: []string{hostname},
Registry: []string{hostname},
allInOne.Spec.RoleGroups = map[string][]string{
Master: {hostname},
ETCD: {hostname},
Worker: {hostname},
Registry: {hostname},
}
if d.KubernetesVersion != "" {
s := strings.Split(d.KubernetesVersion, "-")


@@ -32,19 +32,19 @@ metadata:
name: {{ .Options.Name }}
spec:
hosts:
- {name: node1, address: 172.16.0.2, internalAddress: 172.16.0.2, user: ubuntu, password: Qcloud@123}
- {name: node2, address: 172.16.0.3, internalAddress: 172.16.0.3, user: ubuntu, password: Qcloud@123}
- {name: node1, address: 172.16.0.2, internalAddress: 172.16.0.2, user: ubuntu, password: "Qcloud@123"}
- {name: node2, address: 172.16.0.3, internalAddress: 172.16.0.3, user: ubuntu, password: "Qcloud@123"}
roleGroups:
etcd:
- node1
master:
control-plane:
- node1
worker:
- node1
- node2
controlPlaneEndpoint:
## Internal loadbalancer for apiservers
#internalLoadbalancer: haproxy
# internalLoadbalancer: haproxy
domain: lb.kubesphere.local
address: ""


@@ -228,8 +228,8 @@ func (j *JoinNodesModule) Init() {
}
joinMasterNode := &task.RemoteTask{
Name: "JoinMasterNode",
Desc: "Join master node",
Name: "JoinControlPlaneNode",
Desc: "Join control-plane node",
Hosts: j.Runtime.GetHostsByRole(common.Master),
Prepare: &prepare.PrepareCollection{
&NodeInCluster{Not: true},