Experiment: modify the structure and remove experiment folder

Signed-off-by: 24sama <jacksama@foxmail.com>
24sama 2021-08-20 16:21:47 +08:00 committed by 24sama
parent 847b46860a
commit 086fa6f42d
80 changed files with 153 additions and 2388 deletions

View File

@@ -19,12 +19,12 @@ package v1alpha1
import (
"errors"
"fmt"
"github.com/kubesphere/kubekey/pkg/core/logger"
"regexp"
"strconv"
"strings"
"github.com/kubesphere/kubekey/pkg/util"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -226,7 +226,7 @@ func (cfg *ClusterSpec) GenerateCertSANs() []string {
}
// GroupHosts is used to group hosts according to the configuration file.
func (cfg *ClusterSpec) GroupHosts(logger *log.Logger) (*HostGroups, error) {
func (cfg *ClusterSpec) GroupHosts() (*HostGroups, error) {
clusterHostsGroups := HostGroups{}
hostList := map[string]string{}
@@ -234,7 +234,7 @@ func (cfg *ClusterSpec) GroupHosts(logger *log.Logger) (*HostGroups, error) {
hostList[host.Name] = host.Name
}
etcdGroup, masterGroup, workerGroup, err := cfg.ParseRolesList(hostList, logger)
etcdGroup, masterGroup, workerGroup, err := cfg.ParseRolesList(hostList)
if err != nil {
return nil, err
}
@@ -284,10 +284,10 @@ func (cfg *ClusterSpec) GroupHosts(logger *log.Logger) (*HostGroups, error) {
// Check whether the parameters under roleGroups are incorrect
if len(masterGroup) == 0 {
logger.Fatal(errors.New("The number of master cannot be 0"))
logger.Log.Fatal(errors.New("The number of master cannot be 0"))
}
if len(etcdGroup) == 0 {
logger.Fatal(errors.New("The number of etcd cannot be 0"))
logger.Log.Fatal(errors.New("The number of etcd cannot be 0"))
}
if len(masterGroup) != len(clusterHostsGroups.Master) {
@@ -323,17 +323,17 @@ func (cfg *ClusterSpec) ClusterDNS() string {
}
// ParseRolesList is used to parse the host grouping list.
func (cfg *ClusterSpec) ParseRolesList(hostList map[string]string, logger *log.Logger) ([]string, []string, []string, error) {
func (cfg *ClusterSpec) ParseRolesList(hostList map[string]string) ([]string, []string, []string, error) {
etcdGroupList := []string{}
masterGroupList := []string{}
workerGroupList := []string{}
for _, host := range cfg.RoleGroups.Etcd {
if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
etcdGroupList = append(etcdGroupList, getHostsRange(host, hostList, "etcd", logger)...)
etcdGroupList = append(etcdGroupList, getHostsRange(host, hostList, "etcd")...)
} else {
if err := hostVerify(hostList, host, "etcd"); err != nil {
logger.Fatal(err)
logger.Log.Fatal(err)
}
etcdGroupList = append(etcdGroupList, host)
}
@@ -341,10 +341,10 @@ func (cfg *ClusterSpec) ParseRolesList(hostList map[string]string, logger *log.L
for _, host := range cfg.RoleGroups.Master {
if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
masterGroupList = append(masterGroupList, getHostsRange(host, hostList, "master", logger)...)
masterGroupList = append(masterGroupList, getHostsRange(host, hostList, "master")...)
} else {
if err := hostVerify(hostList, host, "master"); err != nil {
logger.Fatal(err)
logger.Log.Fatal(err)
}
masterGroupList = append(masterGroupList, host)
}
@@ -352,10 +352,10 @@ func (cfg *ClusterSpec) ParseRolesList(hostList map[string]string, logger *log.L
for _, host := range cfg.RoleGroups.Worker {
if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
workerGroupList = append(workerGroupList, getHostsRange(host, hostList, "worker", logger)...)
workerGroupList = append(workerGroupList, getHostsRange(host, hostList, "worker")...)
} else {
if err := hostVerify(hostList, host, "worker"); err != nil {
logger.Fatal(err)
logger.Log.Fatal(err)
}
workerGroupList = append(workerGroupList, host)
}
@@ -363,7 +363,7 @@ func (cfg *ClusterSpec) ParseRolesList(hostList map[string]string, logger *log.L
return etcdGroupList, masterGroupList, workerGroupList, nil
}
func getHostsRange(rangeStr string, hostList map[string]string, group string, logger *log.Logger) []string {
func getHostsRange(rangeStr string, hostList map[string]string, group string) []string {
hostRangeList := []string{}
r := regexp.MustCompile(`\[(\d+)\:(\d+)\]`)
nameSuffix := r.FindStringSubmatch(rangeStr)
@@ -372,7 +372,7 @@ func getHostsRange(rangeStr string, hostList map[string]string, group string, lo
nameSuffixEnd, _ := strconv.Atoi(nameSuffix[2])
for i := nameSuffixStart; i <= nameSuffixEnd; i++ {
if err := hostVerify(hostList, fmt.Sprintf("%s%d", namePrefix, i), group); err != nil {
logger.Fatal(err)
logger.Log.Fatal(err)
}
hostRangeList = append(hostRangeList, fmt.Sprintf("%s%d", namePrefix, i))
}
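For readers unfamiliar with the bracket syntax handled above, here is a minimal standalone sketch (not part of this change) of how a roleGroups entry such as "node[1:3]" expands into individual host names. The helper name expandRange and the example host names are hypothetical; the regular expression and loop mirror getHostsRange.

package main

import (
    "fmt"
    "regexp"
    "strconv"
    "strings"
)

// expandRange mirrors the expansion performed by getHostsRange:
// "node[1:3]" becomes ["node1", "node2", "node3"].
func expandRange(rangeStr string) []string {
    r := regexp.MustCompile(`\[(\d+)\:(\d+)\]`)
    m := r.FindStringSubmatch(rangeStr)        // m[0]="[1:3]", m[1]="1", m[2]="3"
    prefix := strings.Split(rangeStr, m[0])[0] // "node"
    start, _ := strconv.Atoi(m[1])
    end, _ := strconv.Atoi(m[2])
    hosts := []string{}
    for i := start; i <= end; i++ {
        hosts = append(hosts, fmt.Sprintf("%s%d", prefix, i))
    }
    return hosts
}

func main() {
    fmt.Println(expandRange("node[1:3]")) // [node1 node2 node3]
}

Each expanded name is then checked against the hosts section by hostVerify, so a name that appears under roleGroups but not under hosts fails fast.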

View File

@@ -22,7 +22,6 @@ import (
"strings"
"github.com/kubesphere/kubekey/pkg/util"
log "github.com/sirupsen/logrus"
)
const (
@@ -74,12 +73,12 @@ const (
DefaultDNSAddress = "114.114.114.114"
)
func (cfg *ClusterSpec) SetDefaultClusterSpec(incluster bool, logger *log.Logger) (*ClusterSpec, *HostGroups, error) {
func (cfg *ClusterSpec) SetDefaultClusterSpec(incluster bool) (*ClusterSpec, *HostGroups, error) {
clusterCfg := ClusterSpec{}
clusterCfg.Hosts = SetDefaultHostsCfg(cfg)
clusterCfg.RoleGroups = cfg.RoleGroups
hostGroups, err := clusterCfg.GroupHosts(logger)
hostGroups, err := clusterCfg.GroupHosts()
if err != nil {
return nil, nil, err
}
@@ -146,9 +145,15 @@ func SetDefaultHostsCfg(cfg *ClusterSpec) []HostCfg {
func SetDefaultLBCfg(cfg *ClusterSpec, masterGroup []HostCfg, incluster bool) ControlPlaneEndpoint {
if !incluster {
// If the environment is not HA, the LB address does not need to be set
if len(masterGroup) == 1 && cfg.ControlPlaneEndpoint.Address != "" {
fmt.Println("When the environment is not HA, the LB address does not need to be entered, so delete the corresponding value.")
os.Exit(0)
}
//Check whether LB should be configured
if len(masterGroup) >= 2 && !cfg.ControlPlaneEndpoint.IsInternalLBEnabled() && cfg.ControlPlaneEndpoint.Address == "" {
fmt.Println("The number of nodes in the ControlPlane is above 1, You must set the value of the LB address or enable the internal loadbalancer.")
if len(masterGroup) >= 3 && !cfg.ControlPlaneEndpoint.IsInternalLBEnabled() && cfg.ControlPlaneEndpoint.Address == "" {
fmt.Println("When the environment has at least three masters, You must set the value of the LB address or enable the internal loadbalancer.")
os.Exit(0)
}
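A rough restatement (not from the repository) of the validation rule after this change, with os.Exit replaced by an error return for readability; the function name validateLB and the literal values are illustrative only.

package main

import "fmt"

// validateLB restates the two checks above: a single-master setup must not set an
// LB address, and a control plane with three or more masters must either set an
// LB address or enable the internal load balancer.
func validateLB(masters int, lbAddress string, internalLB bool) error {
    if masters == 1 && lbAddress != "" {
        return fmt.Errorf("non-HA environment: remove the LB address")
    }
    if masters >= 3 && !internalLB && lbAddress == "" {
        return fmt.Errorf("at least three masters: set an LB address or enable the internal load balancer")
    }
    return nil
}

func main() {
    fmt.Println(validateLB(3, "", false))         // error: LB address or internal LB required
    fmt.Println(validateLB(2, "", false))         // <nil>: two masters no longer trigger the check
    fmt.Println(validateLB(1, "10.0.0.1", false)) // error: LB address must be removed
}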

View File

@@ -18,26 +18,38 @@ package cmd
import (
"fmt"
"github.com/kubesphere/kubekey/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/cluster/install"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/kubesphere/kubekey/pkg/pipelines"
"github.com/kubesphere/kubekey/version"
"github.com/spf13/cobra"
"time"
)
var logo = `
 _   __      _          _   __
| | / /     | |        | | / /
| |/ / _   _| |__   ___| |/ /  ___ _   _
|    \| | | | '_ \ / _ \    \ / _ \ | | |
| |\  \ |_| | |_) |  __/ |\  \  __/ |_| |
\_| \_/\__,_|_.__/ \___\_| \_/\___|\__, |
                                    __/ |
                                   |___/
`
// clusterCmd represents the cluster command
var clusterCmd = &cobra.Command{
Use: "cluster",
Short: "Create a Kubernetes or KubeSphere cluster",
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Println(logo)
var ksVersion string
if opt.Kubesphere && len(args) > 0 {
ksVersion = args[0]
} else {
ksVersion = ""
}
logger := util.InitLogger(opt.Verbose)
return install.CreateCluster(opt.ClusterCfgFile, opt.Kubernetes, ksVersion, logger, opt.Kubesphere, opt.Verbose, opt.SkipCheck, opt.SkipPullImages, opt.InCluster, opt.LocalStorage, opt.DownloadCmd, opt.ContainerManager)
return pipelines.CreateCluster(opt.ClusterCfgFile, opt.Kubernetes, ksVersion, opt.Kubesphere, opt.Verbose, opt.SkipCheck, opt.SkipPullImages, opt.InCluster, opt.LocalStorage)
},
}
@@ -47,10 +59,9 @@ func init() {
clusterCmd.Flags().StringVarP(&opt.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
clusterCmd.Flags().StringVarP(&opt.Kubernetes, "with-kubernetes", "", v1alpha1.DefaultKubeVersion, "Specify a supported version of kubernetes")
clusterCmd.Flags().BoolVarP(&opt.LocalStorage, "with-local-storage", "", false, "Deploy a local PV provisioner")
clusterCmd.Flags().BoolVarP(&opt.Kubesphere, "with-kubesphere", "", false, "Deploy a specific version of kubesphere (default v3.2.0)")
clusterCmd.Flags().BoolVarP(&opt.Kubesphere, "with-kubesphere", "", false, "Deploy a specific version of kubesphere (default v3.1.0)")
clusterCmd.Flags().BoolVarP(&opt.SkipCheck, "yes", "y", false, "Skip pre-check of the installation")
clusterCmd.Flags().BoolVarP(&opt.SkipPullImages, "skip-pull-images", "", false, "Skip pre pull images")
clusterCmd.Flags().StringVarP(&opt.ContainerManager, "container-manager", "", "docker", "Container runtime: docker, crio, containerd and isula.")
clusterCmd.Flags().StringVarP(&opt.DownloadCmd, "download-cmd", "", "curl -L -o %s %s",
`The user-defined command to download the necessary binary files. The first param '%s' is the output path, the second param '%s' is the URL`)
@@ -62,7 +73,7 @@ func init() {
func setValidArgs(cmd *cobra.Command) (err error) {
cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) (
strings []string, directive cobra.ShellCompDirective) {
versionArray := []string{"v2.1.1", "v3.0.0", "v3.1.0", "v3.1.1", "v3.2.0", time.Now().Add(-time.Hour * 24).Format("nightly-20060102")}
versionArray := []string{"v2.1.1", "v3.0.0", "v3.1.0", "v3.1.1", time.Now().Add(-time.Hour * 24).Format("nightly-20060102")}
return versionArray, cobra.ShellCompDirectiveNoFileComp
}
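The last completion entry is built with Go's reference-time layout rather than a literal version string; a small sketch of what that expression evaluates to (the dates are examples):

package main

import (
    "fmt"
    "time"
)

func main() {
    // "nightly-20060102" is a Go time layout: 2006 = year, 01 = month, 02 = day.
    // Subtracting 24h yields yesterday's date, e.g. "nightly-20210819" on 2021-08-20.
    fmt.Println(time.Now().Add(-time.Hour * 24).Format("nightly-20060102"))
}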

View File

@@ -1,43 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
type Addon struct {
Name string `yaml:"name" json:"name,omitempty"`
Namespace string `yaml:"namespace" json:"namespace,omitempty"`
Sources Sources `yaml:"sources" json:"sources,omitempty"`
Retries int `yaml:"retries" json:"retries,omitempty"`
Delay int `yaml:"delay" json:"delay,omitempty"`
}
type Sources struct {
Chart Chart `yaml:"chart" json:"chart,omitempty"`
Yaml Yaml `yaml:"yaml" json:"yaml,omitempty"`
}
type Chart struct {
Name string `yaml:"name" json:"name,omitempty"`
Repo string `yaml:"repo" json:"repo,omitempty"`
Path string `yaml:"path" json:"path,omitempty"`
Version string `yaml:"version" json:"version,omitempty"`
ValuesFile string `yaml:"valuesFile" json:"valuesFile,omitempty"`
Values []string `yaml:"values" json:"values,omitempty"`
}
type Yaml struct {
Path []string `yaml:"path" json:"path,omitempty"`
}

View File

@@ -1,381 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"errors"
"fmt"
"github.com/kubesphere/kubekey/experiment/core/logger"
"regexp"
"strconv"
"strings"
"github.com/kubesphere/kubekey/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// ClusterSpec defines the desired state of Cluster
type ClusterSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// Foo is an example field of Cluster. Edit Cluster_types.go to remove/update
Hosts []HostCfg `yaml:"hosts" json:"hosts,omitempty"`
RoleGroups RoleGroups `yaml:"roleGroups" json:"roleGroups,omitempty"`
ControlPlaneEndpoint ControlPlaneEndpoint `yaml:"controlPlaneEndpoint" json:"controlPlaneEndpoint,omitempty"`
Kubernetes Kubernetes `yaml:"kubernetes" json:"kubernetes,omitempty"`
Network NetworkConfig `yaml:"network" json:"network,omitempty"`
Registry RegistryConfig `yaml:"registry" json:"registry,omitempty"`
Addons []Addon `yaml:"addons" json:"addons,omitempty"`
KubeSphere KubeSphere `json:"kubesphere,omitempty"`
}
// ClusterStatus defines the observed state of Cluster
type ClusterStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
JobInfo JobInfo `json:"jobInfo,omitempty"`
Version string `json:"version,omitempty"`
NetworkPlugin string `json:"networkPlugin,omitempty"`
NodesCount int `json:"nodesCount,omitempty"`
EtcdCount int `json:"etcdCount,omitempty"`
MasterCount int `json:"masterCount,omitempty"`
WorkerCount int `json:"workerCount,omitempty"`
Nodes []NodeStatus `json:"nodes,omitempty"`
Conditions []Condition `json:"Conditions,omitempty"`
}
// JobInfo defines the job information to be used to create a cluster or add a node.
type JobInfo struct {
Namespace string `json:"namespace,omitempty"`
Name string `json:"name,omitempty"`
Pods []PodInfo `json:"pods,omitempty"`
}
// PodInfo defines the pod information to be used to create a cluster or add a node.
type PodInfo struct {
Name string `json:"name,omitempty"`
Containers []ContainerInfo `json:"containers,omitempty"`
}
// ContainerInfo defines the container information to be used to create a cluster or add a node.
type ContainerInfo struct {
Name string `json:"name,omitempty"`
}
// NodeStatus defines the status information of the nodes in the cluster.
type NodeStatus struct {
InternalIP string `json:"internalIP,omitempty"`
Hostname string `json:"hostname,omitempty"`
Roles map[string]bool `json:"roles,omitempty"`
}
// Condition defines the process information.
type Condition struct {
Step string `json:"step,omitempty"`
StartTime metav1.Time `json:"startTime,omitempty"`
EndTime metav1.Time `json:"endTime,omitempty"`
Status bool `json:"status,omitempty"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// Cluster is the Schema for the clusters API
// +kubebuilder:resource:path=clusters,scope=Cluster
type Cluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ClusterSpec `json:"spec,omitempty"`
Status ClusterStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// ClusterList contains a list of Cluster
type ClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Cluster `json:"items"`
}
func init() {
SchemeBuilder.Register(&Cluster{}, &ClusterList{})
}
// HostCfg defines host information for cluster.
type HostCfg struct {
Name string `yaml:"name,omitempty" json:"name,omitempty"`
Address string `yaml:"address,omitempty" json:"address,omitempty"`
InternalAddress string `yaml:"internalAddress,omitempty" json:"internalAddress,omitempty"`
Port int `yaml:"port,omitempty" json:"port,omitempty"`
User string `yaml:"user,omitempty" json:"user,omitempty"`
Password string `yaml:"password,omitempty" json:"password,omitempty"`
PrivateKey string `yaml:"privateKey,omitempty" json:"privateKey,omitempty"`
PrivateKeyPath string `yaml:"privateKeyPath,omitempty" json:"privateKeyPath,omitempty"`
Arch string `yaml:"arch,omitempty" json:"arch,omitempty"`
Labels map[string]string `yaml:"labels,omitempty" json:"labels,omitempty"`
ID string `yaml:"id,omitempty" json:"id,omitempty"`
Index int `json:"-"`
IsEtcd bool `json:"-"`
IsMaster bool `json:"-"`
IsWorker bool `json:"-"`
}
// RoleGroups defines the grouping of role for hosts (etcd / master / worker).
type RoleGroups struct {
Etcd []string `yaml:"etcd" json:"etcd,omitempty"`
Master []string `yaml:"master" json:"master,omitempty"`
Worker []string `yaml:"worker" json:"worker,omitempty"`
}
// HostGroups defines the grouping of hosts for cluster (all / etcd / master / worker / k8s).
type HostGroups struct {
All []HostCfg
Etcd []HostCfg
Master []HostCfg
Worker []HostCfg
K8s []HostCfg
}
// ControlPlaneEndpoint defines the control plane endpoint information for cluster.
type ControlPlaneEndpoint struct {
InternalLoadbalancer string `yaml:"internalLoadbalancer" json:"internalLoadbalancer,omitempty"`
Domain string `yaml:"domain" json:"domain,omitempty"`
Address string `yaml:"address" json:"address,omitempty"`
Port int `yaml:"port" json:"port,omitempty"`
}
// RegistryConfig defines the configuration information of the image's repository.
type RegistryConfig struct {
RegistryMirrors []string `yaml:"registryMirrors" json:"registryMirrors,omitempty"`
InsecureRegistries []string `yaml:"insecureRegistries" json:"insecureRegistries,omitempty"`
PrivateRegistry string `yaml:"privateRegistry" json:"privateRegistry,omitempty"`
}
// KubeSphere defines the configuration information of the KubeSphere.
type KubeSphere struct {
Enabled bool `json:"enabled,omitempty"`
Version string `json:"version,omitempty"`
Configurations string `json:"configurations,omitempty"`
}
// ExternalEtcd defines configuration information of external etcd.
type ExternalEtcd struct {
Endpoints []string
CaFile string
CertFile string
KeyFile string
}
// GenerateCertSANs is used to generate cert sans for cluster.
func (cfg *ClusterSpec) GenerateCertSANs() []string {
clusterSvc := fmt.Sprintf("kubernetes.default.svc.%s", cfg.Kubernetes.ClusterName)
defaultCertSANs := []string{"kubernetes", "kubernetes.default", "kubernetes.default.svc", clusterSvc, "localhost", "127.0.0.1"}
extraCertSANs := []string{}
extraCertSANs = append(extraCertSANs, cfg.ControlPlaneEndpoint.Domain)
extraCertSANs = append(extraCertSANs, cfg.ControlPlaneEndpoint.Address)
for _, host := range cfg.Hosts {
extraCertSANs = append(extraCertSANs, host.Name)
extraCertSANs = append(extraCertSANs, fmt.Sprintf("%s.%s", host.Name, cfg.Kubernetes.ClusterName))
if host.Address != cfg.ControlPlaneEndpoint.Address {
extraCertSANs = append(extraCertSANs, host.Address)
}
if host.InternalAddress != host.Address && host.InternalAddress != cfg.ControlPlaneEndpoint.Address {
extraCertSANs = append(extraCertSANs, host.InternalAddress)
}
}
extraCertSANs = append(extraCertSANs, util.ParseIp(cfg.Network.KubeServiceCIDR)[0])
defaultCertSANs = append(defaultCertSANs, extraCertSANs...)
if cfg.Kubernetes.ApiserverCertExtraSans != nil {
defaultCertSANs = append(defaultCertSANs, cfg.Kubernetes.ApiserverCertExtraSans...)
}
return defaultCertSANs
}
// GroupHosts is used to group hosts according to the configuration file.
func (cfg *ClusterSpec) GroupHosts() (*HostGroups, error) {
clusterHostsGroups := HostGroups{}
hostList := map[string]string{}
for _, host := range cfg.Hosts {
hostList[host.Name] = host.Name
}
etcdGroup, masterGroup, workerGroup, err := cfg.ParseRolesList(hostList)
if err != nil {
return nil, err
}
for index, host := range cfg.Hosts {
host.Index = index
if len(etcdGroup) > 0 {
for _, hostName := range etcdGroup {
if host.Name == hostName {
host.IsEtcd = true
break
}
}
}
if len(masterGroup) > 0 {
for _, hostName := range masterGroup {
if host.Name == hostName {
host.IsMaster = true
break
}
}
}
if len(workerGroup) > 0 {
for _, hostName := range workerGroup {
if hostName != "" && host.Name == hostName {
host.IsWorker = true
break
}
}
}
if host.IsEtcd {
clusterHostsGroups.Etcd = append(clusterHostsGroups.Etcd, host)
}
if host.IsMaster {
clusterHostsGroups.Master = append(clusterHostsGroups.Master, host)
}
if host.IsWorker {
clusterHostsGroups.Worker = append(clusterHostsGroups.Worker, host)
}
if host.IsMaster || host.IsWorker {
clusterHostsGroups.K8s = append(clusterHostsGroups.K8s, host)
}
clusterHostsGroups.All = append(clusterHostsGroups.All, host)
}
// Check whether the parameters under roleGroups are incorrect
if len(masterGroup) == 0 {
logger.Log.Fatal(errors.New("The number of master cannot be 0"))
}
if len(etcdGroup) == 0 {
logger.Log.Fatal(errors.New("The number of etcd cannot be 0"))
}
if len(masterGroup) != len(clusterHostsGroups.Master) {
return nil, errors.New("Incorrect nodeName under roleGroups/master in the configuration file")
}
if len(etcdGroup) != len(clusterHostsGroups.Etcd) {
return nil, errors.New("Incorrect nodeName under roleGroups/etcd in the configuration file")
}
if len(workerGroup) != len(clusterHostsGroups.Worker) {
return nil, errors.New("Incorrect nodeName under roleGroups/work in the configuration file")
}
return &clusterHostsGroups, nil
}
// ClusterIP is used to get the kube-apiserver service address inside the cluster.
func (cfg *ClusterSpec) ClusterIP() string {
return util.ParseIp(cfg.Network.KubeServiceCIDR)[2]
}
// ParseRolesList is used to parse the host grouping list.
func (cfg *ClusterSpec) ParseRolesList(hostList map[string]string) ([]string, []string, []string, error) {
etcdGroupList := []string{}
masterGroupList := []string{}
workerGroupList := []string{}
for _, host := range cfg.RoleGroups.Etcd {
if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
etcdGroupList = append(etcdGroupList, getHostsRange(host, hostList, "etcd")...)
} else {
if err := hostVerify(hostList, host, "etcd"); err != nil {
logger.Log.Fatal(err)
}
etcdGroupList = append(etcdGroupList, host)
}
}
for _, host := range cfg.RoleGroups.Master {
if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
masterGroupList = append(masterGroupList, getHostsRange(host, hostList, "master")...)
} else {
if err := hostVerify(hostList, host, "master"); err != nil {
logger.Log.Fatal(err)
}
masterGroupList = append(masterGroupList, host)
}
}
for _, host := range cfg.RoleGroups.Worker {
if strings.Contains(host, "[") && strings.Contains(host, "]") && strings.Contains(host, ":") {
workerGroupList = append(workerGroupList, getHostsRange(host, hostList, "worker")...)
} else {
if err := hostVerify(hostList, host, "worker"); err != nil {
logger.Log.Fatal(err)
}
workerGroupList = append(workerGroupList, host)
}
}
return etcdGroupList, masterGroupList, workerGroupList, nil
}
func getHostsRange(rangeStr string, hostList map[string]string, group string) []string {
hostRangeList := []string{}
r := regexp.MustCompile(`\[(\d+)\:(\d+)\]`)
nameSuffix := r.FindStringSubmatch(rangeStr)
namePrefix := strings.Split(rangeStr, nameSuffix[0])[0]
nameSuffixStart, _ := strconv.Atoi(nameSuffix[1])
nameSuffixEnd, _ := strconv.Atoi(nameSuffix[2])
for i := nameSuffixStart; i <= nameSuffixEnd; i++ {
if err := hostVerify(hostList, fmt.Sprintf("%s%d", namePrefix, i), group); err != nil {
logger.Log.Fatal(err)
}
hostRangeList = append(hostRangeList, fmt.Sprintf("%s%d", namePrefix, i))
}
return hostRangeList
}
func hostVerify(hostList map[string]string, hostName string, group string) error {
if _, ok := hostList[hostName]; !ok {
return fmt.Errorf("[%s] is in [%s] group, but not in hosts list", hostName, group)
}
return nil
}
const (
Haproxy = "haproxy"
)
func (c ControlPlaneEndpoint) IsInternalLBEnabled() bool {
if c.InternalLoadbalancer == Haproxy {
return true
}
return false
}

View File

@@ -1,251 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
"os"
"strings"
"github.com/kubesphere/kubekey/pkg/util"
)
const (
DefaultPreDir = "kubekey"
DefaultSSHPort = 22
DefaultLBPort = 6443
DefaultLBDomain = "lb.kubesphere.local"
DefaultNetworkPlugin = "calico"
DefaultPodsCIDR = "10.233.64.0/18"
DefaultServiceCIDR = "10.233.0.0/18"
DefaultKubeImageNamespace = "kubesphere"
DefaultClusterName = "cluster.local"
DefaultArch = "amd64"
DefaultEtcdVersion = "v3.4.13"
DefaultEtcdPort = "2379"
DefaultKubeVersion = "v1.19.8"
DefaultCalicoVersion = "v3.16.3"
DefaultFlannelVersion = "v0.12.0"
DefaultCniVersion = "v0.8.6"
DefaultCiliumVersion = "v1.8.3"
DefaultKubeovnVersion = "v1.5.0"
DefaultHelmVersion = "v3.2.1"
DefaultMaxPods = 110
DefaultNodeCidrMaskSize = 24
DefaultIPIPMode = "Always"
DefaultVXLANMode = "Never"
DefaultVethMTU = 1440
DefaultBackendMode = "vxlan"
DefaultProxyMode = "ipvs"
DefaultCrioEndpoint = "unix:///var/run/crio/crio.sock"
DefaultContainerdEndpoint = "unix:///run/containerd/containerd.sock"
DefaultIsulaEndpoint = "unix:///var/run/isulad.sock"
Etcd = "etcd"
Master = "master"
Worker = "worker"
K8s = "k8s"
DefaultEtcdBackupDir = "/var/backups/kube_etcd"
DefaultEtcdBackupPeriod = 30
DefaultKeepBackNumber = 5
DefaultEtcdBackupScriptDir = "/usr/local/bin/kube-scripts"
DefaultJoinCIDR = "100.64.0.0/16"
DefaultNetworkType = "geneve"
DefaultVlanID = "100"
DefaultOvnLabel = "node-role.kubernetes.io/master"
DefaultDPDKVersion = "19.11"
DefaultDNSAddress = "114.114.114.114"
)
func (cfg *ClusterSpec) SetDefaultClusterSpec(incluster bool) (*ClusterSpec, *HostGroups, error) {
clusterCfg := ClusterSpec{}
clusterCfg.Hosts = SetDefaultHostsCfg(cfg)
clusterCfg.RoleGroups = cfg.RoleGroups
hostGroups, err := clusterCfg.GroupHosts()
if err != nil {
return nil, nil, err
}
clusterCfg.ControlPlaneEndpoint = SetDefaultLBCfg(cfg, hostGroups.Master, incluster)
clusterCfg.Network = SetDefaultNetworkCfg(cfg)
clusterCfg.Kubernetes = SetDefaultClusterCfg(cfg)
clusterCfg.Registry = cfg.Registry
clusterCfg.Addons = cfg.Addons
clusterCfg.KubeSphere = cfg.KubeSphere
if cfg.Kubernetes.ClusterName == "" {
clusterCfg.Kubernetes.ClusterName = DefaultClusterName
}
if cfg.Kubernetes.Version == "" {
clusterCfg.Kubernetes.Version = DefaultKubeVersion
}
if cfg.Kubernetes.MaxPods == 0 {
clusterCfg.Kubernetes.MaxPods = DefaultMaxPods
}
if cfg.Kubernetes.NodeCidrMaskSize == 0 {
clusterCfg.Kubernetes.NodeCidrMaskSize = DefaultNodeCidrMaskSize
}
if cfg.Kubernetes.ProxyMode == "" {
clusterCfg.Kubernetes.ProxyMode = DefaultProxyMode
}
return &clusterCfg, hostGroups, nil
}
func SetDefaultHostsCfg(cfg *ClusterSpec) []HostCfg {
var hostscfg []HostCfg
if len(cfg.Hosts) == 0 {
return nil
}
for _, host := range cfg.Hosts {
if len(host.Address) == 0 && len(host.InternalAddress) > 0 {
host.Address = host.InternalAddress
}
if len(host.InternalAddress) == 0 && len(host.Address) > 0 {
host.InternalAddress = host.Address
}
if host.User == "" {
host.User = "root"
}
if host.Port == 0 {
host.Port = DefaultSSHPort
}
if host.PrivateKey == "" {
if host.Password == "" && host.PrivateKeyPath == "" {
host.PrivateKeyPath = "~/.ssh/id_rsa"
}
if host.PrivateKeyPath != "" && strings.HasPrefix(strings.TrimSpace(host.PrivateKeyPath), "~/") {
homeDir, _ := util.Home()
host.PrivateKeyPath = strings.Replace(host.PrivateKeyPath, "~/", fmt.Sprintf("%s/", homeDir), 1)
}
}
if host.Arch == "" {
host.Arch = DefaultArch
}
hostscfg = append(hostscfg, host)
}
return hostscfg
}
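A self-contained illustration (hypothetical host, trimmed struct) of the per-host defaulting above; the real code additionally expands a leading "~/" in PrivateKeyPath via util.Home().

package main

import "fmt"

// host keeps only the HostCfg fields that SetDefaultHostsCfg fills in.
type host struct {
    Name, Address, InternalAddress, User, PrivateKeyPath, Arch string
    Port                                                       int
}

// applyDefaults restates the defaulting rules for a single host entry.
func applyDefaults(h host) host {
    if h.Address == "" && h.InternalAddress != "" {
        h.Address = h.InternalAddress
    }
    if h.InternalAddress == "" && h.Address != "" {
        h.InternalAddress = h.Address
    }
    if h.User == "" {
        h.User = "root"
    }
    if h.Port == 0 {
        h.Port = 22 // DefaultSSHPort
    }
    if h.PrivateKeyPath == "" {
        h.PrivateKeyPath = "~/.ssh/id_rsa" // only when no password or inline key is set
    }
    if h.Arch == "" {
        h.Arch = "amd64" // DefaultArch
    }
    return h
}

func main() {
    fmt.Printf("%+v\n", applyDefaults(host{Name: "node1", InternalAddress: "172.16.0.10"}))
    // {Name:node1 Address:172.16.0.10 InternalAddress:172.16.0.10 User:root PrivateKeyPath:~/.ssh/id_rsa Arch:amd64 Port:22}
}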
func SetDefaultLBCfg(cfg *ClusterSpec, masterGroup []HostCfg, incluster bool) ControlPlaneEndpoint {
if !incluster {
// If the environment is not HA, the LB address does not need to be set
if len(masterGroup) == 1 && cfg.ControlPlaneEndpoint.Address != "" {
fmt.Println("When the environment is not HA, the LB address does not need to be entered, so delete the corresponding value.")
os.Exit(0)
}
//Check whether LB should be configured
if len(masterGroup) >= 3 && !cfg.ControlPlaneEndpoint.IsInternalLBEnabled() && cfg.ControlPlaneEndpoint.Address == "" {
fmt.Println("When the environment has at least three masters, You must set the value of the LB address or enable the internal loadbalancer.")
os.Exit(0)
}
// Check whether LB address and the internal LB are both enabled
if cfg.ControlPlaneEndpoint.IsInternalLBEnabled() && cfg.ControlPlaneEndpoint.Address != "" {
fmt.Println("You cannot set up the internal load balancer and the LB address at the same time.")
os.Exit(0)
}
}
if cfg.ControlPlaneEndpoint.Address == "" || cfg.ControlPlaneEndpoint.Address == "127.0.0.1" {
cfg.ControlPlaneEndpoint.Address = masterGroup[0].InternalAddress
}
if cfg.ControlPlaneEndpoint.Domain == "" {
cfg.ControlPlaneEndpoint.Domain = DefaultLBDomain
}
if cfg.ControlPlaneEndpoint.Port == 0 {
cfg.ControlPlaneEndpoint.Port = DefaultLBPort
}
defaultLbCfg := cfg.ControlPlaneEndpoint
return defaultLbCfg
}
func SetDefaultNetworkCfg(cfg *ClusterSpec) NetworkConfig {
if cfg.Network.Plugin == "" {
cfg.Network.Plugin = DefaultNetworkPlugin
}
if cfg.Network.KubePodsCIDR == "" {
cfg.Network.KubePodsCIDR = DefaultPodsCIDR
}
if cfg.Network.KubeServiceCIDR == "" {
cfg.Network.KubeServiceCIDR = DefaultServiceCIDR
}
if cfg.Network.Calico.IPIPMode == "" {
cfg.Network.Calico.IPIPMode = DefaultIPIPMode
}
if cfg.Network.Calico.VXLANMode == "" {
cfg.Network.Calico.VXLANMode = DefaultVXLANMode
}
if cfg.Network.Calico.VethMTU == 0 {
cfg.Network.Calico.VethMTU = DefaultVethMTU
}
if cfg.Network.Flannel.BackendMode == "" {
cfg.Network.Flannel.BackendMode = DefaultBackendMode
}
// kube-ovn default config
if cfg.Network.Kubeovn.JoinCIDR == "" {
cfg.Network.Kubeovn.JoinCIDR = DefaultJoinCIDR
}
if cfg.Network.Kubeovn.Label == "" {
cfg.Network.Kubeovn.Label = DefaultOvnLabel
}
if cfg.Network.Kubeovn.VlanID == "" {
cfg.Network.Kubeovn.VlanID = DefaultVlanID
}
if cfg.Network.Kubeovn.NetworkType == "" {
cfg.Network.Kubeovn.NetworkType = DefaultNetworkType
}
if cfg.Network.Kubeovn.PingerExternalAddress == "" {
cfg.Network.Kubeovn.PingerExternalAddress = DefaultDNSAddress
}
if cfg.Network.Kubeovn.DpdkVersion == "" {
cfg.Network.Kubeovn.DpdkVersion = DefaultDPDKVersion
}
defaultNetworkCfg := cfg.Network
return defaultNetworkCfg
}
func SetDefaultClusterCfg(cfg *ClusterSpec) Kubernetes {
if cfg.Kubernetes.Version == "" {
cfg.Kubernetes.Version = DefaultKubeVersion
} else {
s := strings.Split(cfg.Kubernetes.Version, "-")
if len(s) > 1 {
cfg.Kubernetes.Version = s[0]
cfg.Kubernetes.Type = s[1]
}
}
if cfg.Kubernetes.ClusterName == "" {
cfg.Kubernetes.ClusterName = DefaultClusterName
}
if cfg.Kubernetes.EtcdBackupDir == "" {
cfg.Kubernetes.EtcdBackupDir = DefaultEtcdBackupDir
}
if cfg.Kubernetes.EtcdBackupPeriod == 0 {
cfg.Kubernetes.EtcdBackupPeriod = DefaultEtcdBackupPeriod
}
if cfg.Kubernetes.KeepBackupNumber == 0 {
cfg.Kubernetes.KeepBackupNumber = DefaultKeepBackNumber
}
if cfg.Kubernetes.EtcdBackupScriptDir == "" {
cfg.Kubernetes.EtcdBackupScriptDir = DefaultEtcdBackupScriptDir
}
defaultClusterCfg := cfg.Kubernetes
return defaultClusterCfg
}
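SetDefaultClusterCfg splits a hyphenated version string into a version and a cluster type; a tiny hedged example (the "-k3s" suffix is illustrative, not taken from this diff):

package main

import (
    "fmt"
    "strings"
)

// splitVersion restates the version handling above: the segment after the first
// "-" is treated as the cluster type.
func splitVersion(v string) (version, clusterType string) {
    s := strings.Split(v, "-")
    if len(s) > 1 {
        return s[0], s[1]
    }
    return v, ""
}

func main() {
    fmt.Println(splitVersion("v1.19.8"))     // v1.19.8  (no type)
    fmt.Println(splitVersion("v1.19.8-k3s")) // v1.19.8 k3s
}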

View File

@@ -1,19 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +groupName=kubekey.kubesphere.io
package v1alpha1

View File

@@ -1,36 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 contains API Schema definitions for the kubekey v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=kubekey.kubesphere.io
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "kubekey.kubesphere.io", Version: "v1alpha1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)

View File

@@ -1,43 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import "k8s.io/apimachinery/pkg/runtime"
type Kubernetes struct {
Type string `yaml:"type" json:"type,omitempty"`
Version string `yaml:"version" json:"version,omitempty"`
ClusterName string `yaml:"clusterName" json:"clusterName,omitempty"`
MasqueradeAll bool `yaml:"masqueradeAll" json:"masqueradeAll,omitempty"`
MaxPods int `yaml:"maxPods" json:"maxPods,omitempty"`
NodeCidrMaskSize int `yaml:"nodeCidrMaskSize" json:"nodeCidrMaskSize,omitempty"`
ApiserverCertExtraSans []string `yaml:"apiserverCertExtraSans" json:"apiserverCertExtraSans,omitempty"`
ProxyMode string `yaml:"proxyMode" json:"proxyMode,omitempty"`
EtcdBackupDir string `yaml:"etcdBackupDir" json:"etcdBackupDir,omitempty"`
EtcdBackupPeriod int `yaml:"etcdBackupPeriod" json:"etcdBackupPeriod,omitempty"`
KeepBackupNumber int `yaml:"keepBackupNumber" json:"keepBackupNumber,omitempty"`
EtcdBackupScriptDir string `yaml:"etcdBackupScript" json:"etcdBackupScript,omitempty"`
ContainerManager string `yaml:"containerManager" json:"containerManager,omitempty"`
ContainerRuntimeEndpoint string `yaml:"containerRuntimeEndpoint" json:"containerRuntimeEndpoint,omitempty"`
ApiServerArgs []string `yaml:"apiserverArgs" json:"apiserverArgs,omitempty"`
ControllerManagerArgs []string `yaml:"controllerManagerArgs" json:"controllerManagerArgs,omitempty"`
SchedulerArgs []string `yaml:"schedulerArgs" json:"schedulerArgs,omitempty"`
KubeletArgs []string `yaml:"kubeletArgs" json:"kubeletArgs,omitempty"`
KubeProxyArgs []string `yaml:"kubeProxyArgs" json:"kubeProxyArgs,omitempty"`
KubeletConfiguration runtime.RawExtension `yaml:"kubeletConfiguration" json:"kubeletConfiguration,omitempty"`
KubeProxyConfiguration runtime.RawExtension `yaml:"kubeProxyConfiguration" json:"kubeProxyConfiguration,omitempty"`
}

View File

@@ -1,52 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
type NetworkConfig struct {
Plugin string `yaml:"plugin" json:"plugin,omitempty"`
KubePodsCIDR string `yaml:"kubePodsCIDR" json:"kubePodsCIDR,omitempty"`
KubeServiceCIDR string `yaml:"kubeServiceCIDR" json:"kubeServiceCIDR,omitempty"`
Calico CalicoCfg `yaml:"calico" json:"calico,omitempty"`
Flannel FlannelCfg `yaml:"flannel" json:"flannel,omitempty"`
Kubeovn KubeovnCfg `yaml:"kubeovn" json:"kubeovn,omitempty"`
}
type CalicoCfg struct {
IPIPMode string `yaml:"ipipMode" json:"ipipMode,omitempty"`
VXLANMode string `yaml:"vxlanMode" json:"vxlanMode,omitempty"`
VethMTU int `yaml:"vethMTU" json:"vethMTU,omitempty"`
}
type FlannelCfg struct {
BackendMode string `yaml:"backendMode" json:"backendMode,omitempty"`
}
type KubeovnCfg struct {
JoinCIDR string `yaml:"joinCIDR" json:"joinCIDR,omitempty"`
NetworkType string `yaml:"networkType" json:"networkType,omitempty"`
Label string `yaml:"label" json:"label,omitempty"`
Iface string `yaml:"iface" json:"iface,omitempty"`
VlanInterfaceName string `yaml:"vlanInterfaceName" json:"vlanInterfaceName,omitempty"`
VlanID string `yaml:"vlanID" json:"vlanID,omitempty"`
DpdkMode bool `yaml:"dpdkMode" json:"dpdkMode,omitempty"`
EnableSSL bool `yaml:"enableSSL" json:"enableSSL,omitempty"`
EnableMirror bool `yaml:"enableMirror" json:"enableMirror,omitempty"`
HwOffload bool `yaml:"hwOffload" json:"hwOffload,omitempty"`
DpdkVersion string `yaml:"dpdkVersion" json:"dpdkVersion,omitempty"`
PingerExternalAddress string `yaml:"pingerExternalAddress" json:"pingerExternalAddress,omitempty"`
PingerExternalDomain string `yaml:"pingerExternalDomain" json:"pingerExternalDomain,omitempty"`
}

View File

@@ -1,51 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = GroupVersion
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
//var (
// // SchemeBuilder initializes a scheme builder
// SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// // AddToScheme is a global function that registers this API group & version to a scheme
// AddToScheme = SchemeBuilder.AddToScheme
//)
// Adds the list of known types to Scheme.
//func addKnownTypes(scheme *runtime.Scheme) error {
// scheme.AddKnownTypes(SchemeGroupVersion,
// &Cluster{},
// &ClusterList{},
// )
// metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
// return nil
//}

View File

@@ -1,605 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Addon) DeepCopyInto(out *Addon) {
*out = *in
in.Sources.DeepCopyInto(&out.Sources)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon.
func (in *Addon) DeepCopy() *Addon {
if in == nil {
return nil
}
out := new(Addon)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CalicoCfg) DeepCopyInto(out *CalicoCfg) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CalicoCfg.
func (in *CalicoCfg) DeepCopy() *CalicoCfg {
if in == nil {
return nil
}
out := new(CalicoCfg)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Chart) DeepCopyInto(out *Chart) {
*out = *in
if in.Values != nil {
in, out := &in.Values, &out.Values
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Chart.
func (in *Chart) DeepCopy() *Chart {
if in == nil {
return nil
}
out := new(Chart)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Cluster) DeepCopyInto(out *Cluster) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
func (in *Cluster) DeepCopy() *Cluster {
if in == nil {
return nil
}
out := new(Cluster)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Cluster) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterList) DeepCopyInto(out *ClusterList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Cluster, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList.
func (in *ClusterList) DeepCopy() *ClusterList {
if in == nil {
return nil
}
out := new(ClusterList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = *in
if in.Hosts != nil {
in, out := &in.Hosts, &out.Hosts
*out = make([]HostCfg, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.RoleGroups.DeepCopyInto(&out.RoleGroups)
out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
in.Kubernetes.DeepCopyInto(&out.Kubernetes)
out.Network = in.Network
in.Registry.DeepCopyInto(&out.Registry)
if in.Addons != nil {
in, out := &in.Addons, &out.Addons
*out = make([]Addon, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
out.KubeSphere = in.KubeSphere
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
func (in *ClusterSpec) DeepCopy() *ClusterSpec {
if in == nil {
return nil
}
out := new(ClusterSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = *in
in.JobInfo.DeepCopyInto(&out.JobInfo)
if in.Nodes != nil {
in, out := &in.Nodes, &out.Nodes
*out = make([]NodeStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
func (in *ClusterStatus) DeepCopy() *ClusterStatus {
if in == nil {
return nil
}
out := new(ClusterStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
*out = *in
in.StartTime.DeepCopyInto(&out.StartTime)
in.EndTime.DeepCopyInto(&out.EndTime)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
func (in *Condition) DeepCopy() *Condition {
if in == nil {
return nil
}
out := new(Condition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerInfo) DeepCopyInto(out *ContainerInfo) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerInfo.
func (in *ContainerInfo) DeepCopy() *ContainerInfo {
if in == nil {
return nil
}
out := new(ContainerInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControlPlaneEndpoint) DeepCopyInto(out *ControlPlaneEndpoint) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneEndpoint.
func (in *ControlPlaneEndpoint) DeepCopy() *ControlPlaneEndpoint {
if in == nil {
return nil
}
out := new(ControlPlaneEndpoint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) {
*out = *in
if in.Endpoints != nil {
in, out := &in.Endpoints, &out.Endpoints
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalEtcd.
func (in *ExternalEtcd) DeepCopy() *ExternalEtcd {
if in == nil {
return nil
}
out := new(ExternalEtcd)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlannelCfg) DeepCopyInto(out *FlannelCfg) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlannelCfg.
func (in *FlannelCfg) DeepCopy() *FlannelCfg {
if in == nil {
return nil
}
out := new(FlannelCfg)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostCfg) DeepCopyInto(out *HostCfg) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostCfg.
func (in *HostCfg) DeepCopy() *HostCfg {
if in == nil {
return nil
}
out := new(HostCfg)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostGroups) DeepCopyInto(out *HostGroups) {
*out = *in
if in.All != nil {
in, out := &in.All, &out.All
*out = make([]HostCfg, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Etcd != nil {
in, out := &in.Etcd, &out.Etcd
*out = make([]HostCfg, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Master != nil {
in, out := &in.Master, &out.Master
*out = make([]HostCfg, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Worker != nil {
in, out := &in.Worker, &out.Worker
*out = make([]HostCfg, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.K8s != nil {
in, out := &in.K8s, &out.K8s
*out = make([]HostCfg, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostGroups.
func (in *HostGroups) DeepCopy() *HostGroups {
if in == nil {
return nil
}
out := new(HostGroups)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobInfo) DeepCopyInto(out *JobInfo) {
*out = *in
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = make([]PodInfo, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobInfo.
func (in *JobInfo) DeepCopy() *JobInfo {
if in == nil {
return nil
}
out := new(JobInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeSphere) DeepCopyInto(out *KubeSphere) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSphere.
func (in *KubeSphere) DeepCopy() *KubeSphere {
if in == nil {
return nil
}
out := new(KubeSphere)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeovnCfg) DeepCopyInto(out *KubeovnCfg) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeovnCfg.
func (in *KubeovnCfg) DeepCopy() *KubeovnCfg {
if in == nil {
return nil
}
out := new(KubeovnCfg)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Kubernetes) DeepCopyInto(out *Kubernetes) {
*out = *in
if in.ApiserverCertExtraSans != nil {
in, out := &in.ApiserverCertExtraSans, &out.ApiserverCertExtraSans
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ApiServerArgs != nil {
in, out := &in.ApiServerArgs, &out.ApiServerArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ControllerManagerArgs != nil {
in, out := &in.ControllerManagerArgs, &out.ControllerManagerArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SchedulerArgs != nil {
in, out := &in.SchedulerArgs, &out.SchedulerArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.KubeletArgs != nil {
in, out := &in.KubeletArgs, &out.KubeletArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.KubeProxyArgs != nil {
in, out := &in.KubeProxyArgs, &out.KubeProxyArgs
*out = make([]string, len(*in))
copy(*out, *in)
}
in.KubeletConfiguration.DeepCopyInto(&out.KubeletConfiguration)
in.KubeProxyConfiguration.DeepCopyInto(&out.KubeProxyConfiguration)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kubernetes.
func (in *Kubernetes) DeepCopy() *Kubernetes {
if in == nil {
return nil
}
out := new(Kubernetes)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkConfig) DeepCopyInto(out *NetworkConfig) {
*out = *in
out.Calico = in.Calico
out.Flannel = in.Flannel
out.Kubeovn = in.Kubeovn
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfig.
func (in *NetworkConfig) DeepCopy() *NetworkConfig {
if in == nil {
return nil
}
out := new(NetworkConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
*out = *in
if in.Roles != nil {
in, out := &in.Roles, &out.Roles
*out = make(map[string]bool, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
func (in *NodeStatus) DeepCopy() *NodeStatus {
if in == nil {
return nil
}
out := new(NodeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodInfo) DeepCopyInto(out *PodInfo) {
*out = *in
if in.Containers != nil {
in, out := &in.Containers, &out.Containers
*out = make([]ContainerInfo, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodInfo.
func (in *PodInfo) DeepCopy() *PodInfo {
if in == nil {
return nil
}
out := new(PodInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RegistryConfig) DeepCopyInto(out *RegistryConfig) {
*out = *in
if in.RegistryMirrors != nil {
in, out := &in.RegistryMirrors, &out.RegistryMirrors
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.InsecureRegistries != nil {
in, out := &in.InsecureRegistries, &out.InsecureRegistries
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryConfig.
func (in *RegistryConfig) DeepCopy() *RegistryConfig {
if in == nil {
return nil
}
out := new(RegistryConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleGroups) DeepCopyInto(out *RoleGroups) {
*out = *in
if in.Etcd != nil {
in, out := &in.Etcd, &out.Etcd
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Master != nil {
in, out := &in.Master, &out.Master
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Worker != nil {
in, out := &in.Worker, &out.Worker
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleGroups.
func (in *RoleGroups) DeepCopy() *RoleGroups {
if in == nil {
return nil
}
out := new(RoleGroups)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Sources) DeepCopyInto(out *Sources) {
*out = *in
in.Chart.DeepCopyInto(&out.Chart)
in.Yaml.DeepCopyInto(&out.Yaml)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sources.
func (in *Sources) DeepCopy() *Sources {
if in == nil {
return nil
}
out := new(Sources)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Yaml) DeepCopyInto(out *Yaml) {
*out = *in
if in.Path != nil {
in, out := &in.Path, &out.Path
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Yaml.
func (in *Yaml) DeepCopy() *Yaml {
if in == nil {
return nil
}
out := new(Yaml)
in.DeepCopyInto(out)
return out
}

View File

@@ -1,13 +0,0 @@
package cmd
import "github.com/spf13/cobra"
// addCmd represents the add command
var addCmd = &cobra.Command{
Use: "add",
Short: "Add nodes to kubernetes cluster",
}
func init() {
rootCmd.AddCommand(addCmd)
}

View File

@@ -1,41 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/kubesphere/kubekey/pkg/cluster/add"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/spf13/cobra"
)
// addNodesCmd represents the nodes command
var addNodesCmd = &cobra.Command{
Use: "nodes",
Short: "Add nodes to the cluster according to the new nodes information from the specified configuration file",
RunE: func(cmd *cobra.Command, args []string) error {
logger := util.InitLogger(opt.Verbose)
return add.AddNodes(opt.ClusterCfgFile, "", "", logger, false, opt.Verbose, opt.SkipCheck, opt.SkipPullImages, opt.InCluster, opt.DownloadCmd)
},
}
func init() {
addCmd.AddCommand(addNodesCmd)
addNodesCmd.Flags().StringVarP(&opt.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
addNodesCmd.Flags().BoolVarP(&opt.SkipCheck, "yes", "y", false, "Skip pre-check of the installation")
addNodesCmd.Flags().BoolVarP(&opt.SkipPullImages, "skip-pull-images", "", false, "Skip pre pull images")
addNodesCmd.Flags().StringVarP(&opt.DownloadCmd, "download-cmd", "", "curl -L -o %s %s",
`The user defined command to download the necessary binary files. The first param '%s' is the output path, the second param '%s' is the URL`)
}
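
The --download-cmd flag above (also used by the create cluster and upgrade commands later in this diff) takes a format string with two '%s' placeholders. A small sketch of how such a template expands, with a hypothetical output path and URL, assuming nothing beyond fmt's standard formatting:

package main

import "fmt"

func main() {
	// Default template from the flag above: the first '%s' is the output path,
	// the second '%s' is the download URL.
	downloadCmd := "curl -L -o %s %s"
	// Hypothetical values, purely for illustration.
	output := "/tmp/kubekey/kubeadm"
	url := "https://example.com/kubeadm"
	fmt.Printf(downloadCmd+"\n", output, url)
	// Prints: curl -L -o /tmp/kubekey/kubeadm https://example.com/kubeadm
}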

View File

@ -1,12 +0,0 @@
package cmd
import "github.com/spf13/cobra"
var certsCmd = &cobra.Command{
Use: "certs",
Short: "cluster certs",
}
func init() {
rootCmd.AddCommand(certsCmd)
}

View File

@ -1,85 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"github.com/kubesphere/kubekey/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/experiment/pipelines"
"github.com/kubesphere/kubekey/version"
"github.com/spf13/cobra"
"time"
)
var logo = `
_ __ _ _ __
| | / / | | | | / /
| |/ / _ _| |__ ___| |/ / ___ _ _
| \| | | | '_ \ / _ \ \ / _ \ | | |
| |\ \ |_| | |_) | __/ |\ \ __/ |_| |
\_| \_/\__,_|_.__/ \___\_| \_/\___|\__, |
__/ |
|___/
`
// clusterCmd represents the cluster command
var clusterCmd = &cobra.Command{
Use: "cluster",
Short: "Create a Kubernetes or KubeSphere cluster",
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Println(logo)
var ksVersion string
if opt.Kubesphere && len(args) > 0 {
ksVersion = args[0]
} else {
ksVersion = ""
}
return pipelines.CreateCluster(opt.ClusterCfgFile, opt.Kubernetes, ksVersion, opt.Kubesphere, opt.Verbose, opt.SkipCheck, opt.SkipPullImages, opt.InCluster, opt.LocalStorage)
},
}
func init() {
createCmd.AddCommand(clusterCmd)
clusterCmd.Flags().StringVarP(&opt.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
clusterCmd.Flags().StringVarP(&opt.Kubernetes, "with-kubernetes", "", v1alpha1.DefaultKubeVersion, "Specify a supported version of kubernetes")
clusterCmd.Flags().BoolVarP(&opt.LocalStorage, "with-local-storage", "", false, "Deploy a local PV provisioner")
clusterCmd.Flags().BoolVarP(&opt.Kubesphere, "with-kubesphere", "", false, "Deploy a specific version of kubesphere (default v3.1.0)")
clusterCmd.Flags().BoolVarP(&opt.SkipCheck, "yes", "y", false, "Skip pre-check of the installation")
clusterCmd.Flags().BoolVarP(&opt.SkipPullImages, "skip-pull-images", "", false, "Skip pre pull images")
clusterCmd.Flags().StringVarP(&opt.DownloadCmd, "download-cmd", "", "curl -L -o %s %s",
`The user defined command to download the necessary binary files. The first param '%s' is the output path, the second param '%s' is the URL`)
if err := setValidArgs(clusterCmd); err != nil {
panic(fmt.Sprintf("Got error with the completion setting: %v", err))
}
}
func setValidArgs(cmd *cobra.Command) (err error) {
cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) (
strings []string, directive cobra.ShellCompDirective) {
versionArray := []string{"v2.1.1", "v3.0.0", "v3.1.0", "v3.1.1", time.Now().Add(-time.Hour * 24).Format("nightly-20060102")}
return versionArray, cobra.ShellCompDirectiveNoFileComp
}
err = cmd.RegisterFlagCompletionFunc("with-kubernetes", func(cmd *cobra.Command, args []string, toComplete string) (
strings []string, directive cobra.ShellCompDirective) {
return version.SupportedK8sVersionList(), cobra.ShellCompDirectiveNoFileComp
})
return
}

View File

@ -1,95 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"github.com/spf13/cobra"
)
// CompletionOptions is the option of completion command
type CompletionOptions struct {
Type string
}
// ShellTypes contains all types of shell
var ShellTypes = []string{
"zsh", "bash", "powerShell",
}
var completionOptions CompletionOptions
func init() {
rootCmd.AddCommand(completionCmd)
flags := completionCmd.Flags()
flags.StringVarP(&completionOptions.Type, "type", "t", "",
fmt.Sprintf("Generate different types of shell which are %v", ShellTypes))
err := completionCmd.RegisterFlagCompletionFunc("type", func(cmd *cobra.Command, args []string, toComplete string) (
i []string, directive cobra.ShellCompDirective) {
return ShellTypes, cobra.ShellCompDirectiveDefault
})
if err != nil {
completionCmd.PrintErrf("register flag type for sub-command doc failed %#v\n", err)
}
}
var completionCmd = &cobra.Command{
Use: "completion",
Short: "Generate shell completion scripts",
Long: `Generate shell completion scripts
Normally you don't need to do any extra work to have this feature if you've installed kk via brew`,
Example: `# Installing bash completion on Linux
## If bash-completion is not installed on Linux, please install the 'bash-completion' package
## via your distribution's package manager.
## Load the kk completion code for bash into the current shell
source <(kk completion --type bash)
## Write bash completion code to a file and source it from .bash_profile
mkdir -p ~/.config/kk/ && kk completion --type bash > ~/.config/kk/completion.bash.inc
printf "
# kk shell completion
source '$HOME/.config/kk/completion.bash.inc'
" >> $HOME/.bash_profile
source $HOME/.bash_profile
In order to have a good experience with zsh completion, ohmyzsh is a good choice.
Please install ohmyzsh with the following command
sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
Get more details about ohmyzsh from https://github.com/ohmyzsh/ohmyzsh
Load the kk completion code for zsh[1] into the current shell
source <(kk completion --type zsh)
Set the kk completion code for zsh[1] to autoload on startup
kk completion --type zsh > "${fpath[1]}/_kk"`,
RunE: func(cmd *cobra.Command, _ []string) (err error) {
shellType := completionOptions.Type
switch shellType {
case "zsh":
err = rootCmd.GenZshCompletion(cmd.OutOrStdout())
case "powerShell":
err = rootCmd.GenPowerShellCompletion(cmd.OutOrStdout())
case "bash":
err = rootCmd.GenBashCompletion(cmd.OutOrStdout())
case "":
err = cmd.Help()
default:
err = fmt.Errorf("unknown shell type %s", shellType)
}
return
},
}

View File

@ -1,51 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/kubesphere/kubekey/pkg/config"
"github.com/spf13/cobra"
)
// configCmd represents the config command
var configCmd = &cobra.Command{
Use: "config",
Short: "Create cluster configuration file",
RunE: func(cmd *cobra.Command, args []string) error {
var ksVersion string
if opt.Kubesphere && len(args) > 0 {
ksVersion = args[0]
} else {
ksVersion = ""
}
err := config.GenerateClusterObj(opt.Kubernetes, ksVersion, opt.Name, opt.Kubeconfig, opt.ClusterCfgPath, opt.Kubesphere, opt.FromCluster)
if err != nil {
return err
}
return err
},
}
func init() {
createCmd.AddCommand(configCmd)
configCmd.Flags().StringVarP(&opt.Name, "name", "", "sample", "Specify a name of cluster object")
configCmd.Flags().StringVarP(&opt.ClusterCfgPath, "filename", "f", "", "Specify a configuration file path")
configCmd.Flags().StringVarP(&opt.Kubernetes, "with-kubernetes", "", "", "Specify a supported version of kubernetes")
configCmd.Flags().BoolVarP(&opt.Kubesphere, "with-kubesphere", "", false, "Deploy a specific version of kubesphere (default v3.1.0)")
configCmd.Flags().BoolVarP(&opt.FromCluster, "from-cluster", "", false, "Create a configuration based on existing cluster")
configCmd.Flags().StringVarP(&opt.Kubeconfig, "kubeconfig", "", "", "Specify a kubeconfig file")
}

View File

@ -1,30 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/spf13/cobra"
)
// createCmd represents the create command
var createCmd = &cobra.Command{
Use: "create",
Short: "Create a cluster or a cluster configuration file",
}
func init() {
rootCmd.AddCommand(createCmd)
}

View File

@ -1,30 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/spf13/cobra"
)
// deleteCmd represents the delete command
var deleteCmd = &cobra.Command{
Use: "delete",
Short: "Delete nodes or cluster",
}
func init() {
rootCmd.AddCommand(deleteCmd)
}

View File

@ -1,22 +0,0 @@
package cmd
import (
"github.com/kubesphere/kubekey/pkg/cluster/delete"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/spf13/cobra"
)
var deleteClusterCmd = &cobra.Command{
Use: "cluster",
Short: "Delete a cluster",
RunE: func(cmd *cobra.Command, args []string) error {
logger := util.InitLogger(opt.Verbose)
return delete.ResetCluster(opt.ClusterCfgFile, logger, opt.Verbose)
},
}
func init() {
deleteCmd.AddCommand(deleteClusterCmd)
deleteClusterCmd.Flags().StringVarP(&opt.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
}

View File

@ -1,22 +0,0 @@
package cmd
import (
"github.com/kubesphere/kubekey/pkg/cluster/delete"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/spf13/cobra"
"strings"
)
var deleteNodeCmd = &cobra.Command{
Use: "node",
Short: "delete a node",
Run: func(cmd *cobra.Command, args []string) {
logger := util.InitLogger(opt.Verbose)
_ = delete.ResetNode(opt.ClusterCfgFile, logger, opt.Verbose, strings.Join(args, ""))
},
}
func init() {
deleteCmd.AddCommand(deleteNodeCmd)
deleteNodeCmd.Flags().StringVarP(&opt.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
}

View File

@ -1,30 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/spf13/cobra"
)
// initCmd represents the init command
var initCmd = &cobra.Command{
Use: "init",
Short: "Initializes the installation environment",
}
func init() {
rootCmd.AddCommand(initCmd)
}

View File

@ -1,39 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/kubesphere/kubekey/pkg/bootstrap/dependencies"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/spf13/cobra"
)
// osCmd represents the os command
var osCmd = &cobra.Command{
Use: "os",
Short: "Init operating system",
RunE: func(cmd *cobra.Command, args []string) error {
logger := util.InitLogger(opt.Verbose)
return dependencies.InitDependencies(opt.ClusterCfgFile, opt.SourcesDir, opt.AddImagesRepo, logger)
},
}
func init() {
initCmd.AddCommand(osCmd)
osCmd.Flags().StringVarP(&opt.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
osCmd.Flags().StringVarP(&opt.SourcesDir, "sources", "s", "", "Path to the dependencies' dir")
osCmd.Flags().BoolVarP(&opt.AddImagesRepo, "add-images-repo", "", false, "Create a local images registry")
}

View File

@ -1,37 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/kubesphere/kubekey/pkg/bootstrap/registry"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/spf13/cobra"
)
// registryCmd represents the registry command
var registryCmd = &cobra.Command{
Use: "registry",
Short: "Init a local image registry",
RunE: func(cmd *cobra.Command, args []string) error {
logger := util.InitLogger(opt.Verbose)
return registry.InitRegistry(opt.ClusterCfgFile, logger)
},
}
func init() {
initCmd.AddCommand(registryCmd)
registryCmd.Flags().StringVarP(&opt.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
}

View File

@ -1,26 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"github.com/kubesphere/kubekey/experiment/cmd"
)
// Using a separate entry-point can reduce the size of the binary file
func main() {
cmd.Execute()
}

View File

@ -1,22 +0,0 @@
package cmd
import (
"github.com/kubesphere/kubekey/pkg/cluster/certs"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/spf13/cobra"
)
var listClusterCertsCmd = &cobra.Command{
Use: "check-expiration",
Short: "Check certificates expiration for a Kubernetes cluster",
Run: func(cmd *cobra.Command, args []string) {
logger := util.InitLogger(opt.Verbose)
_ = certs.ListCluster(opt.ClusterCfgFile, logger, opt.Verbose)
},
}
func init() {
certsCmd.AddCommand(listClusterCertsCmd)
listClusterCertsCmd.Flags().StringVarP(&opt.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
}

View File

@ -1,22 +0,0 @@
package cmd
import (
"github.com/kubesphere/kubekey/pkg/cluster/certs"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/spf13/cobra"
)
var renewClusterCertsCmd = &cobra.Command{
Use: "renew",
Short: "renew a cluster certs",
Run: func(cmd *cobra.Command, args []string) {
logger := util.InitLogger(opt.Verbose)
_ = certs.RenewClusterCerts(opt.ClusterCfgFile, logger, opt.Verbose)
},
}
func init() {
certsCmd.AddCommand(renewClusterCertsCmd)
renewClusterCertsCmd.Flags().StringVarP(&opt.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
}

View File

@ -1,80 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"os"
"os/exec"
)
type Options struct {
Verbose bool
Addons string
Name string
ClusterCfgPath string
Kubeconfig string
FromCluster bool
ClusterCfgFile string
Kubernetes string
Kubesphere bool
LocalStorage bool
SkipCheck bool
SkipPullImages bool
KsVersion string
Registry string
SourcesDir string
AddImagesRepo bool
InCluster bool
DownloadCmd string
}
var (
opt Options
)
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "kk",
Short: "Kubernetes/KubeSphere Deploy Tool",
Long: `Deploy a Kubernetes or KubeSphere cluster efficiently, flexibly and easily. There are three scenarios to use KubeKey.
1. Install Kubernetes only
2. Install Kubernetes and KubeSphere together in one command
3. Install Kubernetes first, then deploy KubeSphere on it using https://github.com/kubesphere/ks-installer`,
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
_ = exec.Command("/bin/bash", "-c", "ulimit -u 65535").Run()
_ = exec.Command("/bin/bash", "-c", "ulimit -n 65535").Run()
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func init() {
// Here you will define your flags and configuration settings.
// Cobra supports persistent flags, which, if defined here,
// will be global for your application.
rootCmd.PersistentFlags().BoolVar(&opt.InCluster, "in-cluster", false, "Running inside the cluster")
rootCmd.PersistentFlags().BoolVar(&opt.Verbose, "debug", true, "Print detailed information")
// Cobra also supports local flags, which will only run
// when this action is called directly.
}
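
Every subcommand in this package wires itself into rootCmd (or a parent command) from its own init function, so Execute only has to be called once from main. A minimal sketch of that registration pattern with a hypothetical pingCmd that is not part of this commit:

package cmd

import (
	"fmt"

	"github.com/spf13/cobra"
)

// pingCmd is a hypothetical subcommand used only to illustrate the
// registration pattern followed throughout this package.
var pingCmd = &cobra.Command{
	Use:   "ping",
	Short: "Print a liveness message",
	RunE: func(cmd *cobra.Command, args []string) error {
		fmt.Println("pong")
		return nil
	},
}

func init() {
	// Adding the command to rootCmd in init() is all that is required for it
	// to appear under `kk`.
	rootCmd.AddCommand(pingCmd)
}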

View File

@ -1,48 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/kubesphere/kubekey/pkg/cluster/upgrade"
"github.com/kubesphere/kubekey/pkg/util"
"github.com/spf13/cobra"
)
// upgradeCmd represents the upgrade command
var upgradeCmd = &cobra.Command{
Use: "upgrade",
Short: "Upgrade your cluster smoothly to a newer version with this command",
RunE: func(cmd *cobra.Command, args []string) error {
logger := util.InitLogger(opt.Verbose)
var ksVersion string
if opt.Kubesphere && len(args) > 0 {
ksVersion = args[0]
} else {
ksVersion = ""
}
return upgrade.UpgradeCluster(opt.ClusterCfgFile, opt.Kubernetes, ksVersion, logger, opt.Kubesphere, opt.Verbose, opt.SkipPullImages, opt.DownloadCmd)
},
}
func init() {
rootCmd.AddCommand(upgradeCmd)
upgradeCmd.Flags().StringVarP(&opt.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
upgradeCmd.Flags().StringVarP(&opt.Kubernetes, "with-kubernetes", "", "", "Specify a supported version of kubernetes")
upgradeCmd.Flags().BoolVarP(&opt.Kubesphere, "with-kubesphere", "", false, "Deploy a specific version of kubesphere (default v3.1.0)")
upgradeCmd.Flags().BoolVarP(&opt.SkipPullImages, "skip-pull-images", "", false, "Skip pre pull images")
upgradeCmd.Flags().StringVarP(&opt.DownloadCmd, "download-cmd", "", "curl -L -o %s %s",
`The user defined command to download the necessary binary files. The first param '%s' is the output path, the second param '%s' is the URL`)
}

View File

@ -1,65 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"github.com/kubesphere/kubekey/version"
"github.com/spf13/cobra"
"io"
"strings"
)
var shortVersion bool
var showSupportedK8sVersionList bool
// versionCmd represents the version command
var versionCmd = &cobra.Command{
Use: "version",
Short: "print the client version information",
RunE: func(cmd *cobra.Command, _ []string) error {
if showSupportedK8sVersionList {
return printSupportedK8sVersionList(cmd.OutOrStdout())
}
return printVersion(shortVersion)
},
}
func init() {
rootCmd.AddCommand(versionCmd)
versionCmd.Flags().BoolVarP(&shortVersion, "short", "", false, "print the version number")
versionCmd.Flags().BoolVarP(&showSupportedK8sVersionList, "show-supported-k8s", "", false,
`print the version of supported k8s`)
}
func printVersion(short bool) error {
v := version.Get()
if short {
if len(v.GitCommit) >= 7 {
fmt.Printf("%s+g%s\n", v.Version, v.GitCommit[:7])
return nil
}
fmt.Println(version.GetVersion())
return nil
}
fmt.Printf("%#v\n", v)
return nil
}
func printSupportedK8sVersionList(output io.Writer) (err error) {
_, err = output.Write([]byte(fmt.Sprintln(strings.Join(version.SupportedK8sVersionList(), "\n"))))
return
}

View File

@ -1,14 +0,0 @@
package action
import (
"github.com/kubesphere/kubekey/experiment/core/cache"
"github.com/kubesphere/kubekey/experiment/core/config"
"github.com/kubesphere/kubekey/experiment/core/ending"
"github.com/kubesphere/kubekey/experiment/core/vars"
)
type Action interface {
Execute(vars vars.Vars) (err error)
Init(mgr *config.Runtime, cache *cache.Cache, rootCache *cache.Cache)
WrapResult(err error) *ending.Result
}

View File

@ -1,10 +1,10 @@
package action
import (
"github.com/kubesphere/kubekey/experiment/core/cache"
"github.com/kubesphere/kubekey/experiment/core/config"
"github.com/kubesphere/kubekey/experiment/core/ending"
"github.com/kubesphere/kubekey/experiment/core/vars"
"github.com/kubesphere/kubekey/pkg/core/cache"
"github.com/kubesphere/kubekey/pkg/core/config"
"github.com/kubesphere/kubekey/pkg/core/ending"
"github.com/kubesphere/kubekey/pkg/core/vars"
"github.com/pkg/errors"
)

View File

@ -2,7 +2,7 @@ package action
import (
"fmt"
"github.com/kubesphere/kubekey/experiment/core/vars"
"github.com/kubesphere/kubekey/pkg/core/vars"
)
type Copy struct {

View File

@ -0,0 +1,14 @@
package action
import (
"github.com/kubesphere/kubekey/pkg/core/cache"
"github.com/kubesphere/kubekey/pkg/core/config"
"github.com/kubesphere/kubekey/pkg/core/ending"
"github.com/kubesphere/kubekey/pkg/core/vars"
)
type Action interface {
Execute(vars vars.Vars) (err error)
Init(mgr *config.Runtime, cache *cache.Cache, rootCache *cache.Cache)
WrapResult(err error) *ending.Result
}
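
Concrete actions elsewhere in this diff (haproxyPreparatoryWork, updateKubelet, and friends) embed action.BaseAction and implement only Execute, leaving Init and WrapResult to the embedded type. A rough sketch of a custom action under that assumption; the reloadHaproxy type, the command it runs, and the error message are all hypothetical:

package loadbalancer

import (
	"github.com/kubesphere/kubekey/pkg/core/action"
	"github.com/kubesphere/kubekey/pkg/core/vars"
	"github.com/pkg/errors"
)

// reloadHaproxy is a hypothetical action. Embedding action.BaseAction is
// assumed to satisfy Init and WrapResult, so only Execute needs to be written.
type reloadHaproxy struct {
	action.BaseAction
}

func (r *reloadHaproxy) Execute(vars vars.Vars) error {
	// Runtime.Runner is assumed to be populated by Init, as in the other
	// actions in this commit that call SudoCmd.
	if _, err := r.Runtime.Runner.SudoCmd("systemctl reload haproxy", false); err != nil {
		return errors.Wrap(err, "failed to reload haproxy")
	}
	return nil
}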

View File

@ -2,8 +2,8 @@ package action
import (
"fmt"
"github.com/kubesphere/kubekey/experiment/core/util"
"github.com/kubesphere/kubekey/experiment/core/vars"
"github.com/kubesphere/kubekey/pkg/core/util"
"github.com/kubesphere/kubekey/pkg/core/vars"
"github.com/pkg/errors"
"path/filepath"
"text/template"

View File

@ -3,8 +3,8 @@ package config
import (
"bufio"
"fmt"
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/experiment/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/experiment/core/util"
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/core/util"
"github.com/kubesphere/kubekey/pkg/kubesphere"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"

View File

@ -2,13 +2,13 @@ package config
import (
"fmt"
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha1"
kubekeyclientset "github.com/kubesphere/kubekey/clients/clientset/versioned"
kubekeycontroller "github.com/kubesphere/kubekey/controllers/kubekey"
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/experiment/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/experiment/core/connector"
"github.com/kubesphere/kubekey/experiment/core/connector/ssh"
"github.com/kubesphere/kubekey/experiment/core/logger"
"github.com/kubesphere/kubekey/experiment/core/runner"
"github.com/kubesphere/kubekey/pkg/core/connector"
"github.com/kubesphere/kubekey/pkg/core/connector/ssh"
"github.com/kubesphere/kubekey/pkg/core/logger"
"github.com/kubesphere/kubekey/pkg/core/runner"
"github.com/pkg/errors"
"os"
"path/filepath"

View File

@ -1,7 +1,7 @@
package connector
import (
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/experiment/apis/kubekey/v1alpha1"
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha1"
"io"
"os"
)

View File

@ -1,8 +1,8 @@
package ssh
import (
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/experiment/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/experiment/core/connector"
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/core/connector"
"sync"
"time"
)

View File

@ -3,9 +3,9 @@ package ssh
import (
"context"
"fmt"
"github.com/kubesphere/kubekey/experiment/core/connector"
"github.com/kubesphere/kubekey/experiment/core/logger"
"github.com/kubesphere/kubekey/experiment/core/util"
"github.com/kubesphere/kubekey/pkg/core/connector"
"github.com/kubesphere/kubekey/pkg/core/logger"
"github.com/kubesphere/kubekey/pkg/core/util"
"github.com/pkg/errors"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"

View File

@ -1,9 +1,9 @@
package modules
import (
"github.com/kubesphere/kubekey/experiment/core/cache"
"github.com/kubesphere/kubekey/experiment/core/config"
"github.com/kubesphere/kubekey/experiment/core/logger"
"github.com/kubesphere/kubekey/pkg/core/cache"
"github.com/kubesphere/kubekey/pkg/core/config"
"github.com/kubesphere/kubekey/pkg/core/logger"
)
type BaseModule struct {

View File

@ -1,8 +1,8 @@
package modules
import (
"github.com/kubesphere/kubekey/experiment/core/cache"
"github.com/kubesphere/kubekey/experiment/core/config"
"github.com/kubesphere/kubekey/pkg/core/cache"
"github.com/kubesphere/kubekey/pkg/core/config"
)
type Module interface {

View File

@ -1,15 +1,15 @@
package modules
import (
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/experiment/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/experiment/core/action"
"github.com/kubesphere/kubekey/experiment/core/cache"
"github.com/kubesphere/kubekey/experiment/core/config"
"github.com/kubesphere/kubekey/experiment/core/ending"
"github.com/kubesphere/kubekey/experiment/core/logger"
"github.com/kubesphere/kubekey/experiment/core/prepare"
"github.com/kubesphere/kubekey/experiment/core/runner"
"github.com/kubesphere/kubekey/experiment/core/vars"
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/core/action"
"github.com/kubesphere/kubekey/pkg/core/cache"
"github.com/kubesphere/kubekey/pkg/core/config"
"github.com/kubesphere/kubekey/pkg/core/ending"
"github.com/kubesphere/kubekey/pkg/core/logger"
"github.com/kubesphere/kubekey/pkg/core/prepare"
"github.com/kubesphere/kubekey/pkg/core/runner"
"github.com/kubesphere/kubekey/pkg/core/vars"
"github.com/pkg/errors"
"sync"
"time"

View File

@ -1,9 +1,9 @@
package modules
import (
"github.com/kubesphere/kubekey/experiment/core/cache"
"github.com/kubesphere/kubekey/experiment/core/config"
"github.com/kubesphere/kubekey/experiment/core/logger"
cache2 "github.com/kubesphere/kubekey/pkg/core/cache"
config2 "github.com/kubesphere/kubekey/pkg/core/config"
logger2 "github.com/kubesphere/kubekey/pkg/core/logger"
"github.com/pkg/errors"
)
@ -12,14 +12,14 @@ type BaseTaskModule struct {
Tasks []Task
}
func (t *BaseTaskModule) Default(runtime *config.Runtime, rootCache *cache.Cache) {
func (t *BaseTaskModule) Default(runtime *config2.Runtime, rootCache *cache2.Cache) {
if t.Name == "" {
t.Name = DefaultTaskModuleName
}
t.Runtime = runtime
t.RootCache = rootCache
t.Cache = cache.NewCache()
t.Cache = cache2.NewCache()
}
func (t *BaseTaskModule) Init() {
@ -30,8 +30,8 @@ func (t *BaseTaskModule) Is() string {
}
func (t *BaseTaskModule) Run() error {
logger.Log.SetModule(t.Name)
logger.Log.Info("Begin Run")
logger2.Log.SetModule(t.Name)
logger2.Log.Info("Begin Run")
for i := range t.Tasks {
task := t.Tasks[i]
task.Init(t.Runtime, t.Cache, t.RootCache)

View File

@ -1,10 +1,10 @@
package pipeline
import (
"github.com/kubesphere/kubekey/experiment/core/cache"
"github.com/kubesphere/kubekey/experiment/core/config"
"github.com/kubesphere/kubekey/experiment/core/logger"
"github.com/kubesphere/kubekey/experiment/core/modules"
"github.com/kubesphere/kubekey/pkg/core/cache"
"github.com/kubesphere/kubekey/pkg/core/config"
"github.com/kubesphere/kubekey/pkg/core/logger"
"github.com/kubesphere/kubekey/pkg/core/modules"
"github.com/pkg/errors"
)

View File

@ -1,8 +1,8 @@
package prepare
import (
"github.com/kubesphere/kubekey/experiment/core/cache"
"github.com/kubesphere/kubekey/experiment/core/config"
"github.com/kubesphere/kubekey/pkg/core/cache"
"github.com/kubesphere/kubekey/pkg/core/config"
)
type BasePrepare struct {

View File

@ -1,8 +1,8 @@
package prepare
import (
"github.com/kubesphere/kubekey/experiment/core/cache"
"github.com/kubesphere/kubekey/experiment/core/config"
"github.com/kubesphere/kubekey/pkg/core/cache"
"github.com/kubesphere/kubekey/pkg/core/config"
)
type Prepare interface {

View File

@ -3,10 +3,10 @@ package runner
import (
"errors"
"fmt"
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/experiment/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/experiment/core/connector"
"github.com/kubesphere/kubekey/experiment/core/connector/ssh"
"github.com/kubesphere/kubekey/experiment/core/logger"
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/core/connector"
"github.com/kubesphere/kubekey/pkg/core/connector/ssh"
"github.com/kubesphere/kubekey/pkg/core/logger"
"os"
)

View File

@ -3,8 +3,8 @@ package util
import (
"crypto/md5"
"fmt"
"github.com/kubesphere/kubekey/experiment/core/common"
"github.com/kubesphere/kubekey/experiment/core/logger"
"github.com/kubesphere/kubekey/pkg/core/common"
"github.com/kubesphere/kubekey/pkg/core/logger"
"io"
"io/ioutil"
"os"

View File

@ -20,8 +20,8 @@ import (
"bytes"
"encoding/binary"
"fmt"
"github.com/kubesphere/kubekey/pkg/core/logger"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"net"
"os"
"os/exec"
@ -206,7 +206,7 @@ func GetLocalIP() (string, error) {
func LocalIP() string {
localIp, err := GetLocalIP()
if err != nil {
log.Fatalf("Failed to get Local IP: %v", err)
logger.Log.Fatalf("Failed to get Local IP: %v", err)
}
return localIp
}

View File

@ -1,10 +1,10 @@
package pipelines
import (
"github.com/kubesphere/kubekey/experiment/core/config"
"github.com/kubesphere/kubekey/experiment/core/modules"
"github.com/kubesphere/kubekey/experiment/core/pipeline"
"github.com/kubesphere/kubekey/experiment/pipelines/initialization"
"github.com/kubesphere/kubekey/pkg/core/config"
"github.com/kubesphere/kubekey/pkg/core/modules"
"github.com/kubesphere/kubekey/pkg/core/pipeline"
"github.com/kubesphere/kubekey/pkg/pipelines/initialization"
)
func NewCreateClusterPipeline(runtime *config.Runtime) error {

View File

@ -3,9 +3,9 @@ package initialization
import (
"bufio"
"fmt"
"github.com/kubesphere/kubekey/experiment/core/logger"
"github.com/kubesphere/kubekey/experiment/core/modules"
"github.com/kubesphere/kubekey/experiment/core/prepare"
"github.com/kubesphere/kubekey/pkg/core/logger"
"github.com/kubesphere/kubekey/pkg/core/modules"
"github.com/kubesphere/kubekey/pkg/core/prepare"
"github.com/mitchellh/mapstructure"
"github.com/modood/table"
"github.com/pkg/errors"

View File

@ -2,9 +2,9 @@ package initialization
import (
"fmt"
"github.com/kubesphere/kubekey/experiment/core/action"
"github.com/kubesphere/kubekey/experiment/core/logger"
"github.com/kubesphere/kubekey/experiment/core/vars"
"github.com/kubesphere/kubekey/pkg/core/action"
"github.com/kubesphere/kubekey/pkg/core/logger"
"github.com/kubesphere/kubekey/pkg/core/vars"
"strings"
)

View File

@ -1,13 +1,13 @@
package loadbalancer
import (
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/experiment/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/experiment/core/action"
"github.com/kubesphere/kubekey/experiment/core/config"
"github.com/kubesphere/kubekey/experiment/core/modules"
"github.com/kubesphere/kubekey/experiment/core/prepare"
"github.com/kubesphere/kubekey/experiment/core/util"
"github.com/kubesphere/kubekey/experiment/pipelines/loadbalancer/templates"
kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/core/action"
"github.com/kubesphere/kubekey/pkg/core/config"
"github.com/kubesphere/kubekey/pkg/core/modules"
"github.com/kubesphere/kubekey/pkg/core/prepare"
"github.com/kubesphere/kubekey/pkg/core/util"
"github.com/kubesphere/kubekey/pkg/pipelines/loadbalancer/templates"
"strconv"
)

View File

@ -2,10 +2,10 @@ package loadbalancer
import (
"fmt"
"github.com/kubesphere/kubekey/experiment/core/action"
"github.com/kubesphere/kubekey/experiment/core/logger"
"github.com/kubesphere/kubekey/experiment/core/prepare"
"github.com/kubesphere/kubekey/experiment/core/vars"
action2 "github.com/kubesphere/kubekey/pkg/core/action"
logger2 "github.com/kubesphere/kubekey/pkg/core/logger"
prepare2 "github.com/kubesphere/kubekey/pkg/core/prepare"
vars2 "github.com/kubesphere/kubekey/pkg/core/vars"
"github.com/pkg/errors"
"os"
"strconv"
@ -13,10 +13,10 @@ import (
)
type haproxyPreparatoryWork struct {
action.BaseAction
action2.BaseAction
}
func (h *haproxyPreparatoryWork) Execute(vars vars.Vars) error {
func (h *haproxyPreparatoryWork) Execute(vars vars2.Vars) error {
if err := h.Runtime.Runner.MkDir("/etc/kubekey/haproxy"); err != nil {
return err
}
@ -27,10 +27,10 @@ func (h *haproxyPreparatoryWork) Execute(vars vars.Vars) error {
}
type getChecksum struct {
action.BaseAction
action2.BaseAction
}
func (g *getChecksum) Execute(vars vars.Vars) error {
func (g *getChecksum) Execute(vars vars2.Vars) error {
md5Str, err := g.Runtime.Runner.FileMd5("/etc/kubekey/haproxy/haproxy.cfg")
if err != nil {
return err
@ -40,7 +40,7 @@ func (g *getChecksum) Execute(vars vars.Vars) error {
}
type updateK3sPrepare struct {
prepare.BasePrepare
prepare2.BasePrepare
}
func (u *updateK3sPrepare) PreCheck() (bool, error) {
@ -54,7 +54,7 @@ func (u *updateK3sPrepare) PreCheck() (bool, error) {
return false, err
} else {
if strings.Contains(strings.TrimSpace(out), LocalServer) {
logger.Log.Debugf("do not restart kubelet, /etc/systemd/system/k3s.service content is %s", out)
logger2.Log.Debugf("do not restart kubelet, /etc/systemd/system/k3s.service content is %s", out)
return false, nil
}
}
@ -65,10 +65,10 @@ func (u *updateK3sPrepare) PreCheck() (bool, error) {
}
type updateK3s struct {
action.BaseAction
action2.BaseAction
}
func (u *updateK3s) Execute(vars vars.Vars) error {
func (u *updateK3s) Execute(vars vars2.Vars) error {
if _, err := u.Runtime.Runner.SudoCmd("sed -i 's#--server=.*\"#--server=https://127.0.0.1:%s\"#g' /etc/systemd/system/k3s.service", false); err != nil {
return err
}
@ -79,7 +79,7 @@ func (u *updateK3s) Execute(vars vars.Vars) error {
}
type updateKubeletPrepare struct {
prepare.BasePrepare
prepare2.BasePrepare
}
func (u *updateKubeletPrepare) PreCheck() (bool, error) {
@ -93,7 +93,7 @@ func (u *updateKubeletPrepare) PreCheck() (bool, error) {
return false, err
} else {
if strings.Contains(strings.TrimSpace(out), LocalServer) {
logger.Log.Debugf("do not restart kubelet, /etc/kubernetes/kubelet.conf content is %s", out)
logger2.Log.Debugf("do not restart kubelet, /etc/kubernetes/kubelet.conf content is %s", out)
return false, nil
}
}
@ -104,10 +104,10 @@ func (u *updateKubeletPrepare) PreCheck() (bool, error) {
}
type updateKubelet struct {
action.BaseAction
action2.BaseAction
}
func (u *updateKubelet) Execute(vars vars.Vars) error {
func (u *updateKubelet) Execute(vars vars2.Vars) error {
if _, err := u.Runtime.Runner.SudoCmd(fmt.Sprintf(
"sed -i 's#server:.*#server: https://127.0.0.1:%s#g' /etc/kubernetes/kubelet.conf",
strconv.Itoa(u.Runtime.Cluster.ControlPlaneEndpoint.Port)), false); err != nil {
@ -120,7 +120,7 @@ func (u *updateKubelet) Execute(vars vars.Vars) error {
}
type updateKubeproxyPrapre struct {
prepare.BasePrepare
prepare2.BasePrepare
}
func (u *updateKubeproxyPrapre) PreCheck() (bool, error) {
@ -130,7 +130,7 @@ func (u *updateKubeproxyPrapre) PreCheck() (bool, error) {
return false, err
} else {
if strings.Contains(strings.TrimSpace(out), LocalServer) {
logger.Log.Debugf("do not restart kube-proxy, configmap kube-proxy content is %s", out)
logger2.Log.Debugf("do not restart kube-proxy, configmap kube-proxy content is %s", out)
return false, nil
}
}
@ -138,10 +138,10 @@ func (u *updateKubeproxyPrapre) PreCheck() (bool, error) {
}
type updateKubeproxy struct {
action.BaseAction
action2.BaseAction
}
func (u *updateKubeproxy) Execute(vars vars.Vars) error {
func (u *updateKubeproxy) Execute(vars vars2.Vars) error {
if _, err := u.Runtime.Runner.SudoCmd("set -o pipefail "+
"&& /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf get configmap kube-proxy -n kube-system -o yaml "+
"| sed 's#server:.*#server: https://127.0.0.1:%s#g' "+
@ -155,10 +155,10 @@ func (u *updateKubeproxy) Execute(vars vars.Vars) error {
}
type updateHosts struct {
action.BaseAction
action2.BaseAction
}
func (u *updateHosts) Execute(vars vars.Vars) error {
func (u *updateHosts) Execute(vars vars2.Vars) error {
if _, err := u.Runtime.Runner.SudoCmd("sed -i 's#.* %s#127.0.0.1 %s#g' /etc/hosts", false); err != nil {
return err
}
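
The PreCheck implementations in this hunk share one shape: embed prepare.BasePrepare, inspect the current state over the runner, and return (false, nil) to skip the paired action when the change is already applied. A rough sketch of that shape with a made-up check; it assumes BasePrepare exposes Runtime the same way BaseAction does for the actions above:

package loadbalancer

import (
	"strings"

	"github.com/kubesphere/kubekey/pkg/core/prepare"
)

// hostsAlreadyPatched is a hypothetical prepare step. Returning (false, nil)
// from PreCheck is assumed to make the framework skip the paired action.
type hostsAlreadyPatched struct {
	prepare.BasePrepare
}

func (h *hostsAlreadyPatched) PreCheck() (bool, error) {
	// Runtime is assumed to be reachable through BasePrepare, mirroring how
	// the actions above reach it through BaseAction.
	out, err := h.Runtime.Runner.SudoCmd("cat /etc/hosts", false)
	if err != nil {
		return false, err
	}
	if strings.Contains(out, "127.0.0.1 lb.kubesphere.local") {
		// The entry is already there; skip the action.
		return false, nil
	}
	return true, nil
}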

View File

@ -66,7 +66,7 @@ func NewExecutor(cluster *kubekeyapiv1alpha1.ClusterSpec, objName string, logger
func (executor *Executor) CreateManager() (*manager.Manager, error) {
mgr := &manager.Manager{}
defaultCluster, hostGroups, err := executor.Cluster.SetDefaultClusterSpec(executor.InCluster, executor.Logger)
defaultCluster, hostGroups, err := executor.Cluster.SetDefaultClusterSpec(executor.InCluster)
if err != nil {
return nil, err
}