Mirror of https://github.com/kubesphere/kubekey.git (synced 2025-12-26 01:22:51 +00:00)

Commit c9773712a5 (parent be2947ff9b)
dev-v2.0.0: fix auth bug
Signed-off-by: 24sama <leo@kubesphere.io>

@@ -19,7 +19,7 @@ RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o kk cmd/k
 # Build the manager image
 FROM debian:stable
 
-RUN useradd -m kubekey && apt-get update && apt-get install bash curl -y; apt-get autoclean; rm -rf /var/lib/apt/lists/*
+RUN useradd -m kubekey -u 1000 && apt-get update && apt-get install bash curl -y; apt-get autoclean; rm -rf /var/lib/apt/lists/*
 
 USER kubekey:kubekey
 RUN mkdir -p /home/kubekey/kubekey

@@ -1,9 +1,9 @@
 FROM alpine:3
 
-ENV KUBEVERSION=v1.17.9
+ENV KUBEVERSION=v1.21.5
 ENV ETCDVERSION=v3.4.13
-ENV CNIVERSION=v0.8.6
-ENV HELMVERSION=v3.2.1
+ENV CNIVERSION=v0.9.1
+ENV HELMVERSION=v3.6.3
 ENV ARCH=amd64
 
 WORKDIR /kubekey/${KUBEVERSION}/${ARCH}

@@ -24,6 +24,9 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/scheme"
 )
 
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = GroupVersion
+
 var (
 	// GroupVersion is group version used to register these objects
 	GroupVersion = schema.GroupVersion{Group: "kubekey.kubesphere.io", Version: "v1alpha1"}

@@ -24,6 +24,9 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/scheme"
 )
 
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = GroupVersion
+
 var (
 	// GroupVersion is group version used to register these objects
 	GroupVersion = schema.GroupVersion{Group: "kubekey.kubesphere.io", Version: "v1alpha2"}

@@ -37,4 +40,4 @@ var (
 
 func Resource(resource string) schema.GroupResource {
 	return GroupVersion.WithResource(resource).GroupResource()
-}
+}

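The two register hunks above add a SchemeGroupVersion alias next to the existing GroupVersion for both API versions. A minimal sketch of the resulting package layout is below; it assumes the usual k8s.io/code-generator convention that generated clientset/lister code refers to a package-level SchemeGroupVersion, which is why an alias to the existing value is enough. Comments and the exact file layout are illustrative, not copied from the repo.

    // Sketch of a group registration file after the change (v1alpha2 shown).
    package v1alpha2

    import "k8s.io/apimachinery/pkg/runtime/schema"

    var (
        // GroupVersion is what controller-runtime scheme builders use.
        GroupVersion = schema.GroupVersion{Group: "kubekey.kubesphere.io", Version: "v1alpha2"}

        // SchemeGroupVersion is the name generated code conventionally expects;
        // aliasing it keeps hand-written and generated code pointing at the
        // same group/version value.
        SchemeGroupVersion = GroupVersion
    )

    // Resource qualifies an unqualified resource name,
    // e.g. Resource("clusters") -> "clusters.kubekey.kubesphere.io".
    func Resource(resource string) schema.GroupResource {
        return GroupVersion.WithResource(resource).GroupResource()
    }
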
@@ -16,7 +16,7 @@ limitations under the License.
 package cmd
 
 import (
-	common2 "github.com/kubesphere/kubekey/pkg/common"
+	"github.com/kubesphere/kubekey/pkg/common"
 	"github.com/kubesphere/kubekey/pkg/pipelines"
 	"github.com/spf13/cobra"
 )

@@ -26,11 +26,11 @@ var addNodesCmd = &cobra.Command{
 	Use:   "nodes",
 	Short: "Add nodes to the cluster according to the new nodes information from the specified configuration file",
 	RunE: func(cmd *cobra.Command, args []string) error {
-		arg := common2.Argument{
+		arg := common.Argument{
 			FilePath:         opt.ClusterCfgFile,
 			KsEnable:         false,
 			Debug:            opt.Verbose,
 			SkipCheck:        opt.SkipCheck,
 			SkipConfirmCheck: opt.SkipConfirmCheck,
 			SkipPullImages:   opt.SkipPullImages,
 			InCluster:        opt.InCluster,
 			ContainerManager: opt.ContainerManager,

@@ -42,7 +42,6 @@ var addNodesCmd = &cobra.Command{
 func init() {
 	addCmd.AddCommand(addNodesCmd)
 	addNodesCmd.Flags().StringVarP(&opt.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
 	addNodesCmd.Flags().BoolVarP(&opt.SkipCheck, "yes", "y", false, "Skip pre-check of the installation")
 	addNodesCmd.Flags().BoolVarP(&opt.SkipPullImages, "skip-pull-images", "", false, "Skip pre pull images")
 	addNodesCmd.Flags().StringVarP(&opt.ContainerManager, "container-manager", "", "docker", "Container manager: docker, crio, containerd and isula.")
 	addNodesCmd.Flags().StringVarP(&opt.DownloadCmd, "download-cmd", "", "curl -L -o %s %s",

@@ -17,8 +17,8 @@ package cmd
 
 import (
 	"fmt"
-	"github.com/kubesphere/kubekey/apis/kubekey/v1alpha1"
-	common2 "github.com/kubesphere/kubekey/pkg/common"
+	"github.com/kubesphere/kubekey/apis/kubekey/v1alpha2"
+	"github.com/kubesphere/kubekey/pkg/common"
 	"github.com/kubesphere/kubekey/pkg/pipelines"
 	"github.com/kubesphere/kubekey/version"
 	"github.com/spf13/cobra"

@@ -37,16 +37,16 @@ var clusterCmd = &cobra.Command{
 			ksVersion = ""
 		}
 
-		arg := common2.Argument{
+		arg := common.Argument{
 			FilePath:           opt.ClusterCfgFile,
 			KubernetesVersion:  opt.Kubernetes,
 			KsEnable:           opt.Kubesphere,
 			KsVersion:          ksVersion,
 			SkipCheck:          opt.SkipCheck,
 			SkipPullImages:     opt.SkipPullImages,
 			InCluster:          opt.InCluster,
 			DeployLocalStorage: opt.LocalStorage,
 			Debug:              opt.Verbose,
 			SkipConfirmCheck:   opt.SkipConfirmCheck,
 			ContainerManager:   opt.ContainerManager,
 		}

@@ -58,10 +58,9 @@ func init() {
 	createCmd.AddCommand(clusterCmd)
 
 	clusterCmd.Flags().StringVarP(&opt.ClusterCfgFile, "filename", "f", "", "Path to a configuration file")
-	clusterCmd.Flags().StringVarP(&opt.Kubernetes, "with-kubernetes", "", v1alpha1.DefaultKubeVersion, "Specify a supported version of kubernetes")
+	clusterCmd.Flags().StringVarP(&opt.Kubernetes, "with-kubernetes", "", v1alpha2.DefaultKubeVersion, "Specify a supported version of kubernetes")
 	clusterCmd.Flags().BoolVarP(&opt.LocalStorage, "with-local-storage", "", false, "Deploy a local PV provisioner")
 	clusterCmd.Flags().BoolVarP(&opt.Kubesphere, "with-kubesphere", "", false, "Deploy a specific version of kubesphere (default v3.1.0)")
 	clusterCmd.Flags().BoolVarP(&opt.SkipCheck, "yes", "y", false, "Skip pre-check of the installation")
 	clusterCmd.Flags().BoolVarP(&opt.SkipPullImages, "skip-pull-images", "", false, "Skip pre pull images")
 	clusterCmd.Flags().StringVarP(&opt.ContainerManager, "container-manager", "", "docker", "Container runtime: docker, crio, containerd and isula.")
 	clusterCmd.Flags().StringVarP(&opt.DownloadCmd, "download-cmd", "", "curl -L -o %s %s",

@@ -16,8 +16,8 @@ limitations under the License.
 package cmd
 
 import (
-	common2 "github.com/kubesphere/kubekey/pkg/common"
-	config2 "github.com/kubesphere/kubekey/pkg/config"
+	"github.com/kubesphere/kubekey/pkg/common"
+	"github.com/kubesphere/kubekey/pkg/config"
 	"github.com/spf13/cobra"
 )
 

@@ -33,7 +33,7 @@ var configCmd = &cobra.Command{
 			ksVersion = ""
 		}
 
-		arg := common2.Argument{
+		arg := common.Argument{
 			FilePath:          opt.ClusterCfgFile,
 			KubernetesVersion: opt.Kubernetes,
 			KsEnable:          opt.Kubesphere,

@@ -42,7 +42,7 @@ var configCmd = &cobra.Command{
 			KubeConfig: opt.Kubeconfig,
 		}
 
-		err := config2.GenerateKubeKeyConfig(arg, opt.Name)
+		err := config.GenerateKubeKeyConfig(arg, opt.Name)
 		if err != nil {
 			return err
 		}

@@ -1,7 +1,7 @@
 package cmd
 
 import (
-	common2 "github.com/kubesphere/kubekey/pkg/common"
+	"github.com/kubesphere/kubekey/pkg/common"
 	"github.com/kubesphere/kubekey/pkg/pipelines"
 	"github.com/spf13/cobra"
 )

@@ -10,7 +10,7 @@ var deleteClusterCmd = &cobra.Command{
 	Use:   "cluster",
 	Short: "Delete a cluster",
 	RunE: func(cmd *cobra.Command, args []string) error {
-		arg := common2.Argument{
+		arg := common.Argument{
 			FilePath: opt.ClusterCfgFile,
 			Debug:    opt.Verbose,
 		}

@@ -1,7 +1,7 @@
 package cmd
 
 import (
-	common2 "github.com/kubesphere/kubekey/pkg/common"
+	"github.com/kubesphere/kubekey/pkg/common"
 	"github.com/kubesphere/kubekey/pkg/pipelines"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"

@@ -18,7 +18,7 @@ var deleteNodeCmd = &cobra.Command{
 		return nil
 	},
 	RunE: func(cmd *cobra.Command, args []string) error {
-		arg := common2.Argument{
+		arg := common.Argument{
 			FilePath: opt.ClusterCfgFile,
 			Debug:    opt.Verbose,
 			NodeName: strings.Join(args, ""),

@@ -16,7 +16,7 @@ limitations under the License.
 package cmd
 
 import (
-	common2 "github.com/kubesphere/kubekey/pkg/common"
+	"github.com/kubesphere/kubekey/pkg/common"
 	"github.com/kubesphere/kubekey/pkg/pipelines"
 	"github.com/spf13/cobra"
 )

@@ -26,7 +26,7 @@ var osCmd = &cobra.Command{
 	Use:   "os",
 	Short: "Init operating system",
 	RunE: func(cmd *cobra.Command, args []string) error {
-		arg := common2.Argument{
+		arg := common.Argument{
 			FilePath:      opt.ClusterCfgFile,
 			SourcesDir:    opt.SourcesDir,
 			AddImagesRepo: opt.AddImagesRepo,

@@ -32,7 +32,6 @@ type Options struct {
 	Kubernetes       string
 	Kubesphere       bool
 	LocalStorage     bool
 	SkipCheck        bool
 	SkipPullImages   bool
 	KsVersion        string
 	Registry         string

@@ -41,6 +40,7 @@ type Options struct {
 	ContainerManager string
 	InCluster        bool
 	DownloadCmd      string
+	SkipConfirmCheck bool
 }
 
 var (

@@ -74,7 +74,7 @@ func init() {
 	// will be global for your application.
 	rootCmd.PersistentFlags().BoolVar(&opt.InCluster, "in-cluster", false, "Running inside the cluster")
 	rootCmd.PersistentFlags().BoolVar(&opt.Verbose, "debug", true, "Print detailed information")
 
 	rootCmd.PersistentFlags().BoolVarP(&opt.SkipConfirmCheck, "yes", "y", false, "Skip confirm check")
 	// Cobra also supports local flags, which will only run
 	// when this action is called directly.
 }

@@ -39,7 +39,7 @@ var upgradeCmd = &cobra.Command{
 			KubernetesVersion:  opt.Kubernetes,
 			KsEnable:           opt.Kubesphere,
 			KsVersion:          ksVersion,
 			SkipCheck:          opt.SkipCheck,
 			SkipConfirmCheck:   opt.SkipConfirmCheck,
 			SkipPullImages:     opt.SkipPullImages,
 			InCluster:          opt.InCluster,
 			DeployLocalStorage: opt.LocalStorage,

@@ -342,6 +342,8 @@ func (r *ClusterReconciler) jobForCluster(c *kubekeyv1alpha2.Cluster, action str
 	}
 
 	imageRepoList := strings.Split(image, "/")
+	var kubekey int64
+	kubekey = 1000
 
 	job := &batchv1.Job{
 		TypeMeta: metav1.TypeMeta{},

@@ -383,6 +385,10 @@ func (r *ClusterReconciler) jobForCluster(c *kubekeyv1alpha2.Cluster, action str
 						},
 					},
 				},
+				SecurityContext: &corev1.PodSecurityContext{
+					RunAsUser: &kubekey,
+					FSGroup:   &kubekey,
+				},
 				InitContainers: []corev1.Container{
 					{
 						Name: "kube-binaries",

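The pair of hunks above is the core of the auth fix: the image now creates the kubekey user with a fixed UID of 1000, and the Job's pod runs with a matching RunAsUser/FSGroup so the non-root runner can read and write its mounted volumes. A minimal sketch of that pairing is below; the field names follow k8s.io/api/core/v1, while the package and function names are illustrative only.

    // Sketch: the pod-level security context that matches `useradd -m kubekey -u 1000`
    // in the Dockerfile. RunAsUser makes the runner process the kubekey user, and
    // FSGroup makes Kubernetes chown mounted volumes so that user can write to them.
    package sketch

    import corev1 "k8s.io/api/core/v1"

    func kubekeySecurityContext() *corev1.PodSecurityContext {
        uid := int64(1000) // must stay in sync with the UID baked into the image
        return &corev1.PodSecurityContext{
            RunAsUser: &uid,
            FSGroup:   &uid,
        }
    }
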
@@ -403,7 +409,7 @@ func (r *ClusterReconciler) jobForCluster(c *kubekeyv1alpha2.Cluster, action str
 			Containers: []corev1.Container{{
 				Name:            "runner",
 				Image:           image,
-				ImagePullPolicy: "Always",
+				ImagePullPolicy: "IfNotPresent",
 				Command:         []string{"/home/kubekey/kk"},
 				Args:            args,
 				VolumeMounts: []corev1.VolumeMount{

@@ -418,7 +424,7 @@ func (r *ClusterReconciler) jobForCluster(c *kubekeyv1alpha2.Cluster, action str
 				},
 			}},
 			NodeName:           nodeName,
-			ServiceAccountName: "default",
+			ServiceAccountName: "kubekey-controller-manager",
 			RestartPolicy:      "Never",
 		},
 	},

@@ -566,7 +572,7 @@ func sendHostsAction(action int, hosts []kubekeyv1alpha2.HostCfg, log logr.Logge
 	}
 
 	fmt.Println(string(hostsInfoBytes))
-	req, err := http.NewRequest("POST", "http://localhost:8090/api/v1alpha1/hosts", bytes.NewReader(hostsInfoBytes))
+	req, err := http.NewRequest("POST", "http://localhost:8090/api/v1alpha2/hosts", bytes.NewReader(hostsInfoBytes))
 	if err != nil {
 		log.Error(err, "Failed to create request")
 	}

@@ -54,7 +54,7 @@ const (
 var (
 	newNodes          []string
 	clusterKubeSphere = template.Must(template.New("cluster.kubesphere.io").Parse(
-		dedent.Dedent(`apiVersion: cluster.kubesphere.io/v1alpha1
+		dedent.Dedent(`apiVersion: cluster.kubesphere.io/v1alpha2
 kind: Cluster
 metadata:
   finalizers:

@@ -97,7 +97,7 @@ func CheckClusterRole() (bool, *rest.Config, error) {
 	}
 	var hostClusterFlag bool
 	if err := clientset.RESTClient().Get().
-		AbsPath("/apis/cluster.kubesphere.io/v1alpha1/clusters").
+		AbsPath("/apis/cluster.kubesphere.io/v1alpha2/clusters").
 		Name("host").
 		Do(context.TODO()).Error(); err == nil {
 		hostClusterFlag = true

@@ -172,7 +172,7 @@ func getCluster(name string) (*kubekeyapiv1alpha2.Cluster, error) {
 }
 
 // UpdateClusterConditions is used for updating cluster installation process information or adding nodes.
-func UpdateClusterConditions(kubeConf *common.KubeConf, step string, result *ending.ModuleResult) error {
+func UpdateClusterConditions(runtime *common.KubeRuntime, step string, result *ending.ModuleResult) error {
 	m := make(map[string]kubekeyapiv1alpha2.Event)
 	allStatus := true
 	for k, v := range result.HostResults {

@@ -180,9 +180,11 @@ func UpdateClusterConditions(kubeConf *common.KubeConf, step string, result *end
 			allStatus = false
 		}
 		e := kubekeyapiv1alpha2.Event{
-			Step:    step,
-			Status:  v.GetStatus().String(),
-			Message: v.GetErr().Error(),
+			Step:   step,
+			Status: v.GetStatus().String(),
 		}
+		if v.GetErr() != nil {
+			e.Message = v.GetErr().Error()
+		}
 		m[k] = e
 	}

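The reshuffle above guards against a panic: v.GetErr() is presumably nil for hosts that succeeded, and calling Error() on a nil error would crash the status update. A small stand-alone sketch of the guard, with simplified placeholder types rather than the real kubekeyapiv1alpha2.Event:

    package sketch

    // event mirrors the shape of the status event being built above
    // (field names kept, everything else simplified).
    type event struct {
        Step    string
        Status  string
        Message string
    }

    // newEvent only touches Message when an error is actually present,
    // so Error() is never called on a nil error.
    func newEvent(step, status string, err error) event {
        e := event{Step: step, Status: status}
        if err != nil {
            e.Message = err.Error()
        }
        return e
    }
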
@@ -195,14 +197,21 @@ func UpdateClusterConditions(kubeConf *common.KubeConf, step string, result *end
 	}
 	//kubeConf.Conditions = append(kubeConf.Conditions, condition)
 
-	cluster, err := getCluster(kubeConf.ClusterName)
+	cluster, err := getCluster(runtime.ClusterName)
 	if err != nil {
 		return err
 	}
 
-	cluster.Status.Conditions = append(cluster.Status.Conditions, condition)
+	length := len(cluster.Status.Conditions)
+	if length <= 0 {
+		cluster.Status.Conditions = append(cluster.Status.Conditions, condition)
+	} else if cluster.Status.Conditions[length-1].Step == condition.Step {
+		cluster.Status.Conditions[length-1] = condition
+	} else {
+		cluster.Status.Conditions = append(cluster.Status.Conditions, condition)
+	}
 
-	if _, err := kubeConf.ClientSet.KubekeyV1alpha2().Clusters().UpdateStatus(context.TODO(), cluster, metav1.UpdateOptions{}); err != nil {
+	if _, err := runtime.ClientSet.KubekeyV1alpha2().Clusters().UpdateStatus(context.TODO(), cluster, metav1.UpdateOptions{}); err != nil {
 		return err
 	}
 	return nil

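Two things change in this hunk: the function reads the cluster name and clientset from the KubeRuntime it now receives, and conditions are de-duplicated so that re-reporting a step overwrites the previous condition for that step instead of appending another copy. A reduced sketch of that append-or-replace rule, with placeholder types rather than the real condition type:

    package sketch

    // condition stands in for the cluster status condition being recorded.
    type condition struct {
        Step   string
        Status bool
    }

    // upsertCondition replaces the last condition when it reports the same
    // step again, and appends otherwise, so retried steps don't grow the list.
    func upsertCondition(conds []condition, c condition) []condition {
        if n := len(conds); n > 0 && conds[n-1].Step == c.Step {
            conds[n-1] = c
            return conds
        }
        return append(conds, c)
    }
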
@@ -32,4 +32,3 @@ import (
 	_ "k8s.io/code-generator/cmd/register-gen"
 	_ "k8s.io/code-generator/cmd/set-gen"
 )
-

main.go
@@ -82,6 +82,7 @@ func main() {
 
 	if err = (&kubekey.ClusterReconciler{
 		Client: mgr.GetClient(),
 		Log:    ctrl.Log.WithName("controllers").WithName("Cluster"),
 		Scheme: mgr.GetScheme(),
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "Cluster")

@@ -208,7 +208,7 @@ func DoServerSideApply(ctx context.Context, cfg *rest.Config, objectYAML []byte)
 func DoPatchCluster(client dynamic.Interface, name string, data []byte) error {
 	var gvr = schema.GroupVersionResource{
 		Group:    "kubekey.kubesphere.io",
-		Version:  "v1alpha1",
+		Version:  "v1alpha2",
 		Resource: "clusters",
 	}
 	cfg, err := rest.InClusterConfig()

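The GroupVersionResource is how the dynamic client addresses the Cluster CRD, so it has to name an API version the CRD actually serves; a patch sent through a stale v1alpha1 GVR would fail once only v1alpha2 is served. A hedged sketch of such a patch call follows; the function name and the merge-patch type are assumptions for illustration, not the repo's exact code.

    package sketch

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/dynamic"
    )

    // patchCluster merge-patches a kubekey Cluster object through the dynamic
    // client, using the v1alpha2 GVR that the diff above switches to.
    func patchCluster(ctx context.Context, client dynamic.Interface, name string, data []byte) error {
        gvr := schema.GroupVersionResource{
            Group:    "kubekey.kubesphere.io",
            Version:  "v1alpha2",
            Resource: "clusters",
        }
        _, err := client.Resource(gvr).Patch(ctx, name, types.MergePatchType, data, metav1.PatchOptions{})
        return err
    }
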
@@ -69,7 +69,7 @@ func K8sFilesDownloadHTTP(kubeConf *common.KubeConf, filepath, version, arch str
 	docker.GetCmd = kubeConf.Arg.DownloadCommand(docker.Path, docker.Url)
 	crictl.GetCmd = kubeConf.Arg.DownloadCommand(crictl.Path, crictl.Url)
 
-	binaries := []files.KubeBinary{kubeadm, kubelet, kubectl, helm, kubecni, etcd, docker, crictl}
+	binaries := []files.KubeBinary{kubeadm, kubelet, kubectl, helm, kubecni, docker, crictl, etcd}
 	binariesMap := make(map[string]files.KubeBinary)
 	for _, binary := range binaries {
 		logger.Log.Messagef(common.LocalHost, "downloading %s ...", binary.Name)

@@ -25,12 +25,12 @@ type Argument struct {
 	KsEnable           bool
 	KsVersion          string
 	Debug              bool
 	SkipCheck          bool
 	SkipPullImages     bool
 	AddImagesRepo      bool
 	DeployLocalStorage bool
 	SourcesDir         string
 	DownloadCommand    func(path, url string) string
 	SkipConfirmCheck   bool
 	InCluster          bool
 	ContainerManager   string
 	FromCluster        bool

@@ -86,6 +86,7 @@ func NewKubeRuntime(flag string, arg Argument) (*KubeRuntime, error) {
 	r := &KubeRuntime{
 		ClusterHosts: generateHosts(hostGroups, defaultCluster),
 		Cluster:      defaultCluster,
 		ClusterName:  cluster.Name,
 		//ClientSet:    clientset,
 		Arg:          arg,
 	}

@@ -15,7 +15,7 @@ type ModuleResult struct {
 }
 
 func NewModuleResult() *ModuleResult {
-	return &ModuleResult{HostResults: make(map[string]Interface), Status: NULL}
+	return &ModuleResult{HostResults: make(map[string]Interface), StartTime: time.Now(), Status: NULL}
 }
 
 func (m *ModuleResult) IsFailed() bool {

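Stamping StartTime in the constructor presumably matters because a zero time.Time would make any elapsed-time calculation on the result meaningless. A tiny sketch of the idea with assumed field and method names (the real ModuleResult has more fields than this):

    package sketch

    import "time"

    // moduleResult keeps only the timing fields relevant to the change above.
    type moduleResult struct {
        StartTime time.Time
        EndTime   time.Time
    }

    // newModuleResult records the start immediately, so elapsed() never
    // measures from the zero time when a module finishes.
    func newModuleResult() *moduleResult {
        return &moduleResult{StartTime: time.Now()}
    }

    func (m *moduleResult) elapsed() time.Duration {
        end := m.EndTime
        if end.IsZero() {
            end = time.Now()
        }
        return end.Sub(m.StartTime)
    }
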
@@ -17,15 +17,7 @@ func (u *UpdateCRStatusHook) Try() error {
 		return nil
 	}
 
-	conf := &common.KubeConf{
-		ClusterHosts: kubeRuntime.ClusterHosts,
-		Cluster:      kubeRuntime.Cluster,
-		Kubeconfig:   kubeRuntime.Kubeconfig,
-		Conditions:   kubeRuntime.Conditions,
-		ClientSet:    kubeRuntime.ClientSet,
-		Arg:          kubeRuntime.Arg,
-	}
-	if err := kubekeycontroller.UpdateClusterConditions(conf, u.Desc, u.Result); err != nil {
+	if err := kubekeycontroller.UpdateClusterConditions(kubeRuntime, u.Desc, u.Result); err != nil {
 		return err
 	}
 	return nil

@@ -23,7 +23,7 @@ import (
 func NewAddNodesPipeline(runtime *common.KubeRuntime) error {
 	m := []module.Module{
 		&precheck.NodePreCheckModule{},
-		&confirm.InstallConfirmModule{},
+		&confirm.InstallConfirmModule{Skip: runtime.Arg.SkipConfirmCheck},
 		&binaries.NodeBinariesModule{},
 		&os.ConfigureOSModule{},
 		&kubernetes.KubernetesStatusModule{},

@@ -33,7 +33,7 @@ func NewCreateClusterPipeline(runtime *common.KubeRuntime) error {
 
 	m := []module.Module{
 		&precheck.NodePreCheckModule{},
-		&confirm.InstallConfirmModule{},
+		&confirm.InstallConfirmModule{Skip: runtime.Arg.SkipConfirmCheck},
 		&binaries.NodeBinariesModule{},
 		&os.ConfigureOSModule{},
 		&kubernetes.KubernetesStatusModule{},

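Both pipelines now thread the SkipConfirmCheck argument into the confirm module, which is what lets kk run unattended, for example as the in-cluster Job built by the controller, where there is no TTY to answer a prompt. A simplified sketch of that skip pattern follows, with made-up interface and type names rather than KubeKey's real module API:

    package sketch

    import "fmt"

    // module is a stand-in for a pipeline step.
    type module interface {
        Name() string
        Skipped() bool
        Run() error
    }

    // confirmModule would normally block on user input; Skip bypasses it.
    type confirmModule struct {
        Skip bool
    }

    func (c *confirmModule) Name() string  { return "confirm" }
    func (c *confirmModule) Skipped() bool { return c.Skip }
    func (c *confirmModule) Run() error    { return nil } // the real module prompts for "yes/no" here

    // runPipeline executes modules in order, skipping any that opt out.
    func runPipeline(modules []module) error {
        for _, m := range modules {
            if m.Skipped() {
                fmt.Printf("skip module %s\n", m.Name())
                continue
            }
            if err := m.Run(); err != nil {
                return err
            }
        }
        return nil
    }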