make kubekey namespace configurable

Signed-off-by: Ben Ye <ben.ye@bytedance.com>
This commit is contained in:
Ben Ye 2022-04-07 11:16:57 -07:00
parent 999a773417
commit 6318e88346
7 changed files with 26 additions and 19 deletions

View File

@ -76,6 +76,7 @@ func (o *AddNodesOptions) Run() error {
ContainerManager: o.ContainerManager,
Artifact: o.Artifact,
InstallPackages: o.InstallPackages,
Namespace: o.CommonOptions.Namespace,
}
return pipelines.AddNodes(arg, o.DownloadCmd)
}

View File

@ -117,6 +117,7 @@ func (o *CreateClusterOptions) Run() error {
ContainerManager: o.ContainerManager,
Artifact: o.Artifact,
InstallPackages: o.InstallPackages,
Namespace: o.CommonOptions.Namespace,
}
if o.localStorageChanged {

View File

@ -25,6 +25,7 @@ type CommonOptions struct {
Verbose bool
SkipConfirmCheck bool
IgnoreErr bool
Namespace string
}
func NewCommonOptions() *CommonOptions {
@ -36,4 +37,5 @@ func (o *CommonOptions) AddCommonFlag(cmd *cobra.Command) {
cmd.Flags().BoolVar(&o.Verbose, "debug", false, "Print detailed information")
cmd.Flags().BoolVarP(&o.SkipConfirmCheck, "yes", "y", false, "Skip confirm check")
cmd.Flags().BoolVar(&o.IgnoreErr, "ignore-err", false, "Ignore the error message, remove the host which reported error and force to continue")
cmd.Flags().StringVar(&o.Namespace, "namespace", "kubekey-system", "KubeKey namespace to use")
}

View File

@ -113,7 +113,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
}
// Check if the configMap already exists
if err := r.Get(ctx, types.NamespacedName{Name: cluster.Name, Namespace: "kubekey-system"}, cmFound); err == nil {
if err := r.Get(ctx, types.NamespacedName{Name: cluster.Name, Namespace: req.Namespace}, cmFound); err == nil {
clusterAlreadyExist = true
}
@ -219,7 +219,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
// Synchronizing Node Information
kubeConfigCm := &corev1.ConfigMap{}
if err := r.Get(ctx, types.NamespacedName{Name: fmt.Sprintf("%s-kubeconfig", cluster.Name), Namespace: "kubekey-system"}, kubeConfigCm); err == nil && len(cluster.Status.Nodes) != 0 {
if err := r.Get(ctx, types.NamespacedName{Name: fmt.Sprintf("%s-kubeconfig", cluster.Name), Namespace: req.Namespace}, kubeConfigCm); err == nil && len(cluster.Status.Nodes) != 0 {
// fixme: this code will delete the kube config configmap when the user deletes the CR cluster.
// And if the user applies this deleted CR cluster again, kk will no longer be able to find the kube config.
@ -365,7 +365,7 @@ func (r *ClusterReconciler) configMapForCluster(c *kubekeyv1alpha2.Cluster) *cor
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: "kubekey-system",
Namespace: c.Namespace,
Labels: map[string]string{"kubekey.kubesphere.io/name": c.Name},
OwnerReferences: []metav1.OwnerReference{{
APIVersion: c.APIVersion,
@ -387,15 +387,15 @@ func (r *ClusterReconciler) jobForCluster(c *kubekeyv1alpha2.Cluster, action str
)
if action == CreateCluster {
name = fmt.Sprintf("%s-create-cluster", c.Name)
args = []string{"create", "cluster", "-f", "/home/kubekey/config/cluster.yaml", "-y", "--in-cluster", "true"}
args = []string{"create", "cluster", "-f", "/home/kubekey/config/cluster.yaml", "-y", "--in-cluster", "true", "--namespace", c.Namespace}
} else if action == AddNodes {
name = fmt.Sprintf("%s-add-nodes", c.Name)
args = []string{"add", "nodes", "-f", "/home/kubekey/config/cluster.yaml", "-y", "--in-cluster", "true", "--ignore-err", "true"}
args = []string{"add", "nodes", "-f", "/home/kubekey/config/cluster.yaml", "-y", "--in-cluster", "true", "--ignore-err", "true", "--namespace", c.Namespace}
}
podlist := &corev1.PodList{}
listOpts := []client.ListOption{
client.InNamespace("kubekey-system"),
client.InNamespace(c.Namespace),
client.MatchingLabels{"control-plane": "controller-manager"},
}
err := r.List(context.TODO(), podlist, listOpts...)
@ -418,7 +418,7 @@ func (r *ClusterReconciler) jobForCluster(c *kubekeyv1alpha2.Cluster, action str
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "kubekey-system",
Namespace: c.Namespace,
Labels: map[string]string{"kubekey.kubesphere.io/name": c.Name},
OwnerReferences: []metav1.OwnerReference{{
APIVersion: c.APIVersion,
@ -514,7 +514,7 @@ func updateStatusRunner(r *ClusterReconciler, req ctrl.Request, cluster *kubekey
podlist := &corev1.PodList{}
listOpts := []client.ListOption{
client.InNamespace("kubekey-system"),
client.InNamespace(req.Namespace),
client.MatchingLabels{"job-name": name},
}
for i := 0; i < 100; i++ {
@ -530,7 +530,7 @@ func updateStatusRunner(r *ClusterReconciler, req ctrl.Request, cluster *kubekey
if len(podlist.Items[0].ObjectMeta.GetName()) != 0 && len(podlist.Items[0].Status.ContainerStatuses[0].Name) != 0 && podlist.Items[0].Status.Phase != "Pending" {
cluster.Status.JobInfo = kubekeyv1alpha2.JobInfo{
Namespace: "kubekey-system",
Namespace: req.Namespace,
Name: name,
Pods: []kubekeyv1alpha2.PodInfo{{
Name: podlist.Items[0].ObjectMeta.GetName(),
@ -553,7 +553,7 @@ func updateStatusRunner(r *ClusterReconciler, req ctrl.Request, cluster *kubekey
func updateClusterConfigMap(r *ClusterReconciler, ctx context.Context, cluster *kubekeyv1alpha2.Cluster, cmFound *corev1.ConfigMap, log logr.Logger) error {
// Check if the configmap already exists, if not create a new one
if err := r.Get(ctx, types.NamespacedName{Name: cluster.Name, Namespace: "kubekey-system"}, cmFound); err != nil && !kubeErr.IsNotFound(err) {
if err := r.Get(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, cmFound); err != nil && !kubeErr.IsNotFound(err) {
log.Error(err, "Failed to get ConfigMap", "ConfigMap.Namespace", cmFound.Namespace, "ConfigMap.Name", cmFound.Name)
return err
} else if err == nil {
@ -584,13 +584,13 @@ func updateRunJob(r *ClusterReconciler, req ctrl.Request, ctx context.Context, c
}
// Check if the job already exists, if not create a new one
if err := r.Get(ctx, types.NamespacedName{Name: name, Namespace: "kubekey-system"}, jobFound); err != nil && !kubeErr.IsNotFound(err) {
if err := r.Get(ctx, types.NamespacedName{Name: name, Namespace: req.Namespace}, jobFound); err != nil && !kubeErr.IsNotFound(err) {
return err
} else if err == nil && (jobFound.Status.Failed != 0 || jobFound.Status.Succeeded != 0) {
// delete old pods
podlist := &corev1.PodList{}
listOpts := []client.ListOption{
client.InNamespace("kubekey-system"),
client.InNamespace(req.Namespace),
client.MatchingLabels{"job-name": name},
}
if err := r.List(context.TODO(), podlist, listOpts...); err == nil && len(podlist.Items) != 0 {
@ -607,7 +607,7 @@ func updateRunJob(r *ClusterReconciler, req ctrl.Request, ctx context.Context, c
err := wait.PollInfinite(1*time.Second, func() (bool, error) {
log.Info("Checking old job is deleted", "Job.Namespace", jobFound.Namespace, "Job.Name", jobFound.Name)
if e := r.Get(ctx, types.NamespacedName{Name: name, Namespace: "kubekey-system"}, jobFound); e != nil {
if e := r.Get(ctx, types.NamespacedName{Name: name, Namespace: req.Namespace}, jobFound); e != nil {
if kubeErr.IsNotFound(e) {
return true, nil
}
@ -858,7 +858,7 @@ func otherClusterDiff(r *ClusterReconciler, ctx context.Context, c *kubekeyv1alp
}
kubeConfigFound := &corev1.ConfigMap{}
if err := r.Get(ctx, types.NamespacedName{Name: fmt.Sprintf("%s-kubeconfig", c.Name), Namespace: "kubekey-system"}, kubeConfigFound); err != nil {
if err := r.Get(ctx, types.NamespacedName{Name: fmt.Sprintf("%s-kubeconfig", c.Name), Namespace: c.Namespace}, kubeConfigFound); err != nil {
if kubeErr.IsNotFound(err) {
return newNodes, nil
}

View File

@ -262,7 +262,7 @@ func getClusterClientSet(runtime *common.KubeRuntime) (*kube.Clientset, error) {
cm, err := clientset.
CoreV1().
ConfigMaps("kubekey-system").
ConfigMaps(runtime.Arg.Namespace).
Get(context.TODO(), fmt.Sprintf("%s-kubeconfig", runtime.ClusterName), metav1.GetOptions{})
if err != nil {
return nil, err
@ -386,7 +386,7 @@ func SaveKubeConfig(runtime *common.KubeRuntime) error {
if err != nil {
return err
}
cmClientset := clientset.CoreV1().ConfigMaps("kubekey-system")
cmClientset := clientset.CoreV1().ConfigMaps(runtime.Arg.Namespace)
if _, err := cmClientset.Get(context.TODO(), fmt.Sprintf("%s-kubeconfig", runtime.ClusterName), metav1.GetOptions{}); err != nil {
if kubeErr.IsNotFound(err) {

View File

@ -51,9 +51,11 @@ func init() {
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
var (
metricsAddr string
enableLeaderElection bool
probeAddr string
)
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,

View File

@ -53,6 +53,7 @@ type Argument struct {
Artifact string
InstallPackages bool
ImagesDir string
Namespace string
}
func NewKubeRuntime(flag string, arg Argument) (*KubeRuntime, error) {