Mirror of https://github.com/kubesphere/kubekey.git, synced 2025-12-25 17:12:50 +00:00
doc: defined env by struct (#2589)
Signed-off-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
parent 202700fb43
commit 8c84ea7a33
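In short, the commit replaces the flat ENV_* string constants, and the per-call-site fallback logic around os.Getenv/os.LookupEnv, with an Environment struct that carries both the variable name and its default value, read through a single _const.Getenv helper. Below is a standalone sketch of that pattern; the lowercase names are illustrative stand-ins, not the kubekey identifiers themselves.

package main

import (
	"fmt"
	"os"
)

// environment mirrors the Environment struct added by this commit: it pairs an
// environment variable name with the default value to use when it is unset.
type environment struct {
	env string // environment variable name
	def string // default value if the variable is not set
}

var executorImage = environment{env: "EXECUTOR_IMAGE", def: "docker.io/kubesphere/executor:latest"}

// getenv returns the variable's value, falling back to the declared default.
func getenv(e environment) string {
	if val, ok := os.LookupEnv(e.env); ok {
		return val
	}
	return e.def
}

func main() {
	// Prints the declared default unless EXECUTOR_IMAGE is set in the environment.
	fmt.Println(getenv(executorImage))
}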
@@ -36,7 +36,6 @@ const (
 	connectedLocal      = "local"
 	connectedKubernetes = "kubernetes"
 	connectedPrometheus = "prometheus"
-	defaultSHELL        = "/bin/bash"
 )
 
 // Connector is the interface for connecting to a remote host
@@ -46,7 +46,6 @@ func newKubernetesConnector(host string, workdir string, hostVars map[string]any
 		cmd:         exec.New(),
 		clusterName: host,
 		kubeconfig:  kubeconfig,
-		shell:       defaultSHELL,
 	}, nil
 }
 
@@ -94,10 +93,7 @@ func (c *kubernetesConnector) Init(_ context.Context) error {
 		return errors.Wrapf(err, "failed to create kubeconfig file for cluster %q", c.clusterName)
 	}
 	// find command interpreter in env. default /bin/bash
-	sl, ok := os.LookupEnv(_const.ENV_SHELL)
-	if ok {
-		c.shell = sl
-	}
+	c.shell = _const.Getenv(_const.Shell)
 
 	return nil
 }
@@ -45,7 +45,6 @@ func newLocalConnector(workdir string, hostVars map[string]any) *localConnector
 	connector := &localConnector{
 		Password: password,
 		Cmd:      exec.New(),
-		shell:    defaultSHELL,
 	}
 	// Initialize the cacheGatherFact with a function that will call getHostInfoFromRemote
 	connector.gatherFacts = newCacheGatherFact(_const.VariableLocalHost, cacheType, workdir, connector.getHostInfo)
@@ -65,10 +64,7 @@ type localConnector struct {
 // Init initializes the local connector. This method does nothing for localConnector.
 func (c *localConnector) Init(context.Context) error {
 	// find command interpreter in env. default /bin/bash
-	sl, ok := os.LookupEnv(_const.ENV_SHELL)
-	if ok {
-		c.shell = sl
-	}
+	c.shell = _const.Getenv(_const.Shell)
 
 	return nil
 }
@@ -99,7 +99,6 @@ func newSSHConnector(workdir, host string, hostVars map[string]any) *sshConnector
 		Password:          passwdParam,
 		PrivateKey:        keyParam,
 		PrivateKeyContent: keycontentParam,
-		shell:             defaultSHELL,
 	}
 
 	// Initialize the cacheGatherFact with a function that will call getHostInfoFromRemote
@@ -177,6 +176,8 @@ func (c *sshConnector) Init(context.Context) error {
 
 	if strings.TrimSpace(string(output)) != "" {
 		c.shell = strings.TrimSpace(string(output))
+	} else {
+		c.shell = "/bin/bash"
 	}
 
 	return nil
@@ -92,31 +92,6 @@ const ( // === From runtime ===
 	VariableItem = "item"
 )
 
-const ( // === From env ===
-	// ENV_SHELL which shell operator use in local connector.
-	ENV_SHELL = "SHELL"
-	// ENV_EXECUTOR_VERBOSE which verbose use in playbook pod.
-	ENV_EXECUTOR_VERBOSE = "EXECUTOR_VERBOSE"
-	// ENV_EXECUTOR_IMAGE which image use in playbook pod.
-	ENV_EXECUTOR_IMAGE = "EXECUTOR_IMAGE"
-	// ENV_EXECUTOR_IMAGE_PULLPOLICY which imagePolicy use in playbook pod.
-	ENV_EXECUTOR_IMAGE_PULLPOLICY = "EXECUTOR_IMAGE_PULLPOLICY"
-	// ENV_EXECUTOR_CLUSTERROLE which clusterrole use in playbook pod.
-	ENV_EXECUTOR_CLUSTERROLE = "EXECUTOR_CLUSTERROLE"
-	// ENV_CAPKK_GROUP_CONTROLPLANE the control_plane groups for capkk playbook
-	ENV_CAPKK_GROUP_CONTROLPLANE = "CAPKK_GROUP_CONTROLPLANE"
-	// ENV_CAPKK_GROUP_WORKER the worker groups for capkk playbook
-	ENV_CAPKK_GROUP_WORKER = "CAPKK_GROUP_WORKER"
-	// ENV_CAPKK_VOLUME_BINARY is the binary dir for capkk playbook. used in offline installer.
-	// the value should be a pvc name.
-	ENV_CAPKK_VOLUME_BINARY = "CAPKK_VOLUME_BINARY"
-	// ENV_CAPKK_VOLUME_PROJECT is the project dir for capkk playbook. the default project has contained in IMAGE.
-	// the value should be a pvc name.
-	ENV_CAPKK_VOLUME_PROJECT = "CAPKK_VOLUME_PROJECT"
-	// ENV_CAPKK_VOLUME_WORKDIR is the workdir for capkk playbook.
-	ENV_CAPKK_VOLUME_WORKDIR = "CAPKK_VOLUME_WORKDIR"
-)
-
 const ( // === From CAPKK base on GetCAPKKProject() ===
 	// CAPKKWorkdir is the work dir for capkk playbook.
 	CAPKKWorkdir = "/kubekey/"
@@ -0,0 +1,44 @@
+package _const
+
+import "os"
+
+// Environment represents an environment variable with its name and default value
+type Environment struct {
+	env string // environment variable name
+	def string // default value if environment variable is not set
+}
+
+var (
+	// Shell specifies which shell operator uses in local connector
+	Shell = Environment{env: "SHELL", def: "/bin/bash"}
+
+	// ExecutorVerbose specifies the verbosity level used in playbook pod
+	ExecutorVerbose = Environment{env: "EXECUTOR_VERBOSE"}
+	// ExecutorImage specifies the container image used in playbook pod
+	ExecutorImage = Environment{env: "EXECUTOR_IMAGE", def: "docker.io/kubesphere/executor:latest"}
+	// ExecutorImagePullPolicy specifies the image pull policy used in playbook pod
+	ExecutorImagePullPolicy = Environment{env: "EXECUTOR_IMAGE_PULLPOLICY"}
+	// ExecutorClusterRole specifies the cluster role used in playbook pod
+	ExecutorClusterRole = Environment{env: "EXECUTOR_CLUSTERROLE"}
+
+	// CapkkGroupControlPlane specifies the control plane groups for capkk playbook
+	CapkkGroupControlPlane = Environment{env: "CAPKK_GROUP_CONTROLPLANE", def: "kube_control_plane"}
+	// CapkkGroupWorker specifies the worker groups for capkk playbook
+	CapkkGroupWorker = Environment{env: "CAPKK_GROUP_WORKER", def: "kube_worker"}
+	// CapkkVolumeBinary specifies a persistent volume containing the CAPKKBinarydir for capkk playbook, used in offline installer
+	CapkkVolumeBinary = Environment{env: "CAPKK_VOLUME_BINARY"}
+	// CapkkVolumeProject specifies a persistent volume containing the CAPKKProjectdir for capkk playbook
+	CapkkVolumeProject = Environment{env: "CAPKK_VOLUME_PROJECT"}
+	// CapkkVolumeWorkdir specifies the working directory for capkk playbook
+	CapkkVolumeWorkdir = Environment{env: "CAPKK_VOLUME_WORKDIR"}
+)
+
+// Getenv retrieves the value of the environment variable. If the environment variable is not set,
+// it returns the default value specified in the Environment struct.
+func Getenv(env Environment) string {
+	val, ok := os.LookupEnv(env.env)
+	if !ok {
+		return env.def
+	}
+	return val
+}
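A hedged usage sketch of the new helper, assuming the _const package above is imported at the call site: variables declared with a def come back with that default when unset, while variables declared without one simply return an empty string.

// Sketch only; `_const` refers to the package defined in the file above.
shell := _const.Getenv(_const.Shell)             // "/bin/bash" when SHELL is unset
image := _const.Getenv(_const.ExecutorImage)     // "docker.io/kubesphere/executor:latest" when EXECUTOR_IMAGE is unset
verbose := _const.Getenv(_const.ExecutorVerbose) // "" when EXECUTOR_VERBOSE is unset (no default declared)

Because env and def are unexported, new Environment values can only be declared inside the _const package itself, which keeps the catalogue of recognized environment variables and their defaults in one place.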
@@ -18,7 +18,6 @@ package core
 
 import (
 	"context"
-	"os"
 
 	"github.com/cockroachdb/errors"
 	kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
@@ -43,9 +42,8 @@ import (
 
 const (
 	// playbookPodLabel set in pod. value is which playbook belongs to.
-	podPlaybookLabel     = "kubekey.kubesphere.io/playbook"
-	defaultExecutorImage = "docker.io/kubesphere/executor:latest"
-	executorContainer    = "executor"
+	podPlaybookLabel  = "kubekey.kubesphere.io/playbook"
+	executorContainer = "executor"
 )
 
 // PlaybookReconciler reconcile playbook
@@ -229,7 +227,7 @@ func (r *PlaybookReconciler) dealRunningPlaybook(ctx context.Context, playbook *
 			Containers: []corev1.Container{
 				{
 					Name:    executorContainer,
-					Image:   defaultExecutorImage,
+					Image:   _const.Getenv(_const.ExecutorImage),
 					Command: []string{"kk"},
 					Args: []string{"playbook",
 						"--name", playbook.Name,
@@ -244,15 +242,11 @@ func (r *PlaybookReconciler) dealRunningPlaybook(ctx context.Context, playbook *
 		},
 	}
 	// get verbose from env
-	if verbose := os.Getenv(_const.ENV_EXECUTOR_VERBOSE); verbose != "" {
+	if verbose := _const.Getenv(_const.ExecutorVerbose); verbose != "" {
 		pod.Spec.Containers[0].Args = append(pod.Spec.Containers[0].Args, "-v", verbose)
 	}
-	// get image from env
-	if image := os.Getenv(_const.ENV_EXECUTOR_IMAGE); image != "" {
-		pod.Spec.Containers[0].Image = image
-	}
-	// get image from env
-	if imagePullPolicy := os.Getenv(_const.ENV_EXECUTOR_IMAGE_PULLPOLICY); imagePullPolicy != "" {
+	// get ImagePullPolicy from env
+	if imagePullPolicy := _const.Getenv(_const.ExecutorImagePullPolicy); imagePullPolicy != "" {
 		pod.Spec.Containers[0].ImagePullPolicy = corev1.PullPolicy(imagePullPolicy)
 	}
 	if err := ctrl.SetControllerReference(playbook, pod, r.Client.Scheme()); err != nil {
@@ -2,7 +2,6 @@ package core
 
 import (
 	"context"
-	"os"
 
 	"github.com/cockroachdb/errors"
 	kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
@@ -60,9 +59,9 @@ func (w *PlaybookWebhook) Default(ctx context.Context, obj runtime.Object) error
 	if !ok {
 		return errors.Errorf("failed to convert %q to playbooks", obj.GetObjectKind().GroupVersionKind().String())
 	}
-	if playbook.Spec.ServiceAccountName == "" && os.Getenv(_const.ENV_EXECUTOR_CLUSTERROLE) != "" {
+	if playbook.Spec.ServiceAccountName == "" && _const.Getenv(_const.ExecutorClusterRole) != "" {
 		// should create default service account in current namespace
-		if err := w.syncServiceAccount(ctx, playbook, os.Getenv(_const.ENV_EXECUTOR_CLUSTERROLE)); err != nil {
+		if err := w.syncServiceAccount(ctx, playbook, _const.Getenv(_const.ExecutorClusterRole)); err != nil {
 			return err
 		}
 		playbook.Spec.ServiceAccountName = defaultServiceAccountName
@@ -2,7 +2,6 @@ package infrastructure
 
 import (
 	"context"
-	"os"
 
 	"github.com/cockroachdb/errors"
 	capkkinfrav1beta1 "github.com/kubesphere/kubekey/api/capkk/infrastructure/v1beta1"
@@ -23,27 +22,16 @@ import (
 )
 
 const (
-	defaultGroupControlPlane = "kube_control_plane"
-	defaultGroupWorker       = "kube_worker"
-	defaultClusterGroup      = "k8s_cluster"
+	defaultGroupWorker  = "kube_worker"
+	defaultClusterGroup = "k8s_cluster"
 )
 
 func getControlPlaneGroupName() string {
-	groupName := os.Getenv(_const.ENV_CAPKK_GROUP_CONTROLPLANE)
-	if groupName == "" {
-		groupName = defaultGroupControlPlane
-	}
-
-	return groupName
+	return _const.Getenv(_const.CapkkGroupControlPlane)
 }
 
 func getWorkerGroupName() string {
-	groupName := os.Getenv(_const.ENV_CAPKK_GROUP_WORKER)
-	if groupName == "" {
-		groupName = defaultGroupWorker
-	}
-
-	return groupName
+	return _const.Getenv(_const.CapkkGroupWorker)
 }
 
 // the cluster resource in kubernetes. only contains the single resource for cluster.
@@ -148,7 +136,7 @@ func (p *clusterScope) getVolumeMounts(ctx context.Context) ([]corev1.Volume, []
 	volumeMounts := make([]corev1.VolumeMount, 0)
 	volumes := make([]corev1.Volume, 0)
 
-	if binaryPVC := os.Getenv(_const.ENV_CAPKK_VOLUME_BINARY); binaryPVC != "" {
+	if binaryPVC := _const.Getenv(_const.CapkkVolumeBinary); binaryPVC != "" {
 		volumeMounts = append(volumeMounts, corev1.VolumeMount{
 			Name:      "kubekey",
 			MountPath: _const.CAPKKBinarydir,
@@ -162,7 +150,7 @@ func (p *clusterScope) getVolumeMounts(ctx context.Context) ([]corev1.Volume, []
 			},
 		})
 	}
-	if projectPVC := os.Getenv(_const.ENV_CAPKK_VOLUME_PROJECT); projectPVC != "" {
+	if projectPVC := _const.Getenv(_const.CapkkVolumeProject); projectPVC != "" {
 		volumeMounts = append(volumeMounts, corev1.VolumeMount{
 			Name:      "project",
 			MountPath: _const.CAPKKProjectdir,
@@ -177,7 +165,7 @@ func (p *clusterScope) getVolumeMounts(ctx context.Context) ([]corev1.Volume, []
 			},
 		})
 	}
-	if workdirPVC := os.Getenv(_const.ENV_CAPKK_VOLUME_WORKDIR); workdirPVC != "" {
+	if workdirPVC := _const.Getenv(_const.CapkkVolumeWorkdir); workdirPVC != "" {
 		volumeMounts = append(volumeMounts, corev1.VolumeMount{
 			Name:      "workdir",
 			MountPath: _const.CAPKKWorkdir,