remove kubesphere type

pixiake 2020-06-09 12:51:53 +08:00
parent b8f97aa451
commit d06856450c
7 changed files with 237 additions and 350 deletions

cmd/manager/main.go (new file, +211 lines)

@@ -0,0 +1,211 @@
package main
import (
"context"
"errors"
"flag"
"fmt"
"os"
"runtime"
"strings"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"github.com/kubesphere/kubekey/pkg/apis"
"github.com/kubesphere/kubekey/pkg/controller"
"github.com/kubesphere/kubekey/version"
"github.com/operator-framework/operator-sdk/pkg/k8sutil"
kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics"
"github.com/operator-framework/operator-sdk/pkg/leader"
"github.com/operator-framework/operator-sdk/pkg/log/zap"
"github.com/operator-framework/operator-sdk/pkg/metrics"
sdkVersion "github.com/operator-framework/operator-sdk/version"
"github.com/spf13/pflag"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client/config"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)
// Change below variables to serve metrics on different host or port.
var (
metricsHost = "0.0.0.0"
metricsPort int32 = 8383
operatorMetricsPort int32 = 8686
)
var log = logf.Log.WithName("cmd")
func printVersion() {
log.Info(fmt.Sprintf("Operator Version: %s", version.Version))
log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
}
func main() {
// Add the zap logger flag set to the CLI. The flag set must
// be added before calling pflag.Parse().
pflag.CommandLine.AddFlagSet(zap.FlagSet())
// Add flags registered by imported packages (e.g. glog and
// controller-runtime)
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
// Use a zap logr.Logger implementation. If none of the zap
// flags are configured (or if the zap flag set is not being
// used), this defaults to a production zap logger.
//
// The logger instantiated here can be changed to any logger
// implementing the logr.Logger interface. This logger will
// be propagated through the whole operator, generating
// uniform and structured logs.
logf.SetLogger(zap.Logger())
printVersion()
namespace, err := k8sutil.GetWatchNamespace()
if err != nil {
log.Error(err, "Failed to get watch namespace")
os.Exit(1)
}
// Get a config to talk to the apiserver
cfg, err := config.GetConfig()
if err != nil {
log.Error(err, "")
os.Exit(1)
}
ctx := context.TODO()
// Become the leader before proceeding
err = leader.Become(ctx, "kubekey-lock")
if err != nil {
log.Error(err, "")
os.Exit(1)
}
// Set default manager options
options := manager.Options{
Namespace: namespace,
MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
}
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
// Also note that you may face performance issues when using this with a high number of namespaces.
// More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
if strings.Contains(namespace, ",") {
options.Namespace = ""
options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ","))
}
// Create a new manager to provide shared dependencies and start components
mgr, err := manager.New(cfg, options)
if err != nil {
log.Error(err, "")
os.Exit(1)
}
log.Info("Registering Components.")
// Setup Scheme for all resources
if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
log.Error(err, "")
os.Exit(1)
}
// Setup all Controllers
if err := controller.AddToManager(mgr); err != nil {
log.Error(err, "")
os.Exit(1)
}
// Add the Metrics Service
addMetrics(ctx, cfg)
log.Info("Starting the Cmd.")
// Start the Cmd
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
log.Error(err, "Manager exited non-zero")
os.Exit(1)
}
}
// addMetrics will create the Services and Service Monitors to allow the operator export the metrics by using
// the Prometheus operator
func addMetrics(ctx context.Context, cfg *rest.Config) {
// Get the namespace the operator is currently deployed in.
operatorNs, err := k8sutil.GetOperatorNamespace()
if err != nil {
if errors.Is(err, k8sutil.ErrRunLocal) {
log.Info("Skipping CR metrics server creation; not running in a cluster.")
return
}
}
if err := serveCRMetrics(cfg, operatorNs); err != nil {
log.Info("Could not generate and serve custom resource metrics", "error", err.Error())
}
// Add to the below struct any other metrics ports you want to expose.
servicePorts := []v1.ServicePort{
{Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}},
{Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}},
}
// Create Service object to expose the metrics port(s).
service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts)
if err != nil {
log.Info("Could not create metrics Service", "error", err.Error())
}
// CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources
// necessary to configure Prometheus to scrape metrics from this operator.
services := []*v1.Service{service}
// The ServiceMonitor is created in the same namespace where the operator is deployed
_, err = metrics.CreateServiceMonitors(cfg, operatorNs, services)
if err != nil {
log.Info("Could not create ServiceMonitor object", "error", err.Error())
// If this operator is deployed to a cluster without the prometheus-operator running, it will return
// ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation.
if err == metrics.ErrServiceMonitorNotPresent {
log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error())
}
}
}
// serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types.
// It serves those metrics on "http://metricsHost:operatorMetricsPort".
func serveCRMetrics(cfg *rest.Config, operatorNs string) error {
// The function below returns a list of filtered operator/CR specific GVKs. For more control, override the GVK list below
// with your own custom logic. Note that if you are adding third party API schemas, probably you will need to
// customize this implementation to avoid permissions issues.
filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme)
if err != nil {
return err
}
// The metrics will be generated from the namespaces which are returned here.
// NOTE that passing nil or an empty list of namespaces in GenerateAndServeCRMetrics will result in an error.
ns, err := kubemetrics.GetNamespacesForMetrics(operatorNs)
if err != nil {
return err
}
// Generate and serve custom resource specific metrics.
err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort)
if err != nil {
return err
}
return nil
}
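For context on the WATCH_NAMESPACE handling in main() above: when the variable contains a comma-separated list, the manager's Namespace option is cleared and a cache built by MultiNamespacedCacheBuilder is used instead; an empty value yields a cluster-wide cache. The standalone sketch below is not part of this commit; it only isolates that selection logic, and buildManagerOptions is a hypothetical helper name.

package main

import (
	"fmt"
	"strings"

	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// buildManagerOptions (hypothetical) shows how the WATCH_NAMESPACE value drives
// the cache configuration used in main() above:
//   ""        -> cluster-wide cache
//   "ns1"     -> single-namespace cache
//   "ns1,ns2" -> MultiNamespacedCacheBuilder over the listed namespaces
func buildManagerOptions(watchNamespace, metricsHost string, metricsPort int32) manager.Options {
	options := manager.Options{
		Namespace:          watchNamespace,
		MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
	}
	if strings.Contains(watchNamespace, ",") {
		// Multiple namespaces: clear Namespace and switch to a multi-namespace cache.
		options.Namespace = ""
		options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(watchNamespace, ","))
	}
	return options
}

func main() {
	opts := buildManagerOptions("ns1,ns2", "0.0.0.0", 8383)
	fmt.Println("multi-namespace cache enabled:", opts.NewCache != nil)
}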

View File

@@ -40,7 +40,6 @@ type ClusterSpec struct {
Network NetworkConfig `yaml:"network" json:"network,omitempty"`
Registry RegistryConfig `yaml:"registry" json:"registry,omitempty"`
Storage Storage `yaml:"storage" json:"storage,omitempty"`
KubeSphere KubeSphere `yaml:"kubesphere" json:"kubephere,omitempty"`
}
type Kubernetes struct {

View File

@@ -58,7 +58,6 @@ func (cfg *ClusterSpec) SetDefaultClusterSpec() (*ClusterSpec, *HostGroups) {
clusterCfg.Kubernetes = SetDefaultClusterCfg(cfg)
clusterCfg.Registry = cfg.Registry
clusterCfg.Storage = SetDefaultStorageCfg(cfg)
clusterCfg.KubeSphere = cfg.KubeSphere
if cfg.Kubernetes.ImageRepo == "" {
clusterCfg.Kubernetes.ImageRepo = DefaultKubeImageRepo
}

View File

@@ -1,106 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
type KubeSphere struct {
Console Console `yaml:"console" json:"console"`
Common Common `yaml:"common" json:"common"`
Openpitrix Openpitrix `yaml:"openpitrix" json:"openpitrix"`
Monitoring Monitoring `yaml:"monitoring" json:"monitoring"`
Logging Logging `yaml:"logging" json:"logging"`
Devops Devops `yaml:"devops" json:"devops"`
Notification Notification `yaml:"notification" json:"notification"`
Alerting Alerting `yaml:"alerting" json:"alerting"`
ServiceMesh ServiceMesh `yaml:"serviceMesh" json:"serviceMesh"`
MetricsServer MetricsServer `yaml:"metricsServer" json:"metricsServer"`
}
type Console struct {
EnableMultiLogin bool `yaml:"enableMultiLogin" json:"enableMultiLogin"`
Port int `yaml:"port" json:"port"`
}
type Common struct {
MysqlVolumeSize string `yaml:"mysqlVolumeSize" json:"mysqlVolumeSize"`
MinioVolumeSize string `yaml:"minioVolumeSize" json:"minioVolumeSize"`
EtcdVolumeSize string `yaml:"etcdVolumeSize" json:"etcdVolumeSize"`
OpenldapVolumeSize string `yaml:"openldapVolumeSize" json:"openldapVolumeSize"`
RedisVolumSize string `yaml:"redisVolumSize" json:"redisVolumSize"`
}
type Monitoring struct {
PrometheusReplicas int `yaml:"prometheusReplicas" json:"prometheusReplicas"`
PrometheusMemoryRequest string `yaml:"prometheusMemoryRequest" json:"prometheusMemoryRequest"`
PrometheusVolumeSize string `yaml:"prometheusVolumeSize" json:"prometheusVolumeSize"`
Grafana Grafana `yaml:"grafana" json:"grafana"`
}
type Grafana struct {
Enabled bool `yaml:"enabled" json:"enabled"`
}
type Logging struct {
Enabled bool `yaml:"enabled" json:"enabled"`
ElasticsearchMasterReplicas int `yaml:"elasticsearchMasterReplicas" json:"elasticsearchMasterReplicas"`
ElasticsearchDataReplicas int `yaml:"elasticsearchDataReplicas" json:"elasticsearchDataReplicas"`
LogsidecarReplicas int `yaml:"logsidecarReplicas" json:"logsidecarReplicas"`
ElasticsearchMasterVolumeSize string `yaml:"elasticsearchMasterVolumeSize" json:"elasticsearchMasterVolumeSize"`
ElasticsearchDataVolumeSize string `yaml:"elasticsearchDataVolumeSize" json:"elasticsearchDataVolumeSize"`
LogMaxAge int `yaml:"logMaxAge" json:"logMaxAge"`
ElkPrefix string `yaml:"elkPrefix" json:"elkPrefix"`
Kibana Kibana `yaml:"kibana" json:"kibana"`
}
type Kibana struct {
Enabled bool `yaml:"enabled" json:"enabled"`
}
type Devops struct {
Enabled bool `yaml:"enabled" json:"enabled"`
JenkinsMemoryLim string `yaml:"jenkinsMemoryLim" json:"jenkinsMemoryLim"`
JenkinsMemoryReq string `yaml:"jenkinsMemoryReq" json:"jenkinsMemoryReq"`
JenkinsVolumeSize string `yaml:"jenkinsVolumeSize" json:"jenkinsVolumeSize"`
JenkinsJavaOptsXms string `yaml:"jenkinsJavaOptsXms" json:"jenkinsJavaOptsXms"`
JenkinsJavaOptsXmx string `yaml:"jenkinsJavaOptsXmx" json:"jenkinsJavaOptsXmx"`
JenkinsJavaOptsMaxRAM string `yaml:"jenkinsJavaOptsMaxRAM" json:"jenkinsJavaOptsMaxRAM"`
Sonarqube Sonarqube `yaml:"sonarqube" json:"sonarqube"`
}
type Sonarqube struct {
Enabled bool `yaml:"enabled" json:"enabled"`
PostgresqlVolumeSize string `yaml:"postgresqlVolumeSize" json:"postgresqlVolumeSize"`
}
type Openpitrix struct {
Enabled bool `yaml:"enabled" json:"enabled"`
}
type ServiceMesh struct {
Enabled bool `yaml:"enabled" json:"enabled"`
}
type Notification struct {
Enabled bool `yaml:"enabled" json:"enabled"`
}
type Alerting struct {
Enabled bool `yaml:"enabled" json:"enabled"`
}
type MetricsServer struct {
Enabled bool `yaml:"enabled" json:"enabled"`
}

View File

@@ -8,22 +8,6 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Alerting) DeepCopyInto(out *Alerting) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alerting.
func (in *Alerting) DeepCopy() *Alerting {
if in == nil {
return nil
}
out := new(Alerting)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CephRBD) DeepCopyInto(out *CephRBD) {
*out = *in
@@ -120,7 +104,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
out.Network = in.Network
in.Registry.DeepCopyInto(&out.Registry)
in.Storage.DeepCopyInto(&out.Storage)
out.KubeSphere = in.KubeSphere
return
}
@@ -150,38 +133,6 @@ func (in *ClusterStatus) DeepCopy() *ClusterStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Common) DeepCopyInto(out *Common) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Common.
func (in *Common) DeepCopy() *Common {
if in == nil {
return nil
}
out := new(Common)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Console) DeepCopyInto(out *Console) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console.
func (in *Console) DeepCopy() *Console {
if in == nil {
return nil
}
out := new(Console)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControlPlaneEndpoint) DeepCopyInto(out *ControlPlaneEndpoint) {
*out = *in
@@ -198,23 +149,6 @@ func (in *ControlPlaneEndpoint) DeepCopy() *ControlPlaneEndpoint {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Devops) DeepCopyInto(out *Devops) {
*out = *in
out.Sonarqube = in.Sonarqube
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Devops.
func (in *Devops) DeepCopy() *Devops {
if in == nil {
return nil
}
out := new(Devops)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) {
*out = *in
@@ -252,22 +186,6 @@ func (in *GlusterFS) DeepCopy() *GlusterFS {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Grafana) DeepCopyInto(out *Grafana) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Grafana.
func (in *Grafana) DeepCopy() *Grafana {
if in == nil {
return nil
}
out := new(Grafana)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostCfg) DeepCopyInto(out *HostCfg) {
*out = *in
@@ -330,48 +248,6 @@ func (in *HostGroups) DeepCopy() *HostGroups {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Kibana) DeepCopyInto(out *Kibana) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kibana.
func (in *Kibana) DeepCopy() *Kibana {
if in == nil {
return nil
}
out := new(Kibana)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeSphere) DeepCopyInto(out *KubeSphere) {
*out = *in
out.Console = in.Console
out.Common = in.Common
out.Openpitrix = in.Openpitrix
out.Monitoring = in.Monitoring
out.Logging = in.Logging
out.Devops = in.Devops
out.Notification = in.Notification
out.Alerting = in.Alerting
out.ServiceMesh = in.ServiceMesh
out.MetricsServer = in.MetricsServer
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSphere.
func (in *KubeSphere) DeepCopy() *KubeSphere {
if in == nil {
return nil
}
out := new(KubeSphere)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Kubernetes) DeepCopyInto(out *Kubernetes) {
*out = *in
@@ -404,56 +280,6 @@ func (in *LocalVolume) DeepCopy() *LocalVolume {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Logging) DeepCopyInto(out *Logging) {
*out = *in
out.Kibana = in.Kibana
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logging.
func (in *Logging) DeepCopy() *Logging {
if in == nil {
return nil
}
out := new(Logging)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsServer) DeepCopyInto(out *MetricsServer) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsServer.
func (in *MetricsServer) DeepCopy() *MetricsServer {
if in == nil {
return nil
}
out := new(MetricsServer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Monitoring) DeepCopyInto(out *Monitoring) {
*out = *in
out.Grafana = in.Grafana
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Monitoring.
func (in *Monitoring) DeepCopy() *Monitoring {
if in == nil {
return nil
}
out := new(Monitoring)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkConfig) DeepCopyInto(out *NetworkConfig) {
*out = *in
@@ -486,38 +312,6 @@ func (in *NfsClient) DeepCopy() *NfsClient {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Notification) DeepCopyInto(out *Notification) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Notification.
func (in *Notification) DeepCopy() *Notification {
if in == nil {
return nil
}
out := new(Notification)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Openpitrix) DeepCopyInto(out *Openpitrix) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Openpitrix.
func (in *Openpitrix) DeepCopy() *Openpitrix {
if in == nil {
return nil
}
out := new(Openpitrix)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RegistryConfig) DeepCopyInto(out *RegistryConfig) {
*out = *in
@@ -575,38 +369,6 @@ func (in *RoleGroups) DeepCopy() *RoleGroups {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceMesh) DeepCopyInto(out *ServiceMesh) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMesh.
func (in *ServiceMesh) DeepCopy() *ServiceMesh {
if in == nil {
return nil
}
out := new(ServiceMesh)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Sonarqube) DeepCopyInto(out *Sonarqube) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sonarqube.
func (in *Sonarqube) DeepCopy() *Sonarqube {
if in == nil {
return nil
}
out := new(Sonarqube)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Storage) DeepCopyInto(out *Storage) {
*out = *in

View File

@@ -123,11 +123,15 @@ EOF
}
if mgr.Cluster.Registry.PrivateRegistry != "" {
configMapBase64, err := exec.Command("/bin/sh", "-c", fmt.Sprintf("sed -i \"/local_registry/s/\\:.*/\\: %s/g\" %s", mgr.Cluster.Registry.PrivateRegistry, configMap)).CombinedOutput()
_, err := exec.Command("/bin/sh", "-c", fmt.Sprintf("sed -i \"/local_registry/s/\\:.*/\\: %s/g\" %s", mgr.Cluster.Registry.PrivateRegistry, configMap)).CombinedOutput()
if err != nil {
fmt.Println(string(configMapBase64))
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("Failed to add private registry: %s", mgr.Cluster.Registry.PrivateRegistry))
}
} else {
_, err := exec.Command("/bin/sh", "-c", fmt.Sprintf("sed -i '/local_registry/d' %s", configMap)).CombinedOutput()
if err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("Failed to remove private registry"))
}
}
configMapBase64, err := exec.Command("/bin/sh", "-c", fmt.Sprintf("cat %s | base64 --wrap=0", configMap)).CombinedOutput()
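In the hunk above, the old line that captured the sed output as configMapBase64 (and the fmt.Println of it) is replaced by a call that discards the output, and a new else branch is added that deletes the local_registry line when no private registry is configured. The two shell commands amount to a simple text transformation; a rough pure-Go equivalent is sketched below for illustration only (rewriteLocalRegistry is a hypothetical name and the sample config lines are illustrative; the actual code keeps shelling out to sed).

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// rewriteLocalRegistry mimics the effect of the two sed invocations above on the
// in-memory contents of the ks-installer config:
//   sed -i "/local_registry/s/:.*/: <registry>/g"  when a private registry is set
//   sed -i "/local_registry/d"                     when it is not
func rewriteLocalRegistry(config, privateRegistry string) string {
	lines := strings.Split(config, "\n")
	out := make([]string, 0, len(lines))
	re := regexp.MustCompile(`:.*`)
	for _, line := range lines {
		if strings.Contains(line, "local_registry") {
			if privateRegistry == "" {
				continue // drop the line, like sed '/local_registry/d'
			}
			// Replace everything from the colon onwards with the registry value.
			line = re.ReplaceAllString(line, ": "+privateRegistry)
		}
		out = append(out, line)
	}
	return strings.Join(out, "\n")
}

func main() {
	cfg := "local_registry: \"\"\nkubeImageRepo: kubesphere"
	fmt.Println(rewriteLocalRegistry(cfg, "dockerhub.kubekey.local"))
	fmt.Println("---")
	fmt.Println(rewriteLocalRegistry(cfg, ""))
}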

View File

@@ -110,23 +110,27 @@ data:
endpointIps: 192.168.0.7,192.168.0.8,192.168.0.9
port: 2379
tlsEnable: True
common:
mysqlVolumeSize: 20Gi
minioVolumeSize: 20Gi
etcdVolumeSize: 20Gi
openldapVolumeSize: 2Gi
redisVolumSize: 2Gi
metrics_server:
enabled: False
console:
enableMultiLogin: False # enable/disable multi login
port: 30880
monitoring:
prometheusReplicas: 1
prometheusMemoryRequest: 400Mi
prometheusVolumeSize: 20Gi
grafana:
enabled: False
notification:
enabled: False
logging:
enabled: False
elasticsearchMasterReplicas: 1
@@ -139,8 +143,13 @@ data:
containersLogMountedPath: ""
kibana:
enabled: False
events:
enabled: False
openpitrix:
enabled: False
devops:
enabled: False
jenkinsMemoryLim: 2Gi
@@ -152,13 +161,22 @@ data:
sonarqube:
enabled: False
postgresqlVolumeSize: 8Gi
servicemesh:
enabled: False
notification:
enabled: False
alerting:
enabled: False
metrics_server:
enabled: False
weave_scope:
enabled: False
kind: ConfigMap
metadata:
name: ks-installer