add K2Cluster object
commit 107205c14c
parent de409f9dd0

@@ -0,0 +1,15 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:latest

ENV OPERATOR=/usr/local/bin/kubekey \
    USER_UID=1001 \
    USER_NAME=kubekey

# install operator binary
COPY build/_output/bin/kubekey ${OPERATOR}

COPY build/bin /usr/local/bin
RUN  /usr/local/bin/user_setup

ENTRYPOINT ["/usr/local/bin/entrypoint"]

USER ${USER_UID}
@@ -0,0 +1,3 @@
#!/bin/sh -e

exec ${OPERATOR} $@
@@ -0,0 +1,11 @@
#!/bin/sh
set -x

# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be)
echo "${USER_NAME}:x:${USER_UID}:0:${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd
mkdir -p "${HOME}"
chown "${USER_UID}:0" "${HOME}"
chmod ug+rwx "${HOME}"

# no need for this script to remain in the image after running
rm "$0"
@@ -0,0 +1,211 @@
package main

import (
    "context"
    "errors"
    "flag"
    "fmt"
    "os"
    "runtime"
    "strings"

    // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
    _ "k8s.io/client-go/plugin/pkg/client/auth"
    "k8s.io/client-go/rest"

    "github.com/pixiake/kubekey/pkg/apis"
    "github.com/pixiake/kubekey/pkg/controller"
    "github.com/pixiake/kubekey/version"

    "github.com/operator-framework/operator-sdk/pkg/k8sutil"
    kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics"
    "github.com/operator-framework/operator-sdk/pkg/leader"
    "github.com/operator-framework/operator-sdk/pkg/log/zap"
    "github.com/operator-framework/operator-sdk/pkg/metrics"
    sdkVersion "github.com/operator-framework/operator-sdk/version"
    "github.com/spf13/pflag"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
    "sigs.k8s.io/controller-runtime/pkg/cache"
    "sigs.k8s.io/controller-runtime/pkg/client/config"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/manager/signals"
)

// Change below variables to serve metrics on different host or port.
var (
    metricsHost               = "0.0.0.0"
    metricsPort         int32 = 8383
    operatorMetricsPort int32 = 8686
)
var log = logf.Log.WithName("cmd")

func printVersion() {
    log.Info(fmt.Sprintf("Operator Version: %s", version.Version))
    log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
    log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
    log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
}

func main() {
    // Add the zap logger flag set to the CLI. The flag set must
    // be added before calling pflag.Parse().
    pflag.CommandLine.AddFlagSet(zap.FlagSet())

    // Add flags registered by imported packages (e.g. glog and
    // controller-runtime)
    pflag.CommandLine.AddGoFlagSet(flag.CommandLine)

    pflag.Parse()

    // Use a zap logr.Logger implementation. If none of the zap
    // flags are configured (or if the zap flag set is not being
    // used), this defaults to a production zap logger.
    //
    // The logger instantiated here can be changed to any logger
    // implementing the logr.Logger interface. This logger will
    // be propagated through the whole operator, generating
    // uniform and structured logs.
    logf.SetLogger(zap.Logger())

    printVersion()

    namespace, err := k8sutil.GetWatchNamespace()
    if err != nil {
        log.Error(err, "Failed to get watch namespace")
        os.Exit(1)
    }

    // Get a config to talk to the apiserver
    cfg, err := config.GetConfig()
    if err != nil {
        log.Error(err, "")
        os.Exit(1)
    }

    ctx := context.TODO()
    // Become the leader before proceeding
    err = leader.Become(ctx, "kubekey-lock")
    if err != nil {
        log.Error(err, "")
        os.Exit(1)
    }

    // Set default manager options
    options := manager.Options{
        Namespace:          namespace,
        MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
    }

    // Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
    // Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
    // Also note that you may face performance issues when using this with a high number of namespaces.
    // More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
    if strings.Contains(namespace, ",") {
        options.Namespace = ""
        options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ","))
    }

    // Create a new manager to provide shared dependencies and start components
    mgr, err := manager.New(cfg, options)
    if err != nil {
        log.Error(err, "")
        os.Exit(1)
    }

    log.Info("Registering Components.")

    // Setup Scheme for all resources
    if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
        log.Error(err, "")
        os.Exit(1)
    }

    // Setup all Controllers
    if err := controller.AddToManager(mgr); err != nil {
        log.Error(err, "")
        os.Exit(1)
    }

    // Add the Metrics Service
    addMetrics(ctx, cfg)

    log.Info("Starting the Cmd.")

    // Start the Cmd
    if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
        log.Error(err, "Manager exited non-zero")
        os.Exit(1)
    }
}

// addMetrics will create the Services and Service Monitors to allow the operator export the metrics by using
// the Prometheus operator
func addMetrics(ctx context.Context, cfg *rest.Config) {
    // Get the namespace the operator is currently deployed in.
    operatorNs, err := k8sutil.GetOperatorNamespace()
    if err != nil {
        if errors.Is(err, k8sutil.ErrRunLocal) {
            log.Info("Skipping CR metrics server creation; not running in a cluster.")
            return
        }
    }

    if err := serveCRMetrics(cfg, operatorNs); err != nil {
        log.Info("Could not generate and serve custom resource metrics", "error", err.Error())
    }

    // Add to the below struct any other metrics ports you want to expose.
    servicePorts := []v1.ServicePort{
        {Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}},
        {Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}},
    }

    // Create Service object to expose the metrics port(s).
    service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts)
    if err != nil {
        log.Info("Could not create metrics Service", "error", err.Error())
    }

    // CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources
    // necessary to configure Prometheus to scrape metrics from this operator.
    services := []*v1.Service{service}

    // The ServiceMonitor is created in the same namespace where the operator is deployed
    _, err = metrics.CreateServiceMonitors(cfg, operatorNs, services)
    if err != nil {
        log.Info("Could not create ServiceMonitor object", "error", err.Error())
        // If this operator is deployed to a cluster without the prometheus-operator running, it will return
        // ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation.
        if err == metrics.ErrServiceMonitorNotPresent {
            log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error())
        }
    }
}

// serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types.
// It serves those metrics on "http://metricsHost:operatorMetricsPort".
func serveCRMetrics(cfg *rest.Config, operatorNs string) error {
    // The function below returns a list of filtered operator/CR specific GVKs. For more control, override the GVK list below
    // with your own custom logic. Note that if you are adding third party API schemas, probably you will need to
    // customize this implementation to avoid permissions issues.
    filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme)
    if err != nil {
        return err
    }

    // The metrics will be generated from the namespaces which are returned here.
    // NOTE that passing nil or an empty list of namespaces in GenerateAndServeCRMetrics will result in an error.
    ns, err := kubemetrics.GetNamespacesForMetrics(operatorNs)
    if err != nil {
        return err
    }

    // Generate and serve custom resource specific metrics.
    err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort)
    if err != nil {
        return err
    }
    return nil
}
@@ -0,0 +1,42 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: k2clusters.kubekey.io
spec:
  group: kubekey.io
  names:
    kind: K2Cluster
    listKind: K2ClusterList
    plural: k2clusters
    singular: k2cluster
  scope: Namespaced
  subresources:
    status: {}
  validation:
    openAPIV3Schema:
      description: K2Cluster is the Schema for the k2clusters API
      properties:
        apiVersion:
          description: 'APIVersion defines the versioned schema of this representation
            of an object. Servers should convert recognized schemas to the latest
            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
          type: string
        kind:
          description: 'Kind is a string value representing the REST resource this
            object represents. Servers may infer this from the endpoint the client
            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
          type: string
        metadata:
          type: object
        spec:
          description: K2ClusterSpec defines the desired state of K2Cluster
          type: object
        status:
          description: K2ClusterStatus defines the observed state of K2Cluster
          type: object
      type: object
  version: v1alpha1
  versions:
  - name: v1alpha1
    served: true
    storage: true
@@ -0,0 +1,7 @@
apiVersion: kubekey.io/v1alpha1
kind: K2Cluster
metadata:
  name: example-k2cluster
spec:
  # Add fields here
  size: 3
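The sample above is still the operator-sdk scaffold placeholder; `size` is not a field of K2ClusterSpec. Purely as an illustrative sketch, a K2Cluster populated with the spec fields declared in the v1alpha1 types added later in this commit (hosts, lbKubeapiserver, kubeCluster, network, registry) could look as follows; every concrete value here (addresses, credentials, version, plugin, CIDRs) is an assumption, not part of the commit:

apiVersion: kubekey.io/v1alpha1
kind: K2Cluster
metadata:
  name: example-k2cluster
spec:
  hosts:                              # []HostCfg
  - hostName: master1                 # placeholder host
    sshAddress: 192.168.0.2           # assumed address
    internalAddress: 192.168.0.2
    port: "22"
    user: root
    password: ChangeMe123             # assumed credential; sshKeyPath is the alternative
    role:                             # enum per the norman tag: etcd|master|worker|client
    - etcd
    - master
  - hostName: worker1
    sshAddress: 192.168.0.3
    internalAddress: 192.168.0.3
    port: "22"
    user: root
    sshKeyPath: /root/.ssh/id_rsa
    role:
    - worker
  lbKubeapiserver:
    domain: lb.kubesphere.local       # assumed load-balancer domain
    address: 192.168.0.100            # assumed VIP
    port: "6443"
  kubeCluster:
    version: v1.17.4                  # assumed Kubernetes version
    imageRepo: kubesphere             # assumed image repository
    clusterName: cluster.local
  network:
    plugin: calico                    # assumed CNI plugin
    kube_pods_cidr: 10.233.64.0/18    # assumed pod CIDR
    kube_service_cidr: 10.233.0.0/18  # assumed service CIDR
  registry:
    registryMirrors: []
    insecureRegistries: []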
@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubekey
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kubekey
  template:
    metadata:
      labels:
        name: kubekey
    spec:
      serviceAccountName: kubekey
      containers:
        - name: kubekey
          # Replace this with the built image name
          image: REPLACE_IMAGE
          command:
          - kubekey
          imagePullPolicy: Always
          env:
            - name: WATCH_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: OPERATOR_NAME
              value: "kubekey"
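The manager entrypoint added earlier in this commit switches to cache.MultiNamespacedCacheBuilder whenever WATCH_NAMESPACE contains a comma. To make the Deployment above watch a fixed set of namespaces instead of only its own, the env block could be adjusted roughly as sketched here; the namespace names are placeholders, not part of the commit:

          env:
            - name: WATCH_NAMESPACE
              value: "ns-a,ns-b"      # placeholder namespaces; any comma-separated value triggers the multi-namespace cache in main()
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: OPERATOR_NAME
              value: "kubekey"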
@@ -0,0 +1,80 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  creationTimestamp: null
  name: kubekey
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - services
  - services/finalizers
  - endpoints
  - persistentvolumeclaims
  - events
  - configmaps
  - secrets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - apps
  resources:
  - deployments
  - daemonsets
  - replicasets
  - statefulsets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - monitoring.coreos.com
  resources:
  - servicemonitors
  verbs:
  - get
  - create
- apiGroups:
  - apps
  resourceNames:
  - kubekey
  resources:
  - deployments/finalizers
  verbs:
  - update
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - apps
  resources:
  - replicasets
  - deployments
  verbs:
  - get
- apiGroups:
  - kubekey.io
  resources:
  - '*'
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
@@ -0,0 +1,11 @@
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubekey
subjects:
- kind: ServiceAccount
  name: kubekey
roleRef:
  kind: Role
  name: kubekey
  apiGroup: rbac.authorization.k8s.io
@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kubekey
@@ -0,0 +1,26 @@
module github.com/pixiake/kubekey

go 1.13

require (
    github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
    github.com/lithammer/dedent v1.1.0
    github.com/operator-framework/operator-sdk v0.17.0
    github.com/pkg/errors v0.9.1
    github.com/pkg/sftp v1.11.0
    github.com/sirupsen/logrus v1.5.0
    github.com/spf13/cobra v0.0.5
    github.com/spf13/pflag v1.0.5
    github.com/tmc/scp v0.0.0-20170824174625-f7b48647feef
    golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975
    gopkg.in/yaml.v2 v2.2.8
    k8s.io/api v0.17.4
    k8s.io/apimachinery v0.17.4
    k8s.io/client-go v12.0.0+incompatible
    sigs.k8s.io/controller-runtime v0.5.2
)

replace (
    github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.2+incompatible // Required by OLM
    k8s.io/client-go => k8s.io/client-go v0.17.4 // Required by prometheus-operator
)
@@ -0,0 +1,10 @@
package apis

import (
    "github.com/pixiake/kubekey/pkg/apis/kubekey/v1alpha1"
)

func init() {
    // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
    AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme)
}
@@ -0,0 +1,13 @@
package apis

import (
    "k8s.io/apimachinery/pkg/runtime"
)

// AddToSchemes may be used to add all resources defined in the project to a Scheme
var AddToSchemes runtime.SchemeBuilder

// AddToScheme adds all Resources to the Scheme
func AddToScheme(s *runtime.Scheme) error {
    return AddToSchemes.AddToScheme(s)
}
@@ -0,0 +1,6 @@
// Package kubekey contains kubekey API versions.
//
// This file ensures Go source parsers acknowledge the kubekey package
// and any child packages. It can be removed if any other Go source files are
// added to this package.
package kubekey
@@ -0,0 +1,4 @@
// Package v1alpha1 contains API Schema definitions for the kubekey v1alpha1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=kubekey.io
package v1alpha1
@@ -0,0 +1,185 @@
package v1alpha1

import (
    "fmt"

    "github.com/pixiake/kubekey/pkg/util"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// K2ClusterSpec defines the desired state of K2Cluster
type K2ClusterSpec struct {
    // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
    // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
    // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
    Hosts           []HostCfg          `yaml:"hosts" json:"hosts,omitempty"`
    LBKubeApiserver LBKubeApiserverCfg `yaml:"lbKubeapiserver" json:"lbKubeapiserver,omitempty"`
    KubeCluster     KubeCluster        `yaml:"kubeCluster" json:"kubeCluster,omitempty"`
    Network         NetworkConfig      `yaml:"network" json:"network,omitempty"`
    Registry        RegistryConfig     `yaml:"registry" json:"registry,omitempty"`
}

type KubeCluster struct {
    Version     string `yaml:"version" json:"version,omitempty"`
    ImageRepo   string `yaml:"imageRepo" json:"imageRepo,omitempty"`
    ClusterName string `yaml:"clusterName" json:"clusterName,omitempty"`
}

// K2ClusterStatus defines the observed state of K2Cluster
type K2ClusterStatus struct {
    // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
    // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
    // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// K2Cluster is the Schema for the k2clusters API
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=k2clusters,scope=Namespaced
type K2Cluster struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec   K2ClusterSpec   `json:"spec,omitempty"`
    Status K2ClusterStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// K2ClusterList contains a list of K2Cluster
type K2ClusterList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []K2Cluster `json:"items"`
}

//func init() {
//	SchemeBuilder.Register(&K2Cluster{}, &K2ClusterList{})
//}

type HostCfg struct {
    HostName        string   `yaml:"hostName" json:"hostName,omitempty"`
    SSHAddress      string   `yaml:"sshAddress" json:"sshAddress,omitempty"`
    InternalAddress string   `yaml:"internalAddress" json:"internalAddress,omitempty"`
    Port            string   `yaml:"port" json:"port,omitempty"`
    User            string   `yaml:"user" json:"user,omitempty"`
    Password        string   `yaml:"password" json:"password,omitempty"`
    SSHKeyPath      string   `yaml:"sshKeyPath" json:"sshKeyPath,omitempty"`
    Role            []string `yaml:"role" json:"role,omitempty" norman:"type=array[enum],options=etcd|master|worker|client"`
    ID              int      `json:"-"`
    IsEtcd          bool     `json:"-"`
    IsMaster        bool     `json:"-"`
    IsWorker        bool     `json:"-"`
    IsClient        bool     `json:"-"`
}

type Hosts struct {
    Hosts []HostCfg
}

type NetworkConfig struct {
    Plugin          string `yaml:"plugin" json:"plugin,omitempty"`
    KubePodsCIDR    string `yaml:"kube_pods_cidr" json:"kube_pods_cidr,omitempty"`
    KubeServiceCIDR string `yaml:"kube_service_cidr" json:"kube_service_cidr,omitempty"`
}

type LBKubeApiserverCfg struct {
    Domain  string `yaml:"domain" json:"domain,omitempty"`
    Address string `yaml:"address" json:"address,omitempty"`
    Port    string `yaml:"port" json:"port,omitempty"`
}

type RegistryConfig struct {
    RegistryMirrors    []string `yaml:"registryMirrors" json:"registryMirrors,omitempty"`
    InsecureRegistries []string `yaml:"insecureRegistries" json:"insecureRegistries,omitempty"`
}

type ExternalEtcd struct {
    Endpoints []string
    CaFile    string
    CertFile  string
    KeyFile   string
}

func (cfg *K2ClusterSpec) GenerateHosts() []string {
    var lbHost string
    hostsList := []string{}

    _, _, masters, _, _, _ := cfg.GroupHosts()
    if cfg.LBKubeApiserver.Address != "" {
        lbHost = fmt.Sprintf("%s %s", cfg.LBKubeApiserver.Address, cfg.LBKubeApiserver.Domain)
    } else {
        lbHost = fmt.Sprintf("%s %s", masters.Hosts[0].InternalAddress, DefaultLBDomain)
    }

    for _, host := range cfg.Hosts {
        if host.HostName != "" {
            hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", host.InternalAddress, host.HostName, cfg.KubeCluster.ClusterName, host.HostName))
        }
    }

    hostsList = append(hostsList, lbHost)
    return hostsList
}

func (cfg *K2ClusterSpec) GenerateCertSANs() []string {
    clusterSvc := fmt.Sprintf("kubernetes.default.svc.%s", cfg.KubeCluster.ClusterName)
    defaultCertSANs := []string{"kubernetes", "kubernetes.default", "kubernetes.default.svc", clusterSvc, "localhost", "127.0.0.1"}
    extraCertSANs := []string{}

    extraCertSANs = append(extraCertSANs, cfg.LBKubeApiserver.Domain)
    extraCertSANs = append(extraCertSANs, cfg.LBKubeApiserver.Address)

    for _, host := range cfg.Hosts {
        extraCertSANs = append(extraCertSANs, host.HostName)
        extraCertSANs = append(extraCertSANs, fmt.Sprintf("%s.%s", host.HostName, cfg.KubeCluster.ClusterName))
        if host.SSHAddress != cfg.LBKubeApiserver.Address {
            extraCertSANs = append(extraCertSANs, host.SSHAddress)
        }
        if host.InternalAddress != host.SSHAddress && host.InternalAddress != cfg.LBKubeApiserver.Address {
            extraCertSANs = append(extraCertSANs, host.InternalAddress)
        }
    }

    extraCertSANs = append(extraCertSANs, util.ParseIp(cfg.Network.KubeServiceCIDR)[0])

    defaultCertSANs = append(defaultCertSANs, extraCertSANs...)
    return defaultCertSANs
}

func (cfg *K2ClusterSpec) GroupHosts() (*Hosts, *Hosts, *Hosts, *Hosts, *Hosts, *Hosts) {
    allHosts := Hosts{}
    etcdHosts := Hosts{}
    masterHosts := Hosts{}
    workerHosts := Hosts{}
    k8sHosts := Hosts{}
    clientHost := Hosts{}

    for _, host := range cfg.Hosts {
        //clusterNode := HostCfg{}
        if host.IsEtcd {
            etcdHosts.Hosts = append(etcdHosts.Hosts, host)
        }
        if host.IsMaster {
            masterHosts.Hosts = append(masterHosts.Hosts, host)
        }
        if host.IsWorker {
            workerHosts.Hosts = append(workerHosts.Hosts, host)
        }
        if host.IsMaster || host.IsWorker {
            k8sHosts.Hosts = append(k8sHosts.Hosts, host)
        }
        if host.IsClient {
            clientHost.Hosts = append(clientHost.Hosts, host)
        }
        allHosts.Hosts = append(allHosts.Hosts, host)
    }
    return &allHosts, &etcdHosts, &masterHosts, &workerHosts, &k8sHosts, &clientHost
}

func (cfg *K2ClusterSpec) ClusterIP() string {
    return util.ParseIp(cfg.Network.KubeServiceCIDR)[2]
}
@@ -0,0 +1,47 @@
// NOTE: Boilerplate only. Ignore this file.

// Package v1alpha1 contains API Schema definitions for the kubekey v1alpha1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=kubekey.io
package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    // "sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
    // SchemeGroupVersion is group version used to register these objects
    SchemeGroupVersion = schema.GroupVersion{Group: "kubekey.io", Version: "v1alpha1"}

    // SchemeBuilder is used to add go types to the GroupVersionKind scheme
    // SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
    SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
    AddToScheme   = SchemeBuilder.AddToScheme
)

func init() {
    SchemeBuilder.Register(addKnownTypes)
}

// Kind takes an unqualified kind and returns GroupKind
func Kind(kind string) schema.GroupKind {
    return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns GroupResource
func Resource(resource string) schema.GroupResource {
    return SchemeGroupVersion.WithResource(resource).GroupResource()
}

func addKnownTypes(scheme *runtime.Scheme) error {
    scheme.AddKnownTypes(SchemeGroupVersion,
        &K2Cluster{},
        &K2ClusterList{},
    )
    metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    return nil
}
@@ -0,0 +1,252 @@
// +build !ignore_autogenerated

// Code generated by operator-sdk. DO NOT EDIT.

package v1alpha1

import (
    runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) {
    *out = *in
    if in.Endpoints != nil {
        in, out := &in.Endpoints, &out.Endpoints
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalEtcd.
func (in *ExternalEtcd) DeepCopy() *ExternalEtcd {
    if in == nil {
        return nil
    }
    out := new(ExternalEtcd)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostCfg) DeepCopyInto(out *HostCfg) {
    *out = *in
    if in.Role != nil {
        in, out := &in.Role, &out.Role
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostCfg.
func (in *HostCfg) DeepCopy() *HostCfg {
    if in == nil {
        return nil
    }
    out := new(HostCfg)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Hosts) DeepCopyInto(out *Hosts) {
    *out = *in
    if in.Hosts != nil {
        in, out := &in.Hosts, &out.Hosts
        *out = make([]HostCfg, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hosts.
func (in *Hosts) DeepCopy() *Hosts {
    if in == nil {
        return nil
    }
    out := new(Hosts)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K2Cluster) DeepCopyInto(out *K2Cluster) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Spec.DeepCopyInto(&out.Spec)
    out.Status = in.Status
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K2Cluster.
func (in *K2Cluster) DeepCopy() *K2Cluster {
    if in == nil {
        return nil
    }
    out := new(K2Cluster)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *K2Cluster) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K2ClusterList) DeepCopyInto(out *K2ClusterList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]K2Cluster, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K2ClusterList.
func (in *K2ClusterList) DeepCopy() *K2ClusterList {
    if in == nil {
        return nil
    }
    out := new(K2ClusterList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *K2ClusterList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K2ClusterSpec) DeepCopyInto(out *K2ClusterSpec) {
    *out = *in
    if in.Hosts != nil {
        in, out := &in.Hosts, &out.Hosts
        *out = make([]HostCfg, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    out.LBKubeApiserver = in.LBKubeApiserver
    out.KubeCluster = in.KubeCluster
    out.Network = in.Network
    in.Registry.DeepCopyInto(&out.Registry)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K2ClusterSpec.
func (in *K2ClusterSpec) DeepCopy() *K2ClusterSpec {
    if in == nil {
        return nil
    }
    out := new(K2ClusterSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K2ClusterStatus) DeepCopyInto(out *K2ClusterStatus) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K2ClusterStatus.
func (in *K2ClusterStatus) DeepCopy() *K2ClusterStatus {
    if in == nil {
        return nil
    }
    out := new(K2ClusterStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeCluster) DeepCopyInto(out *KubeCluster) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeCluster.
func (in *KubeCluster) DeepCopy() *KubeCluster {
    if in == nil {
        return nil
    }
    out := new(KubeCluster)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LBKubeApiserverCfg) DeepCopyInto(out *LBKubeApiserverCfg) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBKubeApiserverCfg.
func (in *LBKubeApiserverCfg) DeepCopy() *LBKubeApiserverCfg {
    if in == nil {
        return nil
    }
    out := new(LBKubeApiserverCfg)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkConfig) DeepCopyInto(out *NetworkConfig) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfig.
func (in *NetworkConfig) DeepCopy() *NetworkConfig {
    if in == nil {
        return nil
    }
    out := new(NetworkConfig)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RegistryConfig) DeepCopyInto(out *RegistryConfig) {
    *out = *in
    if in.RegistryMirrors != nil {
        in, out := &in.RegistryMirrors, &out.RegistryMirrors
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.InsecureRegistries != nil {
        in, out := &in.InsecureRegistries, &out.InsecureRegistries
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryConfig.
func (in *RegistryConfig) DeepCopy() *RegistryConfig {
    if in == nil {
        return nil
    }
    out := new(RegistryConfig)
    in.DeepCopyInto(out)
    return out
}
@@ -0,0 +1,10 @@
package controller

import (
    "github.com/pixiake/kubekey/pkg/controller/k2cluster"
)

func init() {
    // AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
    AddToManagerFuncs = append(AddToManagerFuncs, k2cluster.Add)
}
@@ -0,0 +1,18 @@
package controller

import (
    "sigs.k8s.io/controller-runtime/pkg/manager"
)

// AddToManagerFuncs is a list of functions to add all Controllers to the Manager
var AddToManagerFuncs []func(manager.Manager) error

// AddToManager adds all Controllers to the Manager
func AddToManager(m manager.Manager) error {
    for _, f := range AddToManagerFuncs {
        if err := f(m); err != nil {
            return err
        }
    }
    return nil
}
@@ -0,0 +1,153 @@
package k2cluster

import (
    "context"

    kubekeyv1alpha1 "github.com/pixiake/kubekey/pkg/apis/kubekey/v1alpha1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
    "sigs.k8s.io/controller-runtime/pkg/source"
)

var log = logf.Log.WithName("controller_k2cluster")

/**
* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
* business logic. Delete these comments after modifying this file.*
 */

// Add creates a new K2Cluster Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
    return add(mgr, newReconciler(mgr))
}

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
    return &ReconcileK2Cluster{client: mgr.GetClient(), scheme: mgr.GetScheme()}
}

// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
    // Create a new controller
    c, err := controller.New("k2cluster-controller", mgr, controller.Options{Reconciler: r})
    if err != nil {
        return err
    }

    // Watch for changes to primary resource K2Cluster
    err = c.Watch(&source.Kind{Type: &kubekeyv1alpha1.K2Cluster{}}, &handler.EnqueueRequestForObject{})
    if err != nil {
        return err
    }

    // TODO(user): Modify this to be the types you create that are owned by the primary resource
    // Watch for changes to secondary resource Pods and requeue the owner K2Cluster
    err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{
        IsController: true,
        OwnerType:    &kubekeyv1alpha1.K2Cluster{},
    })
    if err != nil {
        return err
    }

    return nil
}

// blank assignment to verify that ReconcileK2Cluster implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileK2Cluster{}

// ReconcileK2Cluster reconciles a K2Cluster object
type ReconcileK2Cluster struct {
    // This client, initialized using mgr.Client() above, is a split client
    // that reads objects from the cache and writes to the apiserver
    client client.Client
    scheme *runtime.Scheme
}

// Reconcile reads that state of the cluster for a K2Cluster object and makes changes based on the state read
// and what is in the K2Cluster.Spec
// TODO(user): Modify this Reconcile function to implement your Controller logic. This example creates
// a Pod as an example
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileK2Cluster) Reconcile(request reconcile.Request) (reconcile.Result, error) {
    reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
    reqLogger.Info("Reconciling K2Cluster")

    // Fetch the K2Cluster instance
    instance := &kubekeyv1alpha1.K2Cluster{}
    err := r.client.Get(context.TODO(), request.NamespacedName, instance)
    if err != nil {
        if errors.IsNotFound(err) {
            // Request object not found, could have been deleted after reconcile request.
            // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
            // Return and don't requeue
            return reconcile.Result{}, nil
        }
        // Error reading the object - requeue the request.
        return reconcile.Result{}, err
    }

    // Define a new Pod object
    pod := newPodForCR(instance)

    // Set K2Cluster instance as the owner and controller
    if err := controllerutil.SetControllerReference(instance, pod, r.scheme); err != nil {
        return reconcile.Result{}, err
    }

    // Check if this Pod already exists
    found := &corev1.Pod{}
    err = r.client.Get(context.TODO(), types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, found)
    if err != nil && errors.IsNotFound(err) {
        reqLogger.Info("Creating a new Pod", "Pod.Namespace", pod.Namespace, "Pod.Name", pod.Name)
        err = r.client.Create(context.TODO(), pod)
        if err != nil {
            return reconcile.Result{}, err
        }

        // Pod created successfully - don't requeue
        return reconcile.Result{}, nil
    } else if err != nil {
        return reconcile.Result{}, err
    }

    // Pod already exists - don't requeue
    reqLogger.Info("Skip reconcile: Pod already exists", "Pod.Namespace", found.Namespace, "Pod.Name", found.Name)
    return reconcile.Result{}, nil
}

// newPodForCR returns a busybox pod with the same name/namespace as the cr
func newPodForCR(cr *kubekeyv1alpha1.K2Cluster) *corev1.Pod {
    labels := map[string]string{
        "app": cr.Name,
    }
    return &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      cr.Name + "-pod",
            Namespace: cr.Namespace,
            Labels:    labels,
        },
        Spec: corev1.PodSpec{
            Containers: []corev1.Container{
                {
                    Name:    "busybox",
                    Image:   "busybox",
                    Command: []string{"sleep", "3600"},
                },
            },
        },
    }
}
@@ -0,0 +1,5 @@
// +build tools

// Place any runtime dependencies as imports in this file.
// Go modules will be forced to download and install them.
package tools
@@ -0,0 +1,5 @@
package version

var (
    Version = "0.0.1"
)