support kube-vip

zhouqiu0103 2022-07-25 11:10:35 +08:00
parent 544e94aa79
commit 0853e9c6a3
9 changed files with 485 additions and 12 deletions

View File

@ -354,8 +354,9 @@ func hostVerify(hostMap map[string]*KubeHost, hostName string, group string) err
}
func (c ControlPlaneEndpoint) IsInternalLBEnabled() bool {
-if c.InternalLoadbalancer == Haproxy {
-return true
-}
-return false
+return c.InternalLoadbalancer == Haproxy
}
+func (c ControlPlaneEndpoint) IsInternalLBEnabledVip() bool {
+return c.InternalLoadbalancer == Kubevip
+}

View File

@ -89,6 +89,7 @@ const (
Isula = "isula"
Haproxy = "haproxy"
Kubevip = "kube-vip"
)
func (cfg *ClusterSpec) SetDefaultClusterSpec(incluster bool) (*ClusterSpec, map[string][]*KubeHost) {

View File

@ -18,9 +18,9 @@ spec:
- node1
- node[10:100] # All the nodes in your cluster that serve as the worker nodes.
controlPlaneEndpoint:
-internalLoadbalancer: haproxy #Internal loadbalancer for apiservers. [Default: ""]
+internalLoadbalancer: haproxy # Internal load balancer for the apiservers. Supported: haproxy, kube-vip [Default: ""]
domain: lb.kubesphere.local
address: "" # The IP address of your load balancer.
address: "" # The IP address of your load balancer. If you use internalLoadblancer in "kube-vip" mode, a VIP is required here.
port: 6443
system:
ntpServers: # The ntp servers of chrony.
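Putting the new fields together, a kube-vip flavored variant of the sample above might look like the following sketch (the VIP 192.168.0.100 is a hypothetical value; it must be an unused address in the masters' subnet):

controlPlaneEndpoint:
  internalLoadbalancer: kube-vip   # switch the internal load balancer from haproxy to kube-vip
  domain: lb.kubesphere.local
  address: "192.168.0.100"         # the VIP; required in kube-vip mode
  port: 6443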

View File

@ -19,13 +19,14 @@ package images
import (
"encoding/json"
"fmt"
-manifesttypes "github.com/estesp/manifest-tool/v2/pkg/types"
-coreutil "github.com/kubesphere/kubekey/pkg/core/util"
-"github.com/kubesphere/kubekey/pkg/registry"
"io/ioutil"
"path/filepath"
"strings"
+manifesttypes "github.com/estesp/manifest-tool/v2/pkg/types"
+coreutil "github.com/kubesphere/kubekey/pkg/core/util"
+"github.com/kubesphere/kubekey/pkg/registry"
manifestregistry "github.com/estesp/manifest-tool/v2/pkg/registry"
kubekeyv1alpha2 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha2"
"github.com/kubesphere/kubekey/pkg/common"
@ -60,6 +61,7 @@ func (p *PullImage) Execute(runtime connector.Runtime) error {
GetImage(runtime, p.KubeConf, "flannel"),
GetImage(runtime, p.KubeConf, "kubeovn"),
GetImage(runtime, p.KubeConf, "haproxy"),
GetImage(runtime, p.KubeConf, "kubevip"),
}
if err := i.PullImages(runtime, p.KubeConf); err != nil {
@ -124,6 +126,7 @@ func GetImage(runtime connector.ModuleRuntime, kubeConf *common.KubeConf, name s
"linux-utils": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: "openebs", Repo: "linux-utils", Tag: "2.10.0", Group: kubekeyv1alpha2.Worker, Enable: false},
// load balancer
"haproxy": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: "library", Repo: "haproxy", Tag: "2.3", Group: kubekeyv1alpha2.Worker, Enable: kubeConf.Cluster.ControlPlaneEndpoint.IsInternalLBEnabled()},
"kubevip": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: "plndr", Repo: "kube-vip", Tag: "v0.5.0", Group: kubekeyv1alpha2.Master, Enable: kubeConf.Cluster.ControlPlaneEndpoint.IsInternalLBEnabledVip()},
// kata-deploy
"kata-deploy": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: kubekeyv1alpha2.DefaultKubeImageNamespace, Repo: "kata-deploy", Tag: "stable", Group: kubekeyv1alpha2.Worker, Enable: kubeConf.Cluster.Kubernetes.EnableKataDeploy()},
// node-feature-discovery

View File

@ -17,6 +17,8 @@
package loadbalancer
import (
"path/filepath"
kubekeyapiv1alpha2 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha2"
"github.com/kubesphere/kubekey/pkg/common"
"github.com/kubesphere/kubekey/pkg/core/action"
@ -25,7 +27,6 @@ import (
"github.com/kubesphere/kubekey/pkg/core/task"
"github.com/kubesphere/kubekey/pkg/core/util"
"github.com/kubesphere/kubekey/pkg/loadbalancer/templates"
"path/filepath"
)
type HaproxyModule struct {
@ -136,6 +137,69 @@ func (h *HaproxyModule) Init() {
}
}
type KubevipModule struct {
common.KubeModule
Skip bool
}
func (k *KubevipModule) IsSkip() bool {
return k.Skip
}
func (k *KubevipModule) Init() {
k.Name = "InternalLoadbalancerModule"
k.Desc = "Install internal load balancer"
checkVIPAddress := &task.RemoteTask{
Name: "CheckVIPAddress",
Desc: "Check VIP Address",
Hosts: k.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(CheckVIPAddress),
Parallel: true,
}
getInterface := &task.RemoteTask{
Name: "GetNodeInterface",
Desc: "Get Node Interface",
Hosts: k.Runtime.GetHostsByRole(common.Master),
Action: new(GetInterfaceName),
Parallel: true,
}
kubevipManifestOnlyFirstMaster := &task.RemoteTask{
Name: "GenerateKubevipManifest",
Desc: "Generate kubevip manifest at first master",
Hosts: k.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(GenerateKubevipManifest),
Parallel: true,
}
kubevipManifestNotFirstMaster := &task.RemoteTask{
Name: "GenerateKubevipManifest",
Desc: "Generate kubevip manifest at other master",
Hosts: k.Runtime.GetHostsByRole(common.Master),
Prepare: &common.OnlyFirstMaster{Not: true},
Action: new(GenerateKubevipManifest),
Parallel: true,
}
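// Fresh installs have not cached common.ClusterExist as true yet, so the first pipeline pass takes the
// first-master-only branch below; the module's second registration in the pipeline (after the other
// masters join) takes the existing-cluster branch.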
if exist, _ := k.BaseModule.PipelineCache.GetMustBool(common.ClusterExist); exist {
k.Tasks = []task.Interface{
checkVIPAddress,
getInterface,
kubevipManifestNotFirstMaster,
}
} else {
k.Tasks = []task.Interface{
checkVIPAddress,
getInterface,
kubevipManifestOnlyFirstMaster,
}
}
}
type K3sHaproxyModule struct {
common.KubeModule
Skip bool
@ -222,3 +286,60 @@ func (k *K3sHaproxyModule) Init() {
updateHostsFile,
}
}
type K3sKubevipModule struct {
common.KubeModule
Skip bool
}
func (k *K3sKubevipModule) IsSkip() bool {
return k.Skip
}
func (k *K3sKubevipModule) Init() {
k.Name = "InternalLoadbalancerModule"
k.Name = "Install internal load balancer"
checkVIPAddress := &task.RemoteTask{
Name: "CheckVIPAddress",
Desc: "Check VIP Address",
Hosts: k.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(CheckVIPAddress),
Parallel: true,
}
createManifestsFolder := &task.RemoteTask{
Name: "CreateManifestsFolder",
Desc: "Create Manifests Folder",
Hosts: k.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(CreateManifestsFolder),
Parallel: true,
}
getInterface := &task.RemoteTask{
Name: "GetNodeInterface",
Desc: "Get Node Interface",
Hosts: k.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(GetInterfaceName),
Parallel: true,
}
kubevipDaemonsetK3s := &task.RemoteTask{
Name: "GenerateKubevipManifest",
Desc: "Generate kubevip manifest at other master",
Hosts: k.Runtime.GetHostsByRole(common.Master),
Prepare: new(common.OnlyFirstMaster),
Action: new(GenerateK3sKubevipDaemonset),
Parallel: true,
}
k.Tasks = []task.Interface{
checkVIPAddress,
createManifestsFolder,
getInterface,
kubevipDaemonsetK3s,
}
}

View File

@ -18,6 +18,9 @@ package loadbalancer
import (
"fmt"
"path/filepath"
"strconv"
"github.com/kubesphere/kubekey/pkg/common"
"github.com/kubesphere/kubekey/pkg/core/action"
"github.com/kubesphere/kubekey/pkg/core/connector"
@ -25,8 +28,6 @@ import (
"github.com/kubesphere/kubekey/pkg/images"
"github.com/kubesphere/kubekey/pkg/loadbalancer/templates"
"github.com/pkg/errors"
"path/filepath"
"strconv"
)
type GetChecksum struct {
@ -138,6 +139,67 @@ func (u *UpdateHosts) Execute(runtime connector.Runtime) error {
return nil
}
type CheckVIPAddress struct {
common.KubeAction
}
func (c *CheckVIPAddress) Execute(runtime connector.Runtime) error {
if c.KubeConf.Cluster.ControlPlaneEndpoint.Address == "" {
return errors.New("VIP address is empty")
}
return nil
}
type GetInterfaceName struct {
common.KubeAction
}
func (g *GetInterfaceName) Execute(runtime connector.Runtime) error {
host := runtime.RemoteHost()
cmd := fmt.Sprintf("ip route "+
"| grep %s "+
"| sed -e \"s/^.*dev.//\" -e \"s/.proto.*//\"", host.GetAddress())
interfaceName, err := runtime.GetRunner().SudoCmd(cmd, false)
if err != nil {
return err
}
if interfaceName == "" {
return errors.New("get interface failed")
}
// type: string
host.GetCache().Set("interface", interfaceName)
return nil
}
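To make the parsing above concrete, a hedged transcript on a hypothetical master whose address is 192.168.0.2 (the route line and interface name vary per host):

$ ip route | grep 192.168.0.2
192.168.0.0/24 dev eth0 proto kernel scope link src 192.168.0.2
$ ip route | grep 192.168.0.2 | sed -e "s/^.*dev.//" -e "s/.proto.*//"
eth0

The first sed expression drops everything through "dev " and the second drops " proto" and the rest, leaving only the interface name, which is then cached under the "interface" key.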
type GenerateKubevipManifest struct {
common.KubeAction
}
func (g *GenerateKubevipManifest) Execute(runtime connector.Runtime) error {
host := runtime.RemoteHost()
interfaceName, ok := host.GetCache().GetMustString("interface")
if !ok {
return errors.New("get interface failed")
}
templateAction := action.Template{
Template: templates.KubevipManifest,
Dst: filepath.Join(common.KubeManifestDir, templates.KubevipManifest.Name()),
Data: util.Data{
"VipInterface": interfaceName,
"KubeVip": g.KubeConf.Cluster.ControlPlaneEndpoint.Address,
"KubevipImage": images.GetImage(runtime, g.KubeConf, "kubevip").ImageName(),
},
}
templateAction.Init(nil, nil)
if err := templateAction.Execute(runtime); err != nil {
return err
}
return nil
}
type GenerateK3sHaproxyManifest struct {
common.KubeAction
}
@ -165,3 +227,44 @@ func (g *GenerateK3sHaproxyManifest) Execute(runtime connector.Runtime) error {
}
return nil
}
type CreateManifestsFolder struct {
action.BaseAction
}
func (h *CreateManifestsFolder) Execute(runtime connector.Runtime) error {
_, err := runtime.GetRunner().SudoCmd("mkdir -p /var/lib/rancher/k3s/server/manifests/", false)
if err != nil {
return err
}
return nil
}
type GenerateK3sKubevipDaemonset struct {
common.KubeAction
}
func (g *GenerateK3sKubevipDaemonset) Execute(runtime connector.Runtime) error {
host := runtime.RemoteHost()
interfaceName, ok := host.GetCache().GetMustString("interface")
if !ok {
return errors.New("get interface failed")
}
templateAction := action.Template{
Template: templates.K3sKubevipManifest,
Dst: filepath.Join("/var/lib/rancher/k3s/server/manifests/", templates.K3sKubevipManifest.Name()),
Data: util.Data{
"KubeVipVersion": images.GetImage(runtime, g.KubeConf, "kubevip").Tag,
"VipInterface": interfaceName,
"KubeVip": g.KubeConf.Cluster.ControlPlaneEndpoint.Address,
"KubevipImage": images.GetImage(runtime, g.KubeConf, "kubevip").ImageName(),
},
}
templateAction.Init(nil, nil)
if err := templateAction.Execute(runtime); err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,150 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package templates
import (
"text/template"
"github.com/lithammer/dedent"
)
var K3sKubevipManifest = template.Must(template.New("kube-vip-rbac.yaml").Parse(
dedent.Dedent(`
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-vip
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
name: system:kube-vip-role
rules:
- apiGroups: [""]
resources: ["services", "services/status", "nodes", "endpoints"]
verbs: ["list","get","watch", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["list", "get", "watch", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-vip-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-vip-role
subjects:
- kind: ServiceAccount
name: kube-vip
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: kube-vip-ds
app.kubernetes.io/version: {{ .KubeVipVersion }}
name: kube-vip-ds
namespace: kube-system
spec:
selector:
matchLabels:
app.kubernetes.io/name: kube-vip-ds
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: kube-vip-ds
app.kubernetes.io/version: {{ .KubeVipVersion }}
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "6443"
- name: vip_interface
value: {{ .VipInterface }}
- name: vip_cidr
value: "32"
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_ddns
value: "false"
- name: svc_enable
value: "true"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: lb_enable
value: "true"
- name: lb_port
value: "6443"
- name: lb_fwdmethod
value: local
- name: address
value: {{ .KubeVip }}
- name: prometheus_server
value: :2112
image: {{ .KubevipImage }}
imagePullPolicy: Always
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
hostNetwork: true
serviceAccountName: kube-vip
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
updateStrategy: {}
status:
currentNumberScheduled: 0
desiredNumberScheduled: 0
numberMisscheduled: 0
numberReady: 0
`)))
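k3s automatically applies any manifest dropped into /var/lib/rancher/k3s/server/manifests/, so once GenerateK3sKubevipDaemonset renders this template the DaemonSet should come up on its own. A hedged spot check, assuming kubectl access on a master:

$ kubectl -n kube-system get daemonset kube-vip-ds
$ kubectl -n kube-system logs -l app.kubernetes.io/name=kube-vip-ds --tail=20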

View File

@ -0,0 +1,91 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package templates
import (
"text/template"
"github.com/lithammer/dedent"
)
var KubevipManifest = template.Must(template.New("kube-vip.yaml").Parse(
dedent.Dedent(`
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "6443"
- name: vip_interface
value: {{ .VipInterface }}
- name: vip_cidr
value: "32"
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_ddns
value: "false"
- name: svc_enable
value: "true"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: lb_enable
value: "true"
- name: lb_port
value: "6443"
- name: address
value: {{ .KubeVip }}
image: {{ .KubevipImage }}
imagePullPolicy: Always
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
- SYS_TIME
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: /etc/kubernetes/admin.conf
name: kubeconfig
status: {}
`)))
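Unlike the k3s DaemonSet above, this variant runs as a static pod: GenerateKubevipManifest writes it under common.KubeManifestDir, so the kubelet starts it directly, independent of the API server, and it mounts /etc/kubernetes/admin.conf as its kubeconfig. A hedged check that the elected leader actually holds the VIP (VIP value hypothetical):

$ crictl ps --name kube-vip
$ ip addr show | grep 192.168.0.100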

View File

@ -78,10 +78,12 @@ func NewCreateClusterPipeline(runtime *common.KubeRuntime) error {
&etcd.ConfigureModule{Skip: runtime.Cluster.Etcd.Type != kubekeyapiv1alpha2.KubeKey},
&etcd.BackupModule{Skip: runtime.Cluster.Etcd.Type != kubekeyapiv1alpha2.KubeKey},
&kubernetes.InstallKubeBinariesModule{},
&loadbalancer.KubevipModule{Skip: !runtime.Cluster.ControlPlaneEndpoint.IsInternalLBEnabledVip()},
&kubernetes.InitKubernetesModule{},
&dns.ClusterDNSModule{},
&kubernetes.StatusModule{},
&kubernetes.JoinNodesModule{},
&loadbalancer.KubevipModule{Skip: !runtime.Cluster.ControlPlaneEndpoint.IsInternalLBEnabledVip()},
&loadbalancer.HaproxyModule{Skip: !runtime.Cluster.ControlPlaneEndpoint.IsInternalLBEnabled()},
&network.DeployNetworkPluginModule{},
&kubernetes.ConfigureKubernetesModule{},
@ -168,6 +170,7 @@ func NewK3sCreateClusterPipeline(runtime *common.KubeRuntime) error {
&etcd.InstallETCDBinaryModule{Skip: runtime.Cluster.Etcd.Type != kubekeyapiv1alpha2.KubeKey},
&etcd.ConfigureModule{Skip: runtime.Cluster.Etcd.Type != kubekeyapiv1alpha2.KubeKey},
&etcd.BackupModule{Skip: runtime.Cluster.Etcd.Type != kubekeyapiv1alpha2.KubeKey},
&loadbalancer.K3sKubevipModule{Skip: !runtime.Cluster.ControlPlaneEndpoint.IsInternalLBEnabledVip()},
&k3s.InstallKubeBinariesModule{},
&k3s.InitClusterModule{},
&k3s.StatusModule{},