Mirror of https://github.com/kubesphere/kubekey.git (synced 2025-12-25 17:12:50 +00:00)
Add cilium support
Signed-off-by: Forest-L <lilin@yunify.com>
This commit is contained in:
parent 4ec244fa22
commit 78f4b3c1c5
@@ -39,6 +39,7 @@ const (
	DefaultCalicoVersion    = "v3.15.1"
	DefaultFlannelVersion   = "v0.12.0"
	DefaultCniVersion       = "v0.8.6"
	DefaultCiliumVersion    = "v1.8.3"
	DefaultHelmVersion      = "v3.2.1"
	DefaultMaxPods          = "110"
	DefaultNodeCidrMaskSize = "24"
@@ -35,6 +35,8 @@ func PullImages(mgr *manager.Manager, node *kubekeyapiv1alpha1.HostCfg) error {
		GetImage(mgr, "calico-cni"),
		GetImage(mgr, "calico-node"),
		GetImage(mgr, "calico-flexvol"),
		GetImage(mgr, "cilium"),
		GetImage(mgr, "operator-generic"),
		GetImage(mgr, "flannel"),
	}
	if err := i.PullImages(mgr, node); err != nil {
@@ -73,6 +75,8 @@ func GetImage(mgr *manager.Manager, name string) images.Image {
	"calico-flexvol":      {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "calico", Repo: "pod2daemon-flexvol", Tag: kubekeyapiv1alpha1.DefaultCalicoVersion, Group: kubekeyapiv1alpha1.K8s, Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "calico")},
	"calico-typha":        {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "calico", Repo: "typha", Tag: kubekeyapiv1alpha1.DefaultCalicoVersion, Group: kubekeyapiv1alpha1.K8s, Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "calico") && len(mgr.K8sNodes) > 50},
	"flannel":             {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "kubesphere", Repo: "flannel", Tag: kubekeyapiv1alpha1.DefaultFlannelVersion, Group: kubekeyapiv1alpha1.K8s, Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "flannel")},
	"cilium":              {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "cilium", Repo: "cilium", Tag: kubekeyapiv1alpha1.DefaultCiliumVersion, Group: kubekeyapiv1alpha1.K8s, Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "cilium")},
	"operator-generic":    {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "cilium", Repo: "operator-generic", Tag: kubekeyapiv1alpha1.DefaultCiliumVersion, Group: kubekeyapiv1alpha1.K8s, Enable: strings.EqualFold(mgr.Cluster.Network.Plugin, "cilium")},
	// storage
	"provisioner-localpv": {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "kubesphere", Repo: "provisioner-localpv", Tag: "1.10.0", Group: kubekeyapiv1alpha1.Worker, Enable: false},
	"openebs-tools":       {RepoAddr: mgr.Cluster.Registry.PrivateRegistry, Namespace: "kubesphere", Repo: "openebs-tools", Tag: "3.8", Group: kubekeyapiv1alpha1.Worker, Enable: false},
@@ -0,0 +1,700 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cilium

import (
	"text/template"

	"github.com/kubesphere/kubekey/pkg/cluster/preinstall"
	"github.com/kubesphere/kubekey/pkg/util"
	"github.com/kubesphere/kubekey/pkg/util/manager"
	"github.com/lithammer/dedent"
)

var ciliumTempl = template.Must(template.New("cilium").Parse(
	dedent.Dedent(`---
# Source: cilium/charts/agent/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cilium
  namespace: kube-system
---
# Source: cilium/charts/operator/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cilium-operator
  namespace: kube-system
---
# Source: cilium/charts/config/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cilium-config
  namespace: kube-system
data:

  # Identity allocation mode selects how identities are shared between cilium
  # nodes by setting how they are stored. The options are "crd" or "kvstore".
  # - "crd" stores identities in kubernetes as CRDs (custom resource definition).
  #   These can be queried with:
  #     kubectl get ciliumid
  # - "kvstore" stores identities in a kvstore, etcd or consul, that is
  #   configured below. Cilium versions before 1.6 supported only the kvstore
  #   backend. Upgrades from these older cilium versions should continue using
  #   the kvstore by commenting out the identity-allocation-mode below, or
  #   setting it to "kvstore".
  identity-allocation-mode: crd

  # If you want to run cilium in debug mode change this value to true
  debug: "false"

  # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
  # address.
  enable-ipv4: "true"

  # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
  # address.
  enable-ipv6: "false"
  enable-bpf-clock-probe: "true"

  # If you want cilium monitor to aggregate tracing for packets, set this level
  # to "low", "medium", or "maximum". The higher the level, the fewer packets
  # will be seen in monitor output.
  monitor-aggregation: medium

  # The monitor aggregation interval governs the typical time between monitor
  # notification events for each allowed connection.
  #
  # Only effective when monitor aggregation is set to "medium" or higher.
  monitor-aggregation-interval: 5s

  # The monitor aggregation flags determine which TCP flags, upon first
  # observation, cause monitor notifications to be generated.
  #
  # Only effective when monitor aggregation is set to "medium" or higher.
  monitor-aggregation-flags: all
  # bpf-policy-map-max specifies the maximum number of entries in the endpoint
  # policy map (per endpoint)
  bpf-policy-map-max: "16384"
  # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
  # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
  bpf-map-dynamic-size-ratio: "0.0025"

  # Pre-allocation of map entries allows per-packet latency to be reduced, at
  # the expense of up-front memory allocation for the entries in the maps. The
  # default value below will minimize memory usage in the default installation;
  # users who are sensitive to latency may consider setting this to "true".
  #
  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
  # this option and behave as though it is set to "true".
  #
  # If this value is modified, then during the next Cilium startup the restore
  # of existing endpoints and tracking of ongoing connections may be disrupted.
  # This may lead to policy drops or a change in loadbalancing decisions for a
  # connection for some time. Endpoints may need to be recreated to restore
  # connectivity.
  #
  # If this option is set to "false" during an upgrade from 1.3 or earlier to
  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
  preallocate-bpf-maps: "false"

  # Regular expression matching compatible Istio sidecar istio-proxy
  # container image names
  sidecar-istio-proxy-image: "cilium/istio_proxy"

  # Encapsulation mode for communication between nodes
  # Possible values:
  #   - disabled
  #   - vxlan (default)
  #   - geneve
  tunnel: vxlan

  # Name of the cluster. Only relevant when building a mesh of clusters.
  cluster-name: default

  # wait-bpf-mount makes init container wait until bpf filesystem is mounted
  wait-bpf-mount: "false"

  masquerade: "true"
  enable-bpf-masquerade: "true"
  enable-xt-socket-fallback: "true"
  install-iptables-rules: "true"
  auto-direct-node-routes: "false"
  kube-proxy-replacement: "probe"
  enable-health-check-nodeport: "true"
  node-port-bind-protection: "true"
  enable-auto-protect-node-port-range: "true"
  enable-session-affinity: "true"
  k8s-require-ipv4-pod-cidr: "true"
  k8s-require-ipv6-pod-cidr: "false"
  enable-endpoint-health-checking: "true"
  enable-well-known-identities: "false"
  enable-remote-node-identity: "true"
  operator-api-serve-addr: "127.0.0.1:9234"
  ipam: "cluster-pool"
  cluster-pool-ipv4-cidr: "{{ .KubePodsCIDR }}"
  cluster-pool-ipv4-mask-size: "{{ .NodeCidrMaskSize }}"
  disable-cnp-status-updates: "true"
---
# Source: cilium/charts/agent/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium
rules:
- apiGroups:
  - networking.k8s.io
  resources:
  - networkpolicies
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - namespaces
  - services
  - nodes
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
  - update
- apiGroups:
  - ""
  resources:
  - nodes
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
  - get
  - list
  - watch
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies
  - ciliumnetworkpolicies/status
  - ciliumclusterwidenetworkpolicies
  - ciliumclusterwidenetworkpolicies/status
  - ciliumendpoints
  - ciliumendpoints/status
  - ciliumnodes
  - ciliumnodes/status
  - ciliumidentities
  # deprecated, remove in v1.9
  - ciliumidentities/status
  verbs:
  - '*'
---
# Source: cilium/charts/operator/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium-operator
rules:
- apiGroups:
  - ""
  resources:
  # to automatically delete [core|kube]dns pods so that they start being
  # managed by Cilium
  - pods
  verbs:
  - get
  - list
  - watch
  - delete
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  # to perform the translation of a CNP that contains ToGroup to its endpoints
  - services
  - endpoints
  # to check apiserver connectivity
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies
  - ciliumnetworkpolicies/status
  - ciliumclusterwidenetworkpolicies
  - ciliumclusterwidenetworkpolicies/status
  - ciliumendpoints
  - ciliumendpoints/status
  - ciliumnodes
  - ciliumnodes/status
  - ciliumidentities
  - ciliumidentities/status
  verbs:
  - '*'
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - get
  - list
  - watch
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock, as edits to Leases are less
# common and fewer objects in the cluster watch "all Leases".
# Support for leases was introduced in coordination.k8s.io/v1 during the Kubernetes 1.14 release.
# In Cilium we currently don't support HA mode for K8s version < 1.14. This condition makes sure
# that we only authorize access to leases resources in supported K8s versions.
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
  - get
  - update
---
# Source: cilium/charts/agent/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium
subjects:
- kind: ServiceAccount
  name: cilium
  namespace: kube-system
---
# Source: cilium/charts/operator/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium-operator
subjects:
- kind: ServiceAccount
  name: cilium-operator
  namespace: kube-system
---
# Source: cilium/charts/agent/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: cilium
  name: cilium
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: cilium
  template:
    metadata:
      annotations:
        # This annotation plus the CriticalAddonsOnly toleration marks
        # cilium as a critical pod in the cluster, which ensures cilium
        # gets priority scheduling.
        # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        k8s-app: cilium
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: io.cilium/app
                  operator: In
                  values:
                  - operator
              topologyKey: kubernetes.io/hostname
            weight: 100
      containers:
      - args:
        - --config-dir=/tmp/cilium/config-map
        command:
        - cilium-agent
        livenessProbe:
          httpGet:
            host: '127.0.0.1'
            path: /healthz
            port: 9876
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          failureThreshold: 10
          # The initial delay for the liveness probe is intentionally large to
          # avoid an endless kill & restart cycle in the event that the initial
          # bootstrapping takes longer than expected.
          initialDelaySeconds: 120
          periodSeconds: 30
          successThreshold: 1
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            host: '127.0.0.1'
            path: /healthz
            port: 9876
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          failureThreshold: 3
          initialDelaySeconds: 5
          periodSeconds: 30
          successThreshold: 1
          timeoutSeconds: 5
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_FLANNEL_MASTER_DEVICE
          valueFrom:
            configMapKeyRef:
              key: flannel-master-device
              name: cilium-config
              optional: true
        - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT
          valueFrom:
            configMapKeyRef:
              key: flannel-uninstall-on-exit
              name: cilium-config
              optional: true
        - name: CILIUM_CLUSTERMESH_CONFIG
          value: /var/lib/cilium/clustermesh/
        - name: CILIUM_CNI_CHAINING_MODE
          valueFrom:
            configMapKeyRef:
              key: cni-chaining-mode
              name: cilium-config
              optional: true
        - name: CILIUM_CUSTOM_CNI_CONF
          valueFrom:
            configMapKeyRef:
              key: custom-cni-conf
              name: cilium-config
              optional: true
        image: "{{ .CiliumImage }}"
        imagePullPolicy: IfNotPresent
        lifecycle:
          postStart:
            exec:
              command:
              - "/cni-install.sh"
              - "--enable-debug=false"
          preStop:
            exec:
              command:
              - /cni-uninstall.sh
        name: cilium-agent
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
            - SYS_MODULE
          privileged: true
        volumeMounts:
        - mountPath: /sys/fs/bpf
          name: bpf-maps
        - mountPath: /var/run/cilium
          name: cilium-run
        - mountPath: /host/opt/cni/bin
          name: cni-path
        - mountPath: /host/etc/cni/net.d
          name: etc-cni-netd
        - mountPath: /var/lib/cilium/clustermesh
          name: clustermesh-secrets
          readOnly: true
        - mountPath: /tmp/cilium/config-map
          name: cilium-config-path
          readOnly: true
        # Needed to be able to load kernel modules
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
        - mountPath: /run/xtables.lock
          name: xtables-lock
      hostNetwork: true
      initContainers:
      - command:
        - /init-container.sh
        env:
        - name: CILIUM_ALL_STATE
          valueFrom:
            configMapKeyRef:
              key: clean-cilium-state
              name: cilium-config
              optional: true
        - name: CILIUM_BPF_STATE
          valueFrom:
            configMapKeyRef:
              key: clean-cilium-bpf-state
              name: cilium-config
              optional: true
        - name: CILIUM_WAIT_BPF_MOUNT
          valueFrom:
            configMapKeyRef:
              key: wait-bpf-mount
              name: cilium-config
              optional: true
        image: "{{ .CiliumImage }}"
        imagePullPolicy: IfNotPresent
        name: clean-cilium-state
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
          privileged: true
        volumeMounts:
        - mountPath: /sys/fs/bpf
          name: bpf-maps
          mountPropagation: HostToContainer
        - mountPath: /var/run/cilium
          name: cilium-run
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      restartPolicy: Always
      priorityClassName: system-node-critical
      serviceAccount: cilium
      serviceAccountName: cilium
      terminationGracePeriodSeconds: 1
      tolerations:
      - operator: Exists
      volumes:
      # To keep state between restarts / upgrades
      - hostPath:
          path: /var/run/cilium
          type: DirectoryOrCreate
        name: cilium-run
      # To keep state between restarts / upgrades for bpf maps
      - hostPath:
          path: /sys/fs/bpf
          type: DirectoryOrCreate
        name: bpf-maps
      # To install cilium cni plugin in the host
      - hostPath:
          path: /opt/cni/bin
          type: DirectoryOrCreate
        name: cni-path
      # To install cilium cni configuration in the host
      - hostPath:
          path: /etc/cni/net.d
          type: DirectoryOrCreate
        name: etc-cni-netd
      # To be able to load kernel modules
      - hostPath:
          path: /lib/modules
        name: lib-modules
      # To access iptables concurrently with other processes (e.g. kube-proxy)
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock
      # To read the clustermesh configuration
      - name: clustermesh-secrets
        secret:
          defaultMode: 420
          optional: true
          secretName: cilium-clustermesh
      # To read the configuration from the config map
      - configMap:
          name: cilium-config
        name: cilium-config-path
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 2
    type: RollingUpdate
---
# Source: cilium/charts/operator/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    io.cilium/app: operator
    name: cilium-operator
  name: cilium-operator
  namespace: kube-system
spec:
  # We support HA mode only for Kubernetes version > 1.14
  # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
  # for more details.
  replicas: 2
  selector:
    matchLabels:
      io.cilium/app: operator
      name: cilium-operator
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
      labels:
        io.cilium/app: operator
        name: cilium-operator
    spec:
      # In HA mode, cilium-operator pods must not be scheduled on the same
      # node as they will clash with each other.
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: io.cilium/app
                  operator: In
                  values:
                  - operator
              topologyKey: kubernetes.io/hostname
            weight: 100
      containers:
      - args:
        - --config-dir=/tmp/cilium/config-map
        - --debug=$(CILIUM_DEBUG)
        command:
        - cilium-operator-generic
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_DEBUG
          valueFrom:
            configMapKeyRef:
              key: debug
              name: cilium-config
              optional: true
        - name: AWS_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              key: AWS_ACCESS_KEY_ID
              name: cilium-aws
              optional: true
        - name: AWS_SECRET_ACCESS_KEY
          valueFrom:
            secretKeyRef:
              key: AWS_SECRET_ACCESS_KEY
              name: cilium-aws
              optional: true
        - name: AWS_DEFAULT_REGION
          valueFrom:
            secretKeyRef:
              key: AWS_DEFAULT_REGION
              name: cilium-aws
              optional: true
        image: "{{ .OperatorGenericImage }}"
        imagePullPolicy: IfNotPresent
        name: cilium-operator
        livenessProbe:
          httpGet:
            host: '127.0.0.1'
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 3
        volumeMounts:
        - mountPath: /tmp/cilium/config-map
          name: cilium-config-path
          readOnly: true
      hostNetwork: true
      restartPolicy: Always
      priorityClassName: system-cluster-critical
      serviceAccount: cilium-operator
      serviceAccountName: cilium-operator
      tolerations:
      - operator: Exists
      volumes:
      # To read the configuration from the config map
      - configMap:
          name: cilium-config
        name: cilium-config-path
`)))

func GenerateCiliumFiles(mgr *manager.Manager) (string, error) {
	return util.Render(ciliumTempl, util.Data{
		"KubePodsCIDR":         mgr.Cluster.Network.KubePodsCIDR,
		"NodeCidrMaskSize":     mgr.Cluster.Kubernetes.NodeCidrMaskSize,
		"CiliumImage":          preinstall.GetImage(mgr, "cilium").ImageName(),
		"OperatorGenericImage": preinstall.GetImage(mgr, "operator-generic").ImageName(),
	})
}
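
GenerateCiliumFiles renders the template above with the cluster's pod CIDR, node CIDR mask size, and the two image references. util.Render itself is not part of this diff; the following self-contained sketch shows what it is assumed to do with text/template (the render helper name is illustrative):

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// render mirrors what util.Render is assumed to do: execute a template
// against a data map and return the result as a string.
func render(tmpl *template.Template, data map[string]interface{}) (string, error) {
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, data); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	t := template.Must(template.New("demo").Parse(`cluster-pool-ipv4-cidr: "{{ .KubePodsCIDR }}"`))
	out, err := render(t, map[string]interface{}{"KubePodsCIDR": "10.233.64.0/18"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // cluster-pool-ipv4-cidr: "10.233.64.0/18"
}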
@@ -20,6 +20,7 @@ import (
	"fmt"
	kubekeyapiv1alpha1 "github.com/kubesphere/kubekey/api/v1alpha1"
	"github.com/kubesphere/kubekey/pkg/plugins/network/calico"
	"github.com/kubesphere/kubekey/pkg/plugins/network/cilium"
	"github.com/kubesphere/kubekey/pkg/plugins/network/flannel"
	"github.com/kubesphere/kubekey/pkg/util"
	"github.com/kubesphere/kubekey/pkg/util/manager"
@@ -51,6 +52,10 @@ func deployNetworkPlugin(mgr *manager.Manager, _ *kubekeyapiv1alpha1.HostCfg) error {
		if err := deployMacvlan(); err != nil {
			return err
		}
	case "cilium":
		if err := deployCilium(mgr); err != nil {
			return err
		}
	default:
		return errors.New(fmt.Sprintf("This network plugin is not supported: %s", mgr.Cluster.Network.Plugin))
	}
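
One note on the pre-existing default branch shown as context: errors.New(fmt.Sprintf(...)) can be collapsed into a single call with the github.com/pkg/errors package this file already uses. A small sketch, assuming errors.Errorf from pkg/errors:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// unsupported shows the one-call form; errors.Errorf formats and attaches a
// stack trace, equivalent to errors.New(fmt.Sprintf(...)).
func unsupported(plugin string) error {
	return errors.Errorf("This network plugin is not supported: %s", plugin)
}

func main() {
	fmt.Println(unsupported("kindnet"))
}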
@@ -134,3 +139,32 @@ func deployFlannel(mgr *manager.Manager) error {
func deployMacvlan() error {
	return nil
}

func deployCilium(mgr *manager.Manager) error {
	if !util.IsExist(fmt.Sprintf("%s/network-plugin.yaml", mgr.WorkDir)) {
		ciliumContent, err := cilium.GenerateCiliumFiles(mgr)
		if err != nil {
			return err
		}
		err1 := ioutil.WriteFile(fmt.Sprintf("%s/network-plugin.yaml", mgr.WorkDir), []byte(ciliumContent), 0644)
		if err1 != nil {
			return errors.Wrap(errors.WithStack(err1), fmt.Sprintf("Failed to generate network plugin manifests: %s/network-plugin.yaml", mgr.WorkDir))
		}
	}

	ciliumBase64, err1 := exec.Command("/bin/bash", "-c", fmt.Sprintf("tar cfz - -C %s -T /dev/stdin <<< network-plugin.yaml | base64 --wrap=0", mgr.WorkDir)).CombinedOutput()
	if err1 != nil {
		return errors.Wrap(errors.WithStack(err1), "Failed to read network plugin manifests")
	}

	_, err2 := mgr.Runner.ExecuteCmd(fmt.Sprintf("sudo -E /bin/bash -c \"base64 -d <<< '%s' | tar xz -C %s\"", strings.TrimSpace(string(ciliumBase64)), "/etc/kubernetes"), 2, false)
	if err2 != nil {
		return errors.Wrap(errors.WithStack(err2), "Failed to distribute network plugin manifests")
	}

	_, err3 := mgr.Runner.ExecuteCmd("sudo -E /bin/sh -c \"/usr/local/bin/kubectl apply -f /etc/kubernetes/network-plugin.yaml --force\"", 5, true)
	if err3 != nil {
		return errors.Wrap(errors.WithStack(err3), "Failed to deploy network plugin")
	}
	return nil
}
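
deployCilium ships the rendered manifest by tar-and-gzip-ing it locally, base64-encoding the archive so it fits on one remote command line, then decoding and untarring it on the node before kubectl apply. A self-contained sketch of the local encode half of that round-trip; the path and helper behavior are illustrative, not kubekey APIs:

package main

import (
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"os/exec"
	"strings"
)

func main() {
	// Stand-in for the rendered manifest written into mgr.WorkDir.
	if err := ioutil.WriteFile("/tmp/network-plugin.yaml", []byte("kind: ConfigMap\n"), 0644); err != nil {
		panic(err)
	}

	// Local half: tar+gzip the file and base64-encode it so it survives
	// being embedded in a single remote shell command line.
	out, err := exec.Command("/bin/bash", "-c",
		"tar cfz - -C /tmp network-plugin.yaml | base64 --wrap=0").CombinedOutput()
	if err != nil {
		panic(err)
	}
	b64 := strings.TrimSpace(string(out))

	// The remote half would then run:
	//   base64 -d <<< '<payload>' | tar xz -C /etc/kubernetes
	raw, err := base64.StdEncoding.DecodeString(b64)
	if err != nil {
		panic(err)
	}
	fmt.Printf("payload: %d bytes of gzipped tar\n", len(raw))
}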