feat: add create cluster command

Signed-off-by: joyceliu <joyceliu@yunify.com>
joyceliu 2024-03-12 17:39:09 +08:00
parent 6a7dbabd49
commit bdef602ddf
205 changed files with 15792 additions and 3428 deletions

View File

@ -3,12 +3,13 @@ create-role: ## create the files needed by a role under roles/
@echo "Creating role $(role)..."
@mkdir -p roles/$(role)/tasks
@echo "---" > roles/$(role)/tasks/main.yaml
@mkdir -p roles/$(role)/defaults
@echo "" > roles/$(role)/defaults/main.yaml
ifeq ($(VARIABLE_NAME),"full")
@mkdir -p roles/$(role)/handlers
@mkdir -p roles/$(role)/templates
@mkdir -p roles/$(role)/files
@mkdir -p roles/$(role)/vars
@mkdir -p roles/$(role)/defaults
@mkdir -p roles/$(role)/meta
@echo "---" > roles/$(role)/handlers/main.yaml
@echo "---" > roles/$(role)/templates/main.yaml
@ -19,3 +20,6 @@ ifeq ($(VARIABLE_NAME),"full")
endif
@echo "Role $(role) created successfully"
.PHONY: help
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n\nTargets:\n"} /^[0-9A-Za-z_-]+:.*?##/ { printf " \033[36m%-45s\033[0m %s\n", $$1, $$2 } /^\$$\([0-9A-Za-z_-]+\):.*?##/ { gsub("_","-", $$1); printf " \033[36m%-45s\033[0m %s\n", tolower(substr($$1, 3, length($$1)-7)), $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
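Usage sketch (the role name is hypothetical): running make create-role role=myrole scaffolds only tasks/ and defaults/. Note that the ifeq above compares $(VARIABLE_NAME) against the quoted literal "full", so the full layout appears to require the quotes to reach make intact, e.g. make create-role role=myrole VARIABLE_NAME='"full"'.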

builtin/const.go Normal file
View File

@ -0,0 +1,27 @@
/*
Copyright 2024 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file embeds two default files: inventory.yaml and config.yaml.
package builtin
import _ "embed"
//go:embed inventory/inventory.yaml
var DefaultInventory []byte
//go:embed inventory/config.yaml
var DefaultConfig []byte

View File

@ -1,3 +1,6 @@
//go:build builtin
// +build builtin
/*
Copyright 2023 The KubeSphere Authors.
@ -14,11 +17,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package project
package builtin
import (
"embed"
)
//go:embed inventory playbooks roles
var InternalPipeline embed.FS
//go:embed playbooks roles
var BuiltinPipeline embed.FS

View File

@ -0,0 +1,48 @@
apiVersion: kubekey.kubesphere.io/v1
kind: Config
metadata:
name: example
spec:
# zone for kk; determines where files are downloaded from.
# kkzone: cn
# work_dir is the directory where the artifact is extracted.
# work_dir: /var/lib/kubekey/
# cni binary
cni_version: v1.2.0
# helm binary
helm_version: v3.14.2
# docker-compose binary
dockercompose_version: v2.24.6
# harbor image tag
harbor_version: v2.10.1
# registry image tag
registry_version: 2.8.3
# keepalived image tag
keepalived_version: stable
# runc binary
runc_version: v1.1.11
# calicoctl binary
calico_version: v3.27.2
# etcd binary
etcd_version: v3.5.6
# crictl binary
crictl_version: v1.29.0
# cilium helm
cilium_version: 1.15.4
# kubeovn helm
kubeovn_version: 0.1.0
# hybridnet helm
hybridnet_version: 0.6.8
# containerd binary
containerd_version: v1.7.0
# docker binary
docker_version: 24.0.6
# cridockerd
cridockerd_version: v0.3.10
# the version of kubernetes to be installed.
# should be greater than or equal to kube_version_min_required.
kube_version: v1.23.15
# nfs provisioner helm version
nfs_provisioner_version: 4.0.18
# oras binary
oras_version: v1.1.0

View File

@ -25,19 +25,23 @@ spec:
k8s_cluster:
groups:
- kube_control_plane
- kube_node
- kube_worker
# control_plane nodes
kube_control_plane:
hosts:
- localhost
# worker nodes
kube_node:
kube_worker:
hosts:
- localhost
# etcd nodes when etcd_deployment_type is external
etcd:
hosts:
- localhost
registry:
image_registry:
hosts:
- localhost
# nfs nodes for registry storage and kubernetes nfs storage
nfs:
hosts:
- localhost
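For reference, a minimal multi-node spec.groups block using the renamed groups might look like this sketch (host names are hypothetical):

groups:
  k8s_cluster:
    groups:
    - kube_control_plane
    - kube_worker
  kube_control_plane:
    hosts:
    - node1
  kube_worker:
    hosts:
    - node1
    - node2
  image_registry:
    hosts:
    - node1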

View File

@ -0,0 +1,10 @@
---
- import_playbook: pre_install.yaml
- import_playbook: precheck.yaml
- import_playbook: init.yaml
- import_playbook: install.yaml
- import_playbook: post_install.yaml

View File

@ -0,0 +1,12 @@
---
- hosts:
- localhost
roles:
- init/init-artifact
- hosts:
- etcd
- k8s_cluster
- registry
roles:
- init/init-os

View File

@ -0,0 +1,39 @@
---
- hosts:
- nfs
gather_facts: true
roles:
- install/nfs
- hosts:
- etcd
gather_facts: true
roles:
- install/etcd
- hosts:
- image_registry
gather_facts: true
roles:
- install/image-registry
- hosts:
- k8s_cluster
gather_facts: true
roles:
- install/cri
- install/kubernetes
- hosts:
- kube_control_plane
roles:
- role: install/certs
when: renew_certs.enabled|default_if_none:false
- hosts:
- k8s_cluster|random
roles:
- addons/cni
- addons/kata
- addons/nfd
- addons/sc

View File

@ -0,0 +1,19 @@
---
- name: Execute post install scripts
hosts:
- all
tasks:
- name: Copy post install scripts to remote
ignore_errors: yes
copy:
src: "{{ work_dir }}/scripts/post_install_{{ inventory_name }}.sh"
dest: "/etc/kubekey/scripts/post_install_{{ inventory_name }}.sh"
- name: Execute post install scripts
command: |
for file in /etc/kubekey/scripts/post_install_*.sh; do
if [ -f $file ]; then
# execute file
chmod +x $file
$file
fi
done

View File

@ -0,0 +1,19 @@
---
- name: Execute pre install scripts
hosts:
- all
tasks:
- name: Copy pre install scripts to remote
ignore_errors: yes
copy:
src: "{{ work_dir }}/scripts/pre_install_{{ inventory_name }}.sh"
dest: "/etc/kubekey/scripts/pre_install_{{ inventory_name }}.sh"
- name: Execute pre install scripts
command: |
for file in /etc/kubekey/scripts/pre_install_*.sh; do
if [ -f $file ]; then
# execute file
chmod +x $file
$file
fi
done
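For example (hypothetical host name): a script saved as {{ work_dir }}/scripts/pre_install_node1.sh is copied to /etc/kubekey/scripts/ on node1 and executed there; on hosts without a matching script, ignore_errors lets the play continue past the failed copy. The post_install playbook above follows the same per-host naming convention.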

View File

@ -1,13 +1,15 @@
---
- hosts:
- k8s_cluster
- etcd
gather_facts: true
roles:
- precheck/env_check
- hosts:
- localhost
roles:
- role: precheck/artifact_check
when: artifact_file | defined
when: artifact.artifact_file | defined
- hosts:
- k8s_cluster
- etcd
- image_registry
- nfs
gather_facts: true
roles:
- precheck/env_check

View File

@ -0,0 +1,49 @@
cni:
kube_proxy: "{{ kubernetes.kube_proxy.enabled|default_if_none:true }}"
# the apiVersion for policy resources may differ across kubernetes versions. https://kube-api.ninja
api_version_policy: "{%if (kube_version|version:'<v1.21') %}policy/v1beta1{% else %}policy/v1{% endif %}"
kube_network_plugin: "{{ kubernetes.kube_network_plugin | default_if_none:'calico' }}"
# ip cidr config.
# dual stack: supports ipv4/ipv6
ipv6_support: "{% if (kubernetes.networking.pod_cidr|split:','|length>1) %}true{% else %}false{% endif %}"
kube_pods_v4_cidr: "{{ kubernetes.networking.pod_cidr|default_if_none:'10.233.64.0/18'|split:','|first }}"
kube_pods_v6_cidr: "{{ kubernetes.networking.pod_cidr|default_if_none:'10.233.64.0/18'|split:','|last }}"
node_cidr_mask_size: "{{ kubernetes.controller_manager.kube_network_node_prefix|default_if_none:24 }}"
kube_svc_cidr: "{{ kubernetes.networking.service_cidr|default_if_none:'10.233.0.0/18' }}"
multus:
enabled: false
image: kubesphere/multus-cni:v3.8
calico:
# when the cluster has more than 50 nodes, this defaults to true.
typha: "{%if (groups['k8s_cluster']|length > 50) %}true{% else %}false{% endif %}"
veth_mtu: 0
ipip_mode: Always
vxlan_mode: Never
# true is enabled
ipv4pool_nat_outgoing: true
# true is enabled
default_ip_pool: true
# image
cni_image: "calico/cni:{{ calico_version }}"
node_image: "calico/node:{{ calico_version }}"
kube_controller_image: "calico/kube-controllers:{{ calico_version }}"
typha_image: "calico/typha:{{ calico_version }}"
replicas: 1
node_selector: {}
flannel:
# https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md
backend: vxlan
cni_plugin_image: docker.io/flannel/flannel-cni-plugin:v1.4.0-flannel1
flannel_image: "docker.io/flannel/flannel:{{ flannel_version }}"
cilium:
operator_image: cilium/operator-generic:1.15.3
cilium_image: cilium/cilium:1.15.3
k8s_endpoint: "{% if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ groups['kube_control_plane']|first }}{% endif %}"
k8s_port: "{{ kubernetes.apiserver.port|default_if_none:6443 }}"
kubeovn:
replica: 1
registry: docker.io/kubeovn
hybridnet:
registryURL: docker.io
# hybridnet_image: hybridnetdev/hybridnet
# hybridnet_tag: v0.8.8
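To make the dual-stack derivation above concrete, here is a sketch for a two-entry pod_cidr (the IPv6 range is a hypothetical example value):

kubernetes:
  networking:
    pod_cidr: 10.233.64.0/18,fd85::/108

# ipv6_support      -> "true" (the comma split yields two entries)
# kube_pods_v4_cidr -> 10.233.64.0/18 (first entry)
# kube_pods_v6_cidr -> fd85::/108 (last entry)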

View File

@ -0,0 +1,9 @@
---
- name: Generate calico manifest
template:
src: "calico/{{ calico_version|split:'.'|slice:':2'|join:'.' }}.yaml"
dest: "/etc/kubernetes/cni/calico-{{ calico_version }}.yaml"
- name: Apply calico
command: |
/usr/local/bin/kubectl apply -f /etc/kubernetes/cni/calico-{{ calico_version }}.yaml --force

View File

@ -0,0 +1,21 @@
---
- name: Sync cilium helm chart to remote
copy:
src: "{{ work_dir }}/kubekey/cni/cilium-{{ cilium_version }}.tgz"
dest: "/etc/kubernetes/cni/cilium-{{ cilium_version }}.tgz"
# https://docs.cilium.io/en/stable/installation/k8s-install-helm/
- name: Install cilium
command: |
helm install cilium /etc/kubernetes/cni/cilium-{{ cilium_version }}.tgz --namespace kube-system \
--set operator.image.override={{ cni.cilium.operator_image }} \
--set operator.replicas={{ cni.cilium.operator_replicas }} \
--set image.override={{ cni.cilium.cilium_image }} \
--set ipv6.enabled={% if (cni.ipv6_support=="true") %}true{%else%}false{% endif %} \
--set ipv4NativeRoutingCIDR={{ cni.kube_pods_v4_cidr }} \
{% if (cni.ipv6_support=="true") %}
--set ipv6NativeRoutingCIDR={{ cni.kube_pods_v6_cidr }} \
{% endif %}
{% if (cni.kube_proxy=="true") %}
--set kubeProxyReplacement=strict --set k8sServiceHost={{ cni.cilium.k8s_endpoint }} --set k8sServicePort={{ cni.cilium.k8s_port }}
{% endif %}

View File

@ -0,0 +1,10 @@
---
# https://github.com/flannel-io/flannel/blob/master/Documentation/kubernetes.md
- name: Generate flannel manifest
template:
src: "flannel/flannel.yaml"
dest: "/etc/kubernetes/cni/flannel-{{ flannel_version }}.yaml"
- name: Apply flannel
command: |
/usr/local/bin/kubectl apply -f /etc/kubernetes/cni/flannel-{{ flannel_version }}.yaml

View File

@ -0,0 +1,17 @@
---
- name: Sync hybridnet helm chart to remote
copy:
src: "{{ work_dir }}/kubekey/cni/hybridnet-{{ hybridnet_version }}.tgz"
dest: "/etc/kubernetes/cni/hybridnet-{{ hybridnet_version }}.tgz"
# https://artifacthub.io/packages/helm/hybridnet/hybridnet
- name: Install hybridnet
command: |
helm install hybridnet /etc/kubernetes/cni/hybridnet-{{ hybridnet_version }}.tgz --namespace kube-system \
{% if cni.hybridnet.hybridnet_image %}
--set images.hybridnet.image={{ cni.hybridnet.hybridnet_image }} \
{% endif %}
{% if cni.hybridnet.hybridnet_tag %}
--set images.hybridnet.tag={{ cni.hybridnet.hybridnet_tag }} \
{% endif %}
--set image.registryURL={{ cni.hybridnet.registryURL }}

View File

@ -0,0 +1,24 @@
---
- name: Add kubeovn label to node
command: |
kubectl label node -lbeta.kubernetes.io/os=linux kubernetes.io/os=linux --overwrite
kubectl label node -lnode-role.kubernetes.io/control-plane kube-ovn/role=master --overwrite
# kubeovn-0.1.0.tgz is the helm chart version, not the helm appVersion
- name: Sync kubeovn helm chart to remote
copy:
src: "{{ work_dir }}/kubekey/cni/kubeovn-{{ kubeovn_version }}.tgz"
dest: "/etc/kubernetes/cni/kubeovn-{{ kubeovn_version }}.tgz"
# https://kubeovn.github.io/docs/stable/start/one-step-install/#helm-chart
- name: Install kubeovn
command: |
helm install kubeovn /etc/kubernetes/cni/kubeovn-{{ kubeovn_version }}.tgz --set replicaCount={{ cni.kubeovn.replica }} \
--set MASTER_NODES={% for h in groups['kube_control_plane'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %} \
--set global.registry.address={{ cni.kubeovn.registry }} \
--set ipv4.POD_CIDR={{ cni.kube_pods_v4_cidr }} --set ipv4.SVC_CIDR={{ cni.kube_svc_cidr }} \
{% if (cni.ipv6_support=="true") %}
--set networking.NET_STACK=dual_stack \
--set dual_stack.POD_CIDR={{ cni.kube_pods_v4_cidr }},{{ cni.kube_pods_v6_cidr }} \
--set dual_stack.SVC_CIDR={{ cni.kube_svc_cidr }} \
{% endif %}

View File

@ -0,0 +1,18 @@
---
- include_tasks: calico.yaml
when: cni.kube_network_plugin == "calico"
- include_tasks: flannel.yaml
when: cni.kube_network_plugin == "flannel"
- include_tasks: cilium.yaml
when: cni.kube_network_plugin == "cilium"
- include_tasks: kubeovn.yaml
when: cni.kube_network_plugin == "kubeovn"
- include_tasks: hybridnet.yaml
when: cni.kube_network_plugin == "hyvbridnet"
- include_tasks: multus.yaml
when: cni.multus.enabled
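The dispatch above keys off cni.kube_network_plugin, which the role defaults derive from the Config spec. A sketch of selecting cilium with multus layered on top (assuming the role defaults can be overridden through the same variable paths):

kubernetes:
  kube_network_plugin: cilium
cni:
  multus:
    enabled: true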

View File

@ -0,0 +1,9 @@
---
- name: Generate multus yaml
template:
src: multus/multus.yaml
dest: /etc/kubernetes/cni/multus.yaml
- name: Apply multus
command: |
kubectl apply -f /etc/kubernetes/cni/multus.yaml

View File

@ -0,0 +1,35 @@
---
# Source: calico/templates/calico-kube-controllers.yaml
# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
apiVersion: {{ cni.api_version_policy }}
kind: PodDisruptionBudget
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
{% if (cni.calico.typha=="true") %}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
apiVersion: {{ cni.api_version_policy }}
kind: PodDisruptionBudget
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-typha
{% endif %}

File diff suppressed because it is too large

View File

@ -0,0 +1,213 @@
---
kind: Namespace
apiVersion: v1
metadata:
name: kube-flannel
labels:
k8s-app: flannel
pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: flannel
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: flannel
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: flannel
name: flannel
namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-flannel
labels:
tier: node
k8s-app: flannel
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "{{ cni.kube_pods_v4_cidr }}",
{% if (cni.ipv6_support=="true") %}
"EnableIPv6": true,
"IPv6Network":"{{ cni.kube_pods_v6_cidr }}",
{% endif %}
"EnableNFTables": {{ cni.kube_proxy }},
"Backend": {
"Type": "{{ cni.flannel.backend }}"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-flannel
labels:
tier: node
app: flannel
k8s-app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
image: {{ cni.flannel.cni_plugin_image }}
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
image: {{ cni.flannel.flannel_image }}
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: {{ cni.flannel.flannel_image }}
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: EVENT_QUEUE_DEPTH
value: "5000"
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: xtables-lock
mountPath: /run/xtables.lock
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni-plugin
hostPath:
path: /opt/cni/bin
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate

View File

@ -0,0 +1,206 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
scope: Namespaced
names:
plural: network-attachment-definitions
singular: network-attachment-definition
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
Working Group to express the intent for attaching pods to one or more logical or physical
networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
type: object
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the
latest internal value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
type: object
properties:
config:
description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
rules:
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
- pods/status
verbs:
- get
- update
- apiGroups:
- ""
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: multus
subjects:
- kind: ServiceAccount
name: multus
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: multus
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: multus-cni-config
namespace: kube-system
labels:
tier: node
app: multus
data:
# NOTE: If you'd prefer to manually apply a configuration file, you may create one here.
# In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod
# change the "args" line below from
# - "--multus-conf-file=auto"
# to:
# "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
# Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the
# /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet.
cni-conf.json: |
{
"name": "multus-cni-network",
"type": "multus",
"capabilities": {
"portMappings": true
},
"delegates": [
{
"cniVersion": "0.3.1",
"name": "default-cni-network",
"plugins": [
{
"type": "flannel",
"name": "flannel.1",
"delegate": {
"isDefaultGateway": true,
"hairpinMode": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
],
"kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-multus-ds
namespace: kube-system
labels:
tier: node
app: multus
name: multus
spec:
selector:
matchLabels:
name: multus
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: multus
name: multus
spec:
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
image: {{ cni.multus.image }}
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=auto"
- "--cni-version=0.3.1"
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: cnibin
mountPath: /host/opt/cni/bin
- name: multus-cfg
mountPath: /tmp/multus-conf
terminationGracePeriodSeconds: 10
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: /opt/cni/bin
- name: multus-cfg
configMap:
name: multus-cni-config
items:
- key: cni-conf.json
path: 70-multus.conf

View File

@ -0,0 +1,206 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
scope: Namespaced
names:
plural: network-attachment-definitions
singular: network-attachment-definition
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
Working Group to express the intent for attaching pods to one or more logical or physical
networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
type: object
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the
latest internal value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
type: object
properties:
config:
description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
rules:
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
- pods/status
verbs:
- get
- update
- apiGroups:
- ""
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: multus
subjects:
- kind: ServiceAccount
name: multus
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: multus
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: multus-cni-config
namespace: kube-system
labels:
tier: node
app: multus
data:
# NOTE: If you'd prefer to manually apply a configuration file, you may create one here.
# In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod
# change the "args" line below from
# - "--multus-conf-file=auto"
# to:
# "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
# Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the
# /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet.
cni-conf.json: |
{
"name": "multus-cni-network",
"type": "multus",
"capabilities": {
"portMappings": true
},
"delegates": [
{
"cniVersion": "0.3.1",
"name": "default-cni-network",
"plugins": [
{
"type": "flannel",
"name": "flannel.1",
"delegate": {
"isDefaultGateway": true,
"hairpinMode": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
],
"kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-multus-ds
namespace: kube-system
labels:
tier: node
app: multus
name: multus
spec:
selector:
matchLabels:
name: multus
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: multus
name: multus
spec:
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
image: {{ cni.multus.image }}
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=auto"
- "--cni-version=0.3.1"
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: cnibin
mountPath: /host/opt/cni/bin
- name: multus-cfg
mountPath: /tmp/multus-conf
terminationGracePeriodSeconds: 10
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: /opt/cni/bin
- name: multus-cfg
configMap:
name: multus-cni-config
items:
- key: cni-conf.json
path: 70-multus.conf

View File

@ -0,0 +1,3 @@
kata:
enabled: false
image: kubesphere/kata-deploy:stable

View File

@ -0,0 +1,11 @@
---
- name: Generate kata deploy file
template:
src: "kata-deploy.yaml"
dest: "/etc/kubernetes/addons/kata-deploy.yaml"
when: kata.enabled
- name: Deploy kata
command: |
kubectl apply -f /etc/kubernetes/addons/kata-deploy.yaml
when: kata.enabled

View File

@ -0,0 +1,127 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kata-label-node
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-labeler
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kata-label-node-rb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: node-labeler
subjects:
- kind: ServiceAccount
name: kata-label-node
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kata-deploy
namespace: kube-system
spec:
selector:
matchLabels:
name: kata-deploy
template:
metadata:
labels:
name: kata-deploy
spec:
serviceAccountName: kata-label-node
containers:
- name: kube-kata
image: {{ kata.image }}
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command: ["bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh cleanup"]
command: [ "bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh install" ]
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
privileged: false
volumeMounts:
- name: crio-conf
mountPath: /etc/crio/
- name: containerd-conf
mountPath: /etc/containerd/
- name: kata-artifacts
mountPath: /opt/kata/
- name: dbus
mountPath: /var/run/dbus
- name: systemd
mountPath: /run/systemd
- name: local-bin
mountPath: /usr/local/bin/
volumes:
- name: crio-conf
hostPath:
path: /etc/crio/
- name: containerd-conf
hostPath:
path: /etc/containerd/
- name: kata-artifacts
hostPath:
path: /opt/kata/
type: DirectoryOrCreate
- name: dbus
hostPath:
path: /var/run/dbus
- name: systemd
hostPath:
path: /run/systemd
- name: local-bin
hostPath:
path: /usr/local/bin/
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1beta1
metadata:
name: kata-qemu
handler: kata-qemu
overhead:
podFixed:
memory: "160Mi"
cpu: "250m"
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1beta1
metadata:
name: kata-clh
handler: kata-clh
overhead:
podFixed:
memory: "130Mi"
cpu: "250m"
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1beta1
metadata:
name: kata-fc
handler: kata-fc
overhead:
podFixed:
memory: "130Mi"
cpu: "250m"

View File

@ -0,0 +1,3 @@
nfd:
enabled: false
image: kubesphere/node-feature-discovery:v0.10.0

View File

@ -0,0 +1,11 @@
---
- name: Generate nfd deploy file
template:
src: "nfd-deploy.yaml"
dest: "/etc/kubernetes/addons/nfd-deploy.yaml"
when: nfd.enabled
- name: Deploy nfd
command: |
kubectl apply -f /etc/kubernetes/addons/nfd-deploy.yaml
when: nfd.enabled

View File

@ -0,0 +1,621 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: node-feature-discovery
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.7.0
creationTimestamp: null
name: nodefeaturerules.nfd.k8s-sigs.io
spec:
group: nfd.k8s-sigs.io
names:
kind: NodeFeatureRule
listKind: NodeFeatureRuleList
plural: nodefeaturerules
singular: nodefeaturerule
scope: Cluster
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: NodeFeatureRule resource specifies a configuration for feature-based customization of node objects, such as node labeling.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: NodeFeatureRuleSpec describes a NodeFeatureRule.
properties:
rules:
description: Rules is a list of node customization rules.
items:
description: Rule defines a rule for node customization such as labeling.
properties:
labels:
additionalProperties:
type: string
description: Labels to create if the rule matches.
type: object
labelsTemplate:
description: LabelsTemplate specifies a template to expand for dynamically generating multiple labels. Data (after template expansion) must be keys with an optional value (<key>[=<value>]) separated by newlines.
type: string
matchAny:
description: MatchAny specifies a list of matchers one of which must match.
items:
description: MatchAnyElem specifies one sub-matcher of MatchAny.
properties:
matchFeatures:
description: MatchFeatures specifies a set of matcher terms all of which must match.
items:
description: FeatureMatcherTerm defines requirements against one feature set. All requirements (specified as MatchExpressions) are evaluated against each element in the feature set.
properties:
feature:
type: string
matchExpressions:
additionalProperties:
description: "MatchExpression specifies an expression to evaluate against a set of input values. It contains an operator that is applied when matching the input and an array of values that the operator evaluates the input against. \n NB: CreateMatchExpression or MustCreateMatchExpression() should be used for creating new instances. NB: Validate() must be called if Op or Value fields are modified or if a new instance is created from scratch without using the helper functions."
properties:
op:
description: Op is the operator to be applied.
enum:
- In
- NotIn
- InRegexp
- Exists
- DoesNotExist
- Gt
- Lt
- GtLt
- IsTrue
- IsFalse
type: string
value:
description: Value is the list of values that the operand evaluates the input against. Value should be empty if the operator is Exists, DoesNotExist, IsTrue or IsFalse. Value should contain exactly one element if the operator is Gt or Lt and exactly two elements if the operator is GtLt. In other cases Value should contain at least one element.
items:
type: string
type: array
required:
- op
type: object
description: MatchExpressionSet contains a set of MatchExpressions, each of which is evaluated against a set of input values.
type: object
required:
- feature
- matchExpressions
type: object
type: array
required:
- matchFeatures
type: object
type: array
matchFeatures:
description: MatchFeatures specifies a set of matcher terms all of which must match.
items:
description: FeatureMatcherTerm defines requirements against one feature set. All requirements (specified as MatchExpressions) are evaluated against each element in the feature set.
properties:
feature:
type: string
matchExpressions:
additionalProperties:
description: "MatchExpression specifies an expression to evaluate against a set of input values. It contains an operator that is applied when matching the input and an array of values that the operator evaluates the input against. \n NB: CreateMatchExpression or MustCreateMatchExpression() should be used for creating new instances. NB: Validate() must be called if Op or Value fields are modified or if a new instance is created from scratch without using the helper functions."
properties:
op:
description: Op is the operator to be applied.
enum:
- In
- NotIn
- InRegexp
- Exists
- DoesNotExist
- Gt
- Lt
- GtLt
- IsTrue
- IsFalse
type: string
value:
description: Value is the list of values that the operand evaluates the input against. Value should be empty if the operator is Exists, DoesNotExist, IsTrue or IsFalse. Value should contain exactly one element if the operator is Gt or Lt and exactly two elements if the operator is GtLt. In other cases Value should contain at least one element.
items:
type: string
type: array
required:
- op
type: object
description: MatchExpressionSet contains a set of MatchExpressions, each of which is evaluated against a set of input values.
type: object
required:
- feature
- matchExpressions
type: object
type: array
name:
description: Name of the rule.
type: string
vars:
additionalProperties:
type: string
description: Vars is the variables to store if the rule matches. Variables do not directly inflict any changes in the node object. However, they can be referenced from other rules enabling more complex rule hierarchies, without exposing intermediary output values as labels.
type: object
varsTemplate:
description: VarsTemplate specifies a template to expand for dynamically generating multiple variables. Data (after template expansion) must be keys with an optional value (<key>[=<value>]) separated by newlines.
type: string
required:
- name
type: object
type: array
required:
- rules
type: object
required:
- spec
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfd-master
namespace: node-feature-discovery
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nfd-master
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- patch
- update
- list
- apiGroups:
- topology.node.k8s.io
resources:
- noderesourcetopologies
verbs:
- create
- get
- update
- apiGroups:
- nfd.k8s-sigs.io
resources:
- nodefeaturerules
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nfd-master
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nfd-master
subjects:
- kind: ServiceAccount
name: nfd-master
namespace: node-feature-discovery
---
apiVersion: v1
data:
nfd-worker.conf: |
#core:
# labelWhiteList:
# noPublish: false
# sleepInterval: 60s
# featureSources: [all]
# labelSources: [all]
# klog:
# addDirHeader: false
# alsologtostderr: false
# logBacktraceAt:
# logtostderr: true
# skipHeaders: false
# stderrthreshold: 2
# v: 0
# vmodule:
## NOTE: the following options are not dynamically run-time configurable
## and require a nfd-worker restart to take effect after being changed
# logDir:
# logFile:
# logFileMaxSize: 1800
# skipLogHeaders: false
#sources:
# cpu:
# cpuid:
## NOTE: whitelist has priority over blacklist
# attributeBlacklist:
# - "BMI1"
# - "BMI2"
# - "CLMUL"
# - "CMOV"
# - "CX16"
# - "ERMS"
# - "F16C"
# - "HTT"
# - "LZCNT"
# - "MMX"
# - "MMXEXT"
# - "NX"
# - "POPCNT"
# - "RDRAND"
# - "RDSEED"
# - "RDTSCP"
# - "SGX"
# - "SSE"
# - "SSE2"
# - "SSE3"
# - "SSE4"
# - "SSE42"
# - "SSSE3"
# attributeWhitelist:
# kernel:
# kconfigFile: "/path/to/kconfig"
# configOpts:
# - "NO_HZ"
# - "X86"
# - "DMI"
# pci:
# deviceClassWhitelist:
# - "0200"
# - "03"
# - "12"
# deviceLabelFields:
# - "class"
# - "vendor"
# - "device"
# - "subsystem_vendor"
# - "subsystem_device"
# usb:
# deviceClassWhitelist:
# - "0e"
# - "ef"
# - "fe"
# - "ff"
# deviceLabelFields:
# - "class"
# - "vendor"
# - "device"
# custom:
# # The following feature demonstrates the capabilities of the matchFeatures
# - name: "my custom rule"
# labels:
# my-ng-feature: "true"
# # matchFeatures implements a logical AND over all matcher terms in the
# # list (i.e. all of the terms, or per-feature matchers, must match)
# matchFeatures:
# - feature: cpu.cpuid
# matchExpressions:
# AVX512F: {op: Exists}
# - feature: cpu.cstate
# matchExpressions:
# enabled: {op: IsTrue}
# - feature: cpu.pstate
# matchExpressions:
# no_turbo: {op: IsFalse}
# scaling_governor: {op: In, value: ["performance"]}
# - feature: cpu.rdt
# matchExpressions:
# RDTL3CA: {op: Exists}
# - feature: cpu.sst
# matchExpressions:
# bf.enabled: {op: IsTrue}
# - feature: cpu.topology
# matchExpressions:
# hardware_multithreading: {op: IsFalse}
#
# - feature: kernel.config
# matchExpressions:
# X86: {op: Exists}
# LSM: {op: InRegexp, value: ["apparmor"]}
# - feature: kernel.loadedmodule
# matchExpressions:
# e1000e: {op: Exists}
# - feature: kernel.selinux
# matchExpressions:
# enabled: {op: IsFalse}
# - feature: kernel.version
# matchExpressions:
# major: {op: In, value: ["5"]}
# minor: {op: Gt, value: ["10"]}
#
# - feature: storage.block
# matchExpressions:
# rotational: {op: In, value: ["0"]}
# dax: {op: In, value: ["0"]}
#
# - feature: network.device
# matchExpressions:
# operstate: {op: In, value: ["up"]}
# speed: {op: Gt, value: ["100"]}
#
# - feature: memory.numa
# matchExpressions:
# node_count: {op: Gt, value: ["2"]}
# - feature: memory.nv
# matchExpressions:
# devtype: {op: In, value: ["nd_dax"]}
# mode: {op: In, value: ["memory"]}
#
# - feature: system.osrelease
# matchExpressions:
# ID: {op: In, value: ["fedora", "centos"]}
# - feature: system.name
# matchExpressions:
# nodename: {op: InRegexp, value: ["^worker-X"]}
#
# - feature: local.label
# matchExpressions:
# custom-feature-knob: {op: Gt, value: ["100"]}
#
# # The following feature demonstrates the capabilities of the matchAny
# - name: "my matchAny rule"
# labels:
# my-ng-feature-2: "my-value"
# # matchAny implements a logical IF over all elements (sub-matchers) in
# # the list (i.e. at least one feature matcher must match)
# matchAny:
# - matchFeatures:
# - feature: kernel.loadedmodule
# matchExpressions:
# driver-module-X: {op: Exists}
# - feature: pci.device
# matchExpressions:
# vendor: {op: In, value: ["8086"]}
# class: {op: In, value: ["0200"]}
# - matchFeatures:
# - feature: kernel.loadedmodule
# matchExpressions:
# driver-module-Y: {op: Exists}
# - feature: usb.device
# matchExpressions:
# vendor: {op: In, value: ["8086"]}
# class: {op: In, value: ["02"]}
#
# # The following features demonstrate label templating capabilities
# - name: "my template rule"
# labelsTemplate: |
# matchFeatures:
# - feature: system.osrelease
# matchExpressions:
# ID: {op: InRegexp, value: ["^open.*"]}
# VERSION_ID.major: {op: In, value: ["13", "15"]}
#
# - name: "my template rule 2"
# matchFeatures:
# - feature: pci.device
# matchExpressions:
# class: {op: InRegexp, value: ["^06"]}
# vendor: ["8086"]
# - feature: cpu.cpuid
# matchExpressions:
# AVX: {op: Exists}
#
# # The following examples demonstrate vars field and back-referencing
# # previous labels and vars
# - name: "my dummy kernel rule"
# labels:
# "my.kernel.feature": "true"
# matchFeatures:
# - feature: kernel.version
# matchExpressions:
# major: {op: Gt, value: ["2"]}
#
# - name: "my dummy rule with no labels"
# vars:
# "my.dummy.var": "1"
# matchFeatures:
# - feature: cpu.cpuid
# matchExpressions: {}
#
# - name: "my rule using backrefs"
# labels:
# "my.backref.feature": "true"
# matchFeatures:
# - feature: rule.matched
# matchExpressions:
# my.kernel.feature: {op: IsTrue}
# my.dummy.var: {op: Gt, value: ["0"]}
#
kind: ConfigMap
metadata:
name: nfd-worker-conf
namespace: node-feature-discovery
---
apiVersion: v1
kind: Service
metadata:
name: nfd-master
namespace: node-feature-discovery
spec:
ports:
- port: 8080
protocol: TCP
selector:
app: nfd-master
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: nfd
name: nfd-master
namespace: node-feature-discovery
spec:
replicas: 1
selector:
matchLabels:
app: nfd-master
template:
metadata:
labels:
app: nfd-master
spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: In
values:
- ""
weight: 1
- preference:
matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: In
values:
- ""
weight: 1
containers:
- args: []
command:
- nfd-master
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: {{ nfd.image }}
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- /usr/bin/grpc_health_probe
- -addr=:8080
initialDelaySeconds: 10
periodSeconds: 10
name: nfd-master
readinessProbe:
exec:
command:
- /usr/bin/grpc_health_probe
- -addr=:8080
failureThreshold: 10
initialDelaySeconds: 5
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts: []
serviceAccount: nfd-master
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Equal
value: ""
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Equal
value: ""
volumes: []
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: nfd
name: nfd-worker
namespace: node-feature-discovery
spec:
selector:
matchLabels:
app: nfd-worker
template:
metadata:
labels:
app: nfd-worker
spec:
containers:
- args:
- -server=nfd-master:8080
command:
- nfd-worker
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: {{ nfd.image }}
imagePullPolicy: IfNotPresent
name: nfd-worker
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /host-boot
name: host-boot
readOnly: true
- mountPath: /host-etc/os-release
name: host-os-release
readOnly: true
- mountPath: /host-sys
name: host-sys
readOnly: true
- mountPath: /host-usr/lib
name: host-usr-lib
readOnly: true
- mountPath: /etc/kubernetes/node-feature-discovery/source.d/
name: source-d
readOnly: true
- mountPath: /etc/kubernetes/node-feature-discovery/features.d/
name: features-d
readOnly: true
- mountPath: /etc/kubernetes/node-feature-discovery
name: nfd-worker-conf
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
volumes:
- hostPath:
path: /boot
name: host-boot
- hostPath:
path: /etc/os-release
name: host-os-release
- hostPath:
path: /sys
name: host-sys
- hostPath:
path: /usr/lib
name: host-usr-lib
- hostPath:
path: /etc/kubernetes/node-feature-discovery/source.d/
name: source-d
- hostPath:
path: /etc/kubernetes/node-feature-discovery/features.d/
name: features-d
- configMap:
name: nfd-worker-conf
name: nfd-worker-conf

View File

@ -0,0 +1,12 @@
sc:
local:
enabled: true
default: true
provisioner_image: openebs/provisioner-localpv:3.3.0
linux_utils_image: openebs/linux-utils:3.3.0
path: /var/openebs/local
nfs: # each k8s_cluster node must have nfs-utils installed
enabled: false
default: false
server: "{{ groups['nfs']|first }}"
path: /share/kubernetes
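A sketch of handing the default storage class to NFS instead of the local provisioner (assuming these defaults can be overridden through the same sc variable tree):

sc:
  local:
    default: false
  nfs:
    enabled: true
    default: true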

View File

@ -0,0 +1,9 @@
---
- name: Generate local manifest
template:
src: "local-volume.yaml"
dest: "/etc/kubernetes/addons/local-volume.yaml"
- name: Deploy local
command: |
/usr/local/bin/kubectl apply -f /etc/kubernetes/addons/local-volume.yaml

View File

@ -0,0 +1,6 @@
---
- include_tasks: local.yaml
when: sc.local.enabled
- include_tasks: nfs.yaml
when: sc.nfs.enabled

View File

@ -0,0 +1,11 @@
---
- name: Sync nfs provisioner helm to remote
copy:
src: "{{ work_dir }}/kubekey/sc/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz"
dest: "/etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz"
- name: Deploy nfs provisioner
command: |
helm install nfs-subdir-external-provisioner /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz --namespace kube-system \
--set nfs.server={{ sc.nfs.server }} --set nfs.path={{ sc.nfs.path }} \
--set storageClass.defaultClass={% if (sc.nfs.default) %}true{% else %}false{% endif %}

View File

@ -0,0 +1,150 @@
---
# Sample storage classes for OpenEBS Local PV
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local
annotations:
storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce"]'
storageclass.beta.kubernetes.io/is-default-class: {% if (sc.local.default) %}"true"{% else %}"false"{% endif %}
openebs.io/cas-type: local
cas.openebs.io/config: |
- name: StorageType
value: "hostpath"
- name: BasePath
value: "{{ sc.local.path }}"
provisioner: openebs.io/local
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
# Create Maya Service Account
apiVersion: v1
kind: ServiceAccount
metadata:
name: openebs-maya-operator
namespace: kube-system
---
# Define Role that allows operations on K8s pods/deployments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-maya-operator
rules:
- apiGroups: ["*"]
resources: ["nodes", "nodes/proxy"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["namespaces", "services", "pods", "pods/exec", "deployments", "deployments/finalizers", "replicationcontrollers", "replicasets", "events", "endpoints", "configmaps", "secrets", "jobs", "cronjobs"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["statefulsets", "daemonsets"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["resourcequotas", "limitranges"]
verbs: ["list", "watch"]
- apiGroups: ["*"]
resources: ["ingresses", "horizontalpodautoscalers", "verticalpodautoscalers", "poddisruptionbudgets", "certificatesigningrequests"]
verbs: ["list", "watch"]
- apiGroups: ["*"]
resources: ["storageclasses", "persistentvolumeclaims", "persistentvolumes"]
verbs: ["*"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: [ "get", "list", "create", "update", "delete", "patch"]
- apiGroups: ["openebs.io"]
resources: [ "*"]
verbs: ["*"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
# Bind the Service Account with the Role Privileges.
# TODO: Check if default account also needs to be there
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-maya-operator
subjects:
- kind: ServiceAccount
name: openebs-maya-operator
namespace: kube-system
roleRef:
kind: ClusterRole
name: openebs-maya-operator
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: openebs-localpv-provisioner
namespace: kube-system
labels:
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
openebs.io/version: 3.3.0
spec:
selector:
matchLabels:
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
openebs.io/version: 3.3.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: openebs-provisioner-hostpath
imagePullPolicy: IfNotPresent
image: {{ sc.local.provisioner_image }}
env:
# OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s
# based on this address. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_K8S_MASTER
# value: "http://10.128.0.12:8080"
# OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s
# based on this config. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_KUBE_CONFIG
# value: "/home/ubuntu/.kube/config"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: OPENEBS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as
# environment variable
- name: OPENEBS_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
- name: OPENEBS_IO_ENABLE_ANALYTICS
value: "true"
- name: OPENEBS_IO_INSTALLER_TYPE
value: "openebs-operator-lite"
- name: OPENEBS_IO_HELPER_IMAGE
value: "{{ sc.local.linux_utils_image }}"
# LEADER_ELECTION_ENABLED is used to enable/disable leader election. By default
# leader election is enabled.
#- name: LEADER_ELECTION_ENABLED
# value: "true"
# OPENEBS_IO_IMAGE_PULL_SECRETS environment variable is used to pass the image pull secrets
# to the helper pod launched by local-pv hostpath provisioner
#- name: OPENEBS_IO_IMAGE_PULL_SECRETS
# value: ""
livenessProbe:
exec:
command:
- sh
- -c
- test $(pgrep -c "^provisioner-loc.*") = 1
initialDelaySeconds: 30
periodSeconds: 60

View File

@ -0,0 +1,101 @@
artifact:
arch: [ "amd64" ]
# offline artifact package for kk.
# artifact_file: /tmp/kubekey.tar.gz
# the md5 checksum file of artifact_file.
# artifact_md5: /tmp/artifact.md5
# how to generate cert files. supported values: IfNotPresent, Always
gen_cert_policy: IfNotPresent
artifact_url:
etcd:
amd64: |
{% if (kkzone == "cn") %}https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz{% else %}https://github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-arm64.tar.gz{% else %}https://github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-arm64.tar.gz{% endif %}
kubeadm:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/amd64/kubeadm{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/kubeadm{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/arm64/kubeadm{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/arm64/kubeadm{% endif %}
kubelet:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/amd64/kubelet{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/kubelet{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/arm64/kubelet{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/arm64/kubelet{% endif %}
kubectl:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/amd64/kubectl{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/kubectl{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/arm64/kubectl{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/arm64/kubectl{% endif %}
cni:
amd64: |
{% if (kkzone == 'cn') %}https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-amd64-{{ cni_version }}.tgz{% else %}https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-amd64-{{ cni_version }}.tgz{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-arm64-{{ cni_version }}.tgz{% else %}https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-arm64-{{ cni_version }}.tgz{% endif %}
helm:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-helm.pek3b.qingstor.com/helm-{{ helm_version }}-linux-amd64.tar.gz{% else %}https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-helm.pek3b.qingstor.com/helm-{{ helm_version }}-linux-arm64.tar.gz{% else %}https://get.helm.sh/helm-{{ helm_version }}-linux-arm64.tar.gz{% endif %}
crictl:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-amd64.tar.gz{% else %}https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-amd64.tar.gz{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-arm64.tar.gz{% else %}https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-arm64.tar.gz{% endif %}
docker:
amd64: |
{% if (kkzone == 'cn') %}https://mirrors.aliyun.com/docker-ce/linux/static/stable/x86_64/docker-{{ docker_version }}.tgz{% else %}https://download.docker.com/linux/static/stable/x86_64/docker-{{ docker_version }}.tgz{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://mirrors.aliyun.com/docker-ce/linux/static/stable/aarch64/docker-{{ docker_version }}.tgz{% else %}https://download.docker.com/linux/static/stable/aarch64/docker-{{ docker_version }}.tgz{% endif %}
cridockerd:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.amd64.tgz{% else %}https://github.com/Mirantis/cri-dockerd/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.amd64.tgz{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.arm64.tgz{% else %}https://github.com/Mirantis/cri-dockerd/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.arm64.tgz{% endif %}
containerd:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-amd64.tar.gz{% else %}https://github.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-amd64.tar.gz{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-arm64.tar.gz{% else %}https://github.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-arm64.tar.gz{% endif %}
runc:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.amd64{% else %}https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.amd64{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.arm64{% else %}https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.arm64{% endif %}
calicoctl:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-amd64{% else %}https://github.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-amd64{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-arm64{% else %}https://github.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-arm64{% endif %}
dockercompose:
amd64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-x86_64{% else %}https://github.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-x86_64{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-aarch64{% else %}https://github.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-aarch64{% endif %}
# registry:
# amd64: |
# {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-amd64.tgz{% else %}https://github.com/kubesphere/kubekey/releases/download/v2.0.0-alpha.1/registry-{{ registry_version }}-linux-amd64.tgz{% endif %}
# arm64: |
# {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-arm64.tgz{% else %}https://github.com/kubesphere/kubekey/releases/download/v2.0.0-alpha.1/registry-{{ registry_version }}-linux-arm64.tgz{% endif %}
harbor:
amd64: |
{% if (kkzone == 'cn') %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz{% else %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz{% endif %}
# arm64: |
# {% if (kkzone == 'cn') %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-{{ harbor_version }}-linux-arm64.tgz{% else %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-{{ harbor_version }}-linux-arm64.tgz{% endif %}
# keepalived:
# amd64: |
# {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-amd64.tgz{% else %}https://github.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-amd64.tgz{% endif %}
# arm64: |
# {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-arm64.tgz{% else %}https://github.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-arm64.tgz{% endif %}
oras:
amd64: |
https://github.com/oras-project/oras/releases/download/{{ oras_version }}/oras_{{ oras_version|slice:'1:' }}_linux_amd64.tar.gz
arm64: |
https://github.com/oras-project/oras/releases/download/{{ oras_version }}/oras_{{ oras_version|slice:'1:' }}_linux_arm64.tar.gz
cilium: https://helm.cilium.io/cilium-{{ cilium_version }}.tgz
kubeovn: https://kubeovn.github.io/kube-ovn/kube-ovn-{{ kubeovn_version }}.tgz
hybridnet: https://github.com/alibaba/hybridnet/releases/download/helm-chart-{{ hybridnet_version }}/hybridnet-{{ hybridnet_version }}.tgz
nfs_provisioner: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/releases/download/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz
images:
auth: []
list: []
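# The images section above ships empty. A filled-in example might look like the
# following -- the registry endpoint and credentials are hypothetical, shown only
# to illustrate the shape consumed by the oras login/copy tasks further below:
# images:
#   auth:
#     - url: dockerhub.kubekey.local
#       user: admin
#       password: Harbor12345
#   list:
#     - docker.io/calico/cni:v3.27.2
#     - docker.io/calico/node:v3.27.2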

View File

@ -0,0 +1,284 @@
---
- name: Check binaries for etcd
command: |
artifact_name={{ artifact.artifact_url.etcd[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/etcd/{{ etcd_version }}/{{ item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.etcd[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.etcd[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- etcd_version | defined && etcd_version != ""
- name: Check binaries for kube
command: |
kube_path={{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ item }}
if [ ! -f $kube_path/kubelet ]; then
mkdir -p $kube_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.kubelet[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $kube_path/kubelet {{ artifact.artifact_url.kubelet[item] }}
fi
if [ ! -f $kube_path/kubeadm ]; then
mkdir -p $kube_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.kubeadm[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $kube_path/kubeadm {{ artifact.artifact_url.kubeadm[item] }}
fi
if [ ! -f $kube_path/kubectl ]; then
mkdir -p $kube_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.kubectl[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $kube_path/kubectl {{ artifact.artifact_url.kubectl[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- kube_version | defined && kube_version != ""
- name: Check binaries for cni
command: |
artifact_name={{ artifact.artifact_url.cni[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/cni/{{ cni_version }}/{{ item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.cni[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.cni[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- cni_version | defined && cni_version != ""
- name: Check binaries for helm
command: |
artifact_name={{ artifact.artifact_url.helm[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/helm/{{ helm_version }}/{{ item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.helm[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.helm[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- helm_version | defined && helm_version != ""
- name: Check binaries for crictl
command: |
artifact_name={{ artifact.artifact_url.crictl[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/crictl/{{ crictl_version }}/{{ item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.crictl[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.crictl[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- crictl_version | defined && crictl_version != ""
- name: Check binaries for docker
command: |
artifact_name={{ artifact.artifact_url.docker[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/docker/{{ docker_version }}/{{ item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.docker[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.docker[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- docker_version | defined && docker_version != ""
- name: Check binaries for cridockerd
command: |
artifact_name={{ artifact.artifact_url.cridockerd[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/cri-dockerd/{{ cridockerd_version }}/{{ item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.cridockerd[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.cridockerd[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- cridockerd_version | defined && cridockerd_version != ""
- name: Check binaries for containerd
command: |
artifact_name={{ artifact.artifact_url.containerd[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/containerd/{{ containerd_version }}/{{ item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.containerd[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.containerd[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- containerd_version | defined && containerd_version != ""
- name: Check binaries for runc
command: |
artifact_name={{ artifact.artifact_url.runc[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/runc/{{ runc_version }}/{{ item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.runc[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.runc[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- runc_version | defined && runc_version != ""
- name: Check binaries for calicoctl
command: |
artifact_name={{ artifact.artifact_url.calicoctl[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/cni/{{ calico_version }}/{{ item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.calicoctl[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.calicoctl[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- calico_version | defined && calico_version != ""
- name: Check binaries for registry
command: |
artifact_name={{ artifact.artifact_url.registry[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/image-registry/registry/{{ registry_version }}/{{ item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.registry[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.registry[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- registry_version | defined && registry_version != ""
- name: Check binaries for docker-compose
command: |
compose_name=docker-compose
compose_path={{ work_dir }}/kubekey/image-registry/docker-compose/{{ dockercompose_version }}/{{ item }}
if [ ! -f $compose_path/$compose_name ]; then
mkdir -p $compose_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.dockercompose[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $compose_path/$compose_name {{ artifact.artifact_url.dockercompose[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- dockercompose_version | defined && dockercompose_version != ""
- name: Check binaries for harbor
command: |
harbor_name={{ artifact.artifact_url.harbor[item]|split:"/"|last }}
harbor_path={{ work_dir }}/kubekey/image-registry/harbor/{{ harbor_version }}/{{ item }}
if [ ! -f $harbor_path/$harbor_name ]; then
mkdir -p $harbor_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.harbor[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $harbor_path/$harbor_name {{ artifact.artifact_url.harbor[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- harbor_version | defined && harbor_version != ""
- name: Check binaries for keepalived
command: |
artifact_name={{ artifact.artifact_url.keepalived[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/image-registry/keepalived/{{ keepalived_version }}/{{ item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.keepalived[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.keepalived[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- keepalived_version | defined && keepalived_version != ""
- name: Check binaries for oras
command: |
artifact_name={{ artifact.artifact_url.oras[item]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/oras/{{ oras_version }}/{{ item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.oras[item] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.oras[item] }}
fi
loop: "{{ artifact.arch }}"
when:
- oras_version | defined && oras_version != ""
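# Every task above follows the same guard: probe the URL first, abort on a
# non-200 status so a failed download never leaves an HTML error page on disk,
# then fetch into a per-component, per-arch directory. An illustrative
# follow-up task (not part of the original playbook) to list what landed:
# - name: List downloaded binaries
#   command: |
#     find {{ work_dir }}/kubekey -maxdepth 4 -type f | sort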

View File

@ -0,0 +1,44 @@
---
- name: Check binaries for cilium
command: |
artifact_name={{ artifact.artifact_url.cilium|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/cni
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
cd $artifact_path && helm pull {{ artifact.artifact_url.cilium }}
fi
when: cilium_version | defined
- name: Check binaries for kubeovn
command: |
artifact_name={{ artifact.artifact_url.kubeovn|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/cni
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
cd $artifact_path && helm pull {{ artifact.artifact_url.kubeovn }}
fi
when: kubeovn_version | defined
- name: Check binaries for hybridnet
command: |
artifact_name={{ artifact.artifact_url.hybridnet|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/cni
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
cd $artifact_path && helm pull {{ artifact.artifact_url.hybridnet }}
fi
when: hybridnet_version | defined
- name: Check binaries for nfs_provisioner
command: |
artifact_name={{ artifact.artifact_url.nfs_provisioner|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/sc
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
cd $artifact_path && helm pull {{ artifact.artifact_url.nfs_provisioner }}
fi
when: nfs_provisioner_version | defined
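# Unlike the curl tasks, charts are fetched with `helm pull`, which names the
# archive after the chart (e.g. cilium-{{ cilium_version }}.tgz). A sketch of
# inspecting a pulled chart, assuming helm is available on the control machine:
# - name: Inspect pulled cilium chart
#   command: |
#     helm show chart {{ work_dir }}/kubekey/cni/cilium-{{ cilium_version }}.tgz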

View File

@ -0,0 +1,10 @@
---
- name: Login to registry with oras
command: |
oras login {{ item.url }} -u {{ item.user }} -p {{ item.password }}
loop: "{{ artifact.images.auth }}"
- name: Copy images to local
command: |
oras cp --to-oci-layout {{ item }} {{ work_dir }}/kubekey/images/{{ item|split:"/"|join:"="|safe }}
loop: "{{ artifact.images.list }}"

View File

@ -0,0 +1,28 @@
---
- name: Create work_dir
command: |
if [ ! -d "{{ work_dir }}" ]; then
mkdir -p {{ work_dir }}
fi
- name: Extract artifact to work_dir
command: |
if [ ! -f "{{ artifact.artifact_file }}" ]; then
tar -zxvf {{ artifact.artifact_file }} -C {{ work_dir }}
fi
when: artifact.artifact_file | defined
- name: Download binaries
block:
# binaries downloaded by curl
- include_tasks: download_by_curl.yaml
# charts pulled by helm
- include_tasks: download_by_helm.yaml
# image packages copied to local OCI layouts by oras
- include_tasks: download_by_oras.yaml
- include_tasks: pki.yaml
- name: Chown work_dir to sudo
command: |
chown -R ${SUDO_UID}:${SUDO_GID} {{ work_dir }}

View File

@ -0,0 +1,34 @@
---
- name: Generate root ca file
gen_cert:
cn: root
date: 87600h
policy: "{{ artifact.gen_cert_policy }}"
out_key: "{{ work_dir }}/kubekey/pki/root.key"
out_cert: "{{ work_dir }}/kubekey/pki/root.crt"
- name: Generate etcd cert file
gen_cert:
root_key: "{{ work_dir }}/kubekey/pki/root.key"
root_cert: "{{ work_dir }}/kubekey/pki/root.crt"
cn: etcd
sans: |
[{% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %}]
date: 87600h
policy: "{{ artifact.gen_cert_policy }}"
out_key: "{{ work_dir }}/kubekey/pki/etcd.key"
out_cert: "{{ work_dir }}/kubekey/pki/etcd.crt"
when: groups['etcd']|length > 0
- name: Generate registry image cert file
gen_cert:
root_key: "{{ work_dir }}/kubekey/pki/root.key"
root_cert: "{{ work_dir }}/kubekey/pki/root.crt"
cn: image_registry
sans: |
[{% for h in groups['image_registry'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %}]
date: 87600h
policy: "{{ artifact.gen_cert_policy }}"
out_key: "{{ work_dir }}/kubekey/pki/image_registry.key"
out_cert: "{{ work_dir }}/kubekey/pki/image_registry.crt"
when: groups['image_registry']|length > 0
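# All certificates above are issued for ten years (87600h) with SANs rendered
# from the inventory. An illustrative check of a generated certificate with
# openssl (not part of the playbook; -ext assumes openssl 1.1.1 or newer):
# - name: Inspect generated etcd certificate
#   command: |
#     openssl x509 -in {{ work_dir }}/kubekey/pki/etcd.crt -noout -subject -enddate
#     openssl x509 -in {{ work_dir }}/kubekey/pki/etcd.crt -noout -ext subjectAltName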

View File

@ -0,0 +1,2 @@
ntp_servers: [ "cn.pool.ntp.org" ]
timezone: Asia/Shanghai

View File

@ -0,0 +1,42 @@
---
- name: Configure ntp server
command: |
chronyConfigFile="/etc/chrony.conf"
if [ {{ os.release.ID }} = "ubuntu" ] || [ {{ os.release.ID_LIKE }} = "debian" ]; then
chronyConfigFile="/etc/chrony/chrony.conf"
fi
# clear old server
sed -i '/^server/d' $chronyConfigFile
# disable pool
sed -i 's/^pool /#pool /g' $chronyConfigFile
# delete allow
sed -i '/^allow/d' $chronyConfigFile
# allow client
echo "allow 0.0.0.0/0" >> $chronyConfigFile
# delete local
sed -i '/^local/d' $chronyConfigFile
# add local
echo "local stratum 10" >> $chronyConfigFile
# add server
{% for server in ntp_servers %}
{% for _,v in inventory_hosts %}
{% if (v.inventory_name == server) %}{% set server = v.internal_ipv4 %}{% endif %}
{% endfor %}
grep -q '^server {{ server }} iburst' $chronyConfigFile || sed -i '1a server {{ server }} iburst' $chronyConfigFile
{% endfor %}
- name: Set timezone
command: |
timedatectl set-timezone {{ timezone }}
timedatectl set-ntp true
when: timezone | defined
- name: Restart ntp server
command: |
chronyService="chronyd.service"
if [ {{ os.release.ID }} = "ubuntu" ] || [ {{ os.release.ID_LIKE }} = "debian" ]; then
chronyService="chrony.service"
fi
systemctl restart $chronyService
when:
- ntp_servers | defined or timezone | defined
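# The first task rewrites chrony's config in place: existing server/allow/local
# lines are stripped, the daemon is opened to all clients, and each configured
# NTP server is inserted (inventory host names resolve to their internal IPs).
# An illustrative follow-up check, not part of the role:
# - name: Verify chrony sources
#   command: |
#     chronyc sources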

View File

@ -0,0 +1,75 @@
---
- name: Sync repository
block:
- name: Sync repository file
ignore_errors: true
copy:
src: "{{ work_dir }}/kubekey/repository/{{ os.release.ID_LIKE }}-{{ os.release.VERSION_ID|safe }}-{{ binary_type.stdout }}.iso"
dest: "/tmp/kubekey/repository.iso"
- name: Mount iso file
command: |
if [ -f "/tmp/kubekey/repository.iso" ]; then
mount -t iso9660 -o loop /tmp/kubekey/repository.iso /tmp/kubekey/iso
fi
rescue:
- name: Unmount iso file
command: |
if [ -f "/tmp/kubekey/repository.iso" ]; then
umount /tmp/kubekey/iso
fi
- name: Init repository
block:
- name: Init debian repository
command: |
if [ -f "/tmp/kubekey/repository.iso" ];then
# backup
mv /etc/apt/sources.list /etc/apt/sources.list.kubekey.bak
mv /etc/apt/sources.list.d /etc/apt/sources.list.d.kubekey.bak
mkdir -p /etc/apt/sources.list.d
# add repository
rm -rf /etc/apt/sources.list.d/*
echo 'deb [trusted=yes] file:///tmp/kubekey/iso /' > /etc/apt/sources.list.d/kubekey.list
# update repository
apt-get update
# install
apt install -y socat conntrack ipset ebtables chrony ipvsadm
# reset repository
rm -rf /etc/apt/sources.list.d
mv /etc/apt/sources.list.kubekey.bak /etc/apt/sources.list
mv /etc/apt/sources.list.d.kubekey.bak /etc/apt/sources.list.d
else
apt install -y socat conntrack ipset ebtables chrony ipvsadm
fi
when: os.release.ID_LIKE == "debian"
- name: Init rhel repository
command: |
if [ -f "/tmp/kubekey/repository.iso" ];then
# backup
mv /etc/yum.repos.d /etc/yum.repos.d.kubekey.bak
mkdir -p /etc/yum.repos.d
# add repository
rm -rf /etc/yum.repos.d/*
cat << EOF > /etc/yum.repos.d/CentOS-local.repo
[base-local]
name=rpms-local
baseurl=file:///tmp/kubekey/iso
enabled=1
gpgcheck=0
EOF
# update repository
yum clean all && yum makecache
# install
yum install -y openssl socat conntrack ipset ebtables chrony ipvsadm
# reset repository
rm -rf /etc/yum.repos.d
mv /etc/yum.repos.d.kubekey.bak /etc/yum.repos.d
else
# install
yum install -y openssl socat conntrack ipset ebtables chrony ipvsadm
fi
when: os.release.ID_LIKE == "rhel fedora"

View File

@ -0,0 +1,25 @@
---
- include_tasks: init_repository.yaml
- include_tasks: init_ntpserver.yaml
- name: Reset tmp dir
command: |
if [ -d /tmp/kubekey ]; then
rm -rf /tmp/kubekey
fi
mkdir -m 777 -p /tmp/kubekey
- name: Set hostname
command: |
hostnamectl set-hostname {{ inventory_name }} && sed -i '/^127.0.1.1/s/.*/127.0.1.1 {{ inventory_name }}/g' /etc/hosts
- name: Sync init os to remote
template:
src: init-os.sh
dest: /etc/kubekey/scripts/init-os.sh
mode: 0755
- name: Execute init os script
command: |
chmod +x /etc/kubekey/scripts/init-os.sh && /etc/kubekey/scripts/init-os.sh

View File

@ -0,0 +1,193 @@
#!/usr/bin/env bash
# Copyright 2020 The KubeSphere Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
swapoff -a
sed -i '/^[^#]*swap/s/^/#/g' /etc/fstab
# See https://github.com/kubernetes/website/issues/14457
if [ -f /etc/selinux/config ]; then
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
fi
# for ubuntu: sudo apt install selinux-utils
# for centos: yum install selinux-policy
if command -v setenforce &> /dev/null
then
setenforce 0
getenforce
fi
echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-arptables = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.conf
echo 'net.ipv4.ip_local_reserved_ports = 30000-32767' >> /etc/sysctl.conf
echo 'net.core.netdev_max_backlog = 65535' >> /etc/sysctl.conf
echo 'net.core.rmem_max = 33554432' >> /etc/sysctl.conf
echo 'net.core.wmem_max = 33554432' >> /etc/sysctl.conf
echo 'net.core.somaxconn = 32768' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_max_syn_backlog = 1048576' >> /etc/sysctl.conf
echo 'net.ipv4.neigh.default.gc_thresh1 = 512' >> /etc/sysctl.conf
echo 'net.ipv4.neigh.default.gc_thresh2 = 2048' >> /etc/sysctl.conf
echo 'net.ipv4.neigh.default.gc_thresh3 = 4096' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_retries2 = 15' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_max_tw_buckets = 1048576' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_max_orphans = 65535' >> /etc/sysctl.conf
echo 'net.ipv4.udp_rmem_min = 131072' >> /etc/sysctl.conf
echo 'net.ipv4.udp_wmem_min = 131072' >> /etc/sysctl.conf
echo 'net.ipv4.conf.all.rp_filter = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.default.rp_filter = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.all.arp_accept = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.default.arp_accept = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.all.arp_ignore = 1' >> /etc/sysctl.conf
echo 'net.ipv4.conf.default.arp_ignore = 1' >> /etc/sysctl.conf
echo 'vm.max_map_count = 262144' >> /etc/sysctl.conf
echo 'vm.swappiness = 0' >> /etc/sysctl.conf
echo 'vm.overcommit_memory = 1' >> /etc/sysctl.conf
echo 'fs.inotify.max_user_instances = 524288' >> /etc/sysctl.conf
echo 'fs.inotify.max_user_watches = 10240001' >> /etc/sysctl.conf
echo 'fs.pipe-max-size = 4194304' >> /etc/sysctl.conf
echo 'fs.aio-max-nr = 262144' >> /etc/sysctl.conf
echo 'kernel.pid_max = 65535' >> /etc/sysctl.conf
echo 'kernel.watchdog_thresh = 5' >> /etc/sysctl.conf
echo 'kernel.hung_task_timeout_secs = 5' >> /etc/sysctl.conf
#add for ipv6
echo 'net.ipv6.conf.all.disable_ipv6 = 0' >> /etc/sysctl.conf
echo 'net.ipv6.conf.default.disable_ipv6 = 0' >> /etc/sysctl.conf
echo 'net.ipv6.conf.lo.disable_ipv6 = 0' >> /etc/sysctl.conf
echo 'net.ipv6.conf.all.forwarding=1' >> /etc/sysctl.conf
#See https://help.aliyun.com/document_detail/118806.html#uicontrol-e50-ddj-w0y
sed -r -i "s@#{0,}?net.ipv4.tcp_tw_recycle ?= ?(0|1|2)@net.ipv4.tcp_tw_recycle = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.tcp_tw_reuse ?= ?(0|1)@net.ipv4.tcp_tw_reuse = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.all.rp_filter ?= ?(0|1|2)@net.ipv4.conf.all.rp_filter = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.default.rp_filter ?= ?(0|1|2)@net.ipv4.conf.default.rp_filter = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.ip_forward ?= ?(0|1)@net.ipv4.ip_forward = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-arptables ?= ?(0|1)@net.bridge.bridge-nf-call-arptables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-ip6tables ?= ?(0|1)@net.bridge.bridge-nf-call-ip6tables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.bridge.bridge-nf-call-iptables ?= ?(0|1)@net.bridge.bridge-nf-call-iptables = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.ip_local_reserved_ports ?= ?([0-9]{1,}-{0,1},{0,1}){1,}@net.ipv4.ip_local_reserved_ports = 30000-32767@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?vm.max_map_count ?= ?([0-9]{1,})@vm.max_map_count = 262144@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?vm.swappiness ?= ?([0-9]{1,})@vm.swappiness = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?fs.inotify.max_user_instances ?= ?([0-9]{1,})@fs.inotify.max_user_instances = 524288@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?kernel.pid_max ?= ?([0-9]{1,})@kernel.pid_max = 65535@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?vm.overcommit_memory ?= ?(0|1|2)@vm.overcommit_memory = 0@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?fs.inotify.max_user_watches ?= ?([0-9]{1,})@fs.inotify.max_user_watches = 524288@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?fs.pipe-max-size ?= ?([0-9]{1,})@fs.pipe-max-size = 4194304@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.core.netdev_max_backlog ?= ?([0-9]{1,})@net.core.netdev_max_backlog = 65535@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.core.rmem_max ?= ?([0-9]{1,})@net.core.rmem_max = 33554432@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.core.wmem_max ?= ?([0-9]{1,})@net.core.wmem_max = 33554432@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.tcp_max_syn_backlog ?= ?([0-9]{1,})@net.ipv4.tcp_max_syn_backlog = 1048576@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.neigh.default.gc_thresh1 ?= ?([0-9]{1,})@net.ipv4.neigh.default.gc_thresh1 = 512@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.neigh.default.gc_thresh2 ?= ?([0-9]{1,})@net.ipv4.neigh.default.gc_thresh2 = 2048@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.neigh.default.gc_thresh3 ?= ?([0-9]{1,})@net.ipv4.neigh.default.gc_thresh3 = 4096@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.core.somaxconn ?= ?([0-9]{1,})@net.core.somaxconn = 32768@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.eth0.arp_accept ?= ?(0|1)@net.ipv4.conf.eth0.arp_accept = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?fs.aio-max-nr ?= ?([0-9]{1,})@fs.aio-max-nr = 262144@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.tcp_retries2 ?= ?([0-9]{1,})@net.ipv4.tcp_retries2 = 15@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.tcp_max_tw_buckets ?= ?([0-9]{1,})@net.ipv4.tcp_max_tw_buckets = 1048576@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.tcp_max_orphans ?= ?([0-9]{1,})@net.ipv4.tcp_max_orphans = 65535@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.udp_rmem_min ?= ?([0-9]{1,})@net.ipv4.udp_rmem_min = 131072@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.udp_wmem_min ?= ?([0-9]{1,})@net.ipv4.udp_wmem_min = 131072@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.all.arp_ignore ?= ??(0|1|2)@net.ipv4.conf.all.arp_ignore = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?net.ipv4.conf.default.arp_ignore ?= ??(0|1|2)@net.ipv4.conf.default.arp_ignore = 1@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?kernel.watchdog_thresh ?= ?([0-9]{1,})@kernel.watchdog_thresh = 5@g" /etc/sysctl.conf
sed -r -i "s@#{0,}?kernel.hung_task_timeout_secs ?= ?([0-9]{1,})@kernel.hung_task_timeout_secs = 5@g" /etc/sysctl.conf
tmpfile="$$.tmp"
awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/sysctl.conf
mv $tmpfile /etc/sysctl.conf
# ulimit
echo "* soft nofile 1048576" >> /etc/security/limits.conf
echo "* hard nofile 1048576" >> /etc/security/limits.conf
echo "* soft nproc 65536" >> /etc/security/limits.conf
echo "* hard nproc 65536" >> /etc/security/limits.conf
echo "* soft memlock unlimited" >> /etc/security/limits.conf
echo "* hard memlock unlimited" >> /etc/security/limits.conf
sed -r -i "s@#{0,}?\* soft nofile ?([0-9]{1,})@\* soft nofile 1048576@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* hard nofile ?([0-9]{1,})@\* hard nofile 1048576@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* soft nproc ?([0-9]{1,})@\* soft nproc 65536@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* hard nproc ?([0-9]{1,})@\* hard nproc 65536@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* soft memlock ?([0-9]{1,}([TGKM]B){0,1}|unlimited)@\* soft memlock unlimited@g" /etc/security/limits.conf
sed -r -i "s@#{0,}?\* hard memlock ?([0-9]{1,}([TGKM]B){0,1}|unlimited)@\* hard memlock unlimited@g" /etc/security/limits.conf
tmpfile="$$.tmp"
awk ' !x[$0]++{print > "'$tmpfile'"}' /etc/security/limits.conf
mv $tmpfile /etc/security/limits.conf
systemctl stop firewalld 1>/dev/null 2>/dev/null
systemctl disable firewalld 1>/dev/null 2>/dev/null
systemctl stop ufw 1>/dev/null 2>/dev/null
systemctl disable ufw 1>/dev/null 2>/dev/null
modinfo br_netfilter > /dev/null 2>&1
if [ $? -eq 0 ]; then
modprobe br_netfilter
mkdir -p /etc/modules-load.d
echo 'br_netfilter' > /etc/modules-load.d/kubekey-br_netfilter.conf
fi
modinfo overlay > /dev/null 2>&1
if [ $? -eq 0 ]; then
modprobe overlay
echo 'overlay' >> /etc/modules-load.d/kubekey-br_netfilter.conf
fi
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
cat > /etc/modules-load.d/kube_proxy-ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
EOF
modprobe nf_conntrack_ipv4 1>/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
echo 'nf_conntrack_ipv4' >> /etc/modules-load.d/kube_proxy-ipvs.conf
else
modprobe nf_conntrack
echo 'nf_conntrack' >> /etc/modules-load.d/kube_proxy-ipvs.conf
fi
sysctl -p
sed -i ':a;$!{N;ba};s@# kubekey hosts BEGIN.*# kubekey hosts END@@' /etc/hosts
sed -i '/^$/N;/\n$/N;//D' /etc/hosts
cat >>/etc/hosts<<EOF
# kubekey hosts BEGIN
{% for _,hv in inventory_hosts %}
{% if (hv.internal_ipv4|defined) %}{{ hv.internal_ipv4 }} {{ hv.inventory_name }} {{ hv.inventory_name }}.{{ kubernetes.cluster_name|default_if_none:'cluster.local' }}{% endif %}
{% if (hv.internal_ipv6|defined) %}{{ hv.internal_ipv6 }} {{ hv.inventory_name }} {{ hv.inventory_name }}.{{ kubernetes.cluster_name|default_if_none:'cluster.local' }}{% endif %}
{% endfor %}
# kubekey hosts END
EOF
sync
echo 3 > /proc/sys/vm/drop_caches
# Make sure the iptables utility doesn't use the nftables backend.
update-alternatives --set iptables /usr/sbin/iptables-legacy >/dev/null 2>&1 || true
update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy >/dev/null 2>&1 || true
update-alternatives --set arptables /usr/sbin/arptables-legacy >/dev/null 2>&1 || true
update-alternatives --set ebtables /usr/sbin/ebtables-legacy >/dev/null 2>&1 || true

View File

@ -0,0 +1,4 @@
renew_certs:
enabled: false
is_docker: "{% if (cri.container_manager == 'docker') %}true{% else %}false{% endif %}"
is_kubeadm_alpha: "{% if (kube_version|version:'<v1.20.0') %}true{% else %}false{% endif %}"

View File

@ -0,0 +1,5 @@
[Unit]
Description=Renew K8S control plane certificates
[Service]
Type=oneshot
ExecStart=/usr/local/bin/kube-scripts/k8s-certs-renew.sh

View File

@ -0,0 +1,7 @@
[Unit]
Description=Timer to renew K8S control plane certificates
[Timer]
OnCalendar=Mon *-*-* 03:00:00
Unit=k8s-certs-renew.service
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,20 @@
---
- name: Generate renew script
template:
src: renew_script.sh
dest: /usr/local/bin/kube-scripts/k8s-certs-renew.sh
mode: 0755
- name: Sync renew service
copy:
src: k8s-certs-renew.service
dest: /etc/systemd/system/k8s-certs-renew.service
- name: Sync renew timer
copy:
src: k8s-certs-renew.timer
dest: /etc/systemd/system/k8s-certs-renew.timer
- name: Enable renew service
command: |
systemctl daemon-reload && systemctl enable --now k8s-certs-renew.timer
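# The timer defined above fires weekly (Mon 03:00) and the script renews only
# when fewer than 30 days of validity remain. An illustrative check of the
# schedule after enabling (not part of the role):
# - name: Show renew timer schedule
#   command: |
#     systemctl list-timers k8s-certs-renew.timer --no-pager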

View File

@ -0,0 +1,29 @@
#!/bin/bash
{% if (renew_certs.is_kubeadm_alpha=="true") %}
kubeadmCerts='/usr/local/bin/kubeadm alpha certs'
{% else %}
kubeadmCerts='/usr/local/bin/kubeadm certs'
{% endif %}
getCertValidDays() {
local earliestExpireDate; earliestExpireDate=$(${kubeadmCerts} check-expiration | grep -o "[A-Za-z]\{3,4\}\s\w\w,\s[0-9]\{4,\}\s\w*:\w*\s\w*\s*" | xargs -I {} date -d {} +%s | sort | head -n 1)
local today; today="$(date +%s)"
echo -n $(( ($earliestExpireDate - $today) / (24 * 60 * 60) ))
}
echo "## Expiration before renewal ##"
${kubeadmCerts} check-expiration
if [ $(getCertValidDays) -lt 30 ]; then
echo "## Renewing certificates managed by kubeadm ##"
${kubeadmCerts} renew all
echo "## Restarting control plane pods managed by kubeadm ##"
{% if (renew_certs.is_docker=="true") %}
$(which docker | grep docker) ps -af 'name=k8s_POD_(kube-apiserver|kube-controller-manager|kube-scheduler|etcd)-*' -q | /usr/bin/xargs $(which docker | grep docker) rm -f
{% else %}
$(which crictl | grep crictl) pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | /usr/bin/xargs $(which crictl | grep crictl) rmp -f
{% endif %}
echo "## Updating /root/.kube/config ##"
cp /etc/kubernetes/admin.conf /root/.kube/config
fi
echo "## Waiting for apiserver to be up again ##"
until printf "" 2>>/dev/null >>/dev/tcp/127.0.0.1/6443; do sleep 1; done
echo "## Expiration after renewal ##"
${kubeadmCerts} check-expiration

View File

@ -0,0 +1,12 @@
cri:
# support: systemd, cgroupfs
cgroup_driver: systemd
sandbox_image: "k8s.gcr.io/pause:3.5"
# support: containerd,docker,crio
container_manager: docker
# the CRI socket; defaults to containerd's gRPC address when containerd is the manager
cri_socket: "{% if (cri.container_manager=='containerd') %}unix:///run/containerd/containerd.sock{% endif %}"
# containerd:
# data_root: /var/lib/containerd
docker:
data_root: /var/lib/docker
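# For the containerd manager the commented keys above would be switched over
# instead. A hedged example of the same defaults with containerd selected
# (values mirror this role's containerd config template, nothing extra):
# cri:
#   cgroup_driver: systemd
#   sandbox_image: "k8s.gcr.io/pause:3.5"
#   container_manager: containerd
#   cri_socket: unix:///run/containerd/containerd.sock
#   containerd:
#     data_root: /var/lib/containerd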

View File

@ -0,0 +1,26 @@
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,36 @@
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
[Service]
Type=notify
ExecStart=/usr/local/bin/cri-dockerd --pod-infra-container-image {{ cri.sandbox_image }}
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,47 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
# After=network-online.target firewalld.service containerd.service
# Wants=network-online.target
# Requires=docker.socket containerd.service
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/local/bin/dockerd --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,45 @@
---
- name: Check if runc is installed
ignore_errors: true
command: runc --version
register: runc_install_version
- name: Sync Runc Binary to remote
copy:
src: "{{ work_dir }}/kubekey/runc/{{ runc_version }}/{{ binary_type.stdout }}/runc.{{ binary_type.stdout }}"
dest: "/usr/local/bin/runc"
mode: 0755
when: runc_install_version.stderr != ""
- name: Check if Containerd is installed
ignore_errors: true
command: containerd --version
register: containerd_install_version
- name: Sync Containerd Binary to remote
copy:
src: "{{ work_dir }}/kubekey/containerd/{{ containerd_version }}/{{ binary_type.stdout }}/containerd-{{ containerd_version|slice:'1:' }}-linux-{{ binary_type.stdout }}.tar.gz"
dest: "/tmp/kubekey/containerd-{{ containerd_version|slice:'1:' }}-linux-{{ binary_type.stdout }}.tar.gz"
when: containerd_install_version.stderr != ""
- name: Unpackage Containerd binary
command: |
tar --strip-components=1 -C /usr/local/bin/ -xvf /tmp/kubekey/containerd-{{ containerd_version|slice:'1:' }}-linux-{{ binary_type.stdout }}.tar.gz
when: containerd_install_version.stderr != ""
- name: Generate Containerd config file
template:
src: containerd.config
dest: /etc/containerd/config.toml
when: containerd_install_version.stderr != ""
- name: Generate Containerd Service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
when: containerd_install_version.stderr != ""
- name: Start Containerd
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
when: containerd_install_version.stderr != ""

View File

@ -0,0 +1,21 @@
---
- name: Check if crictl is installed
ignore_errors: true
command: crictl --version
register: crictl_install_version
- name: Sync crictl binary to remote
copy:
src: "{{ work_dir }}/kubekey/crictl/{{ crictl_version }}/{{ binary_type.stdout }}/crictl-{{ crictl_version }}-linux-{{ binary_type.stdout }}.tar.gz"
dest: "/tmp/kubekey/crictl-{{ crictl_version }}-linux-{{ binary_type.stdout }}.tar.gz"
when: crictl_install_version.stderr != ""
- name: Unpackage crictl binary
command: |
tar -xvf /tmp/kubekey/crictl-{{ crictl_version }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin/
when: crictl_install_version.stderr != ""
- name: Generate crictl config file
template:
src: crictl.config
dest: /etc/crictl.yaml

View File

@ -0,0 +1,33 @@
---
- name: Check if cri-dockerd is installed
ignore_errors: true
command: cri-dockerd --version
register: cridockerd_install_version
- name: Sync cri-dockerd Binary to remote
copy:
src: "{{ work_dir }}/kubekey/cri-dockerd/{{ cridockerd_version }}/{{ binary_type.stdout }}/cri-dockerd-{{ cridockerd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
dest: "/tmp/kubekey/cri-dockerd-{{ cridockerd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
when: cridockerd_install_version.stderr != ""
- name: Generate cri-dockerd config file
template:
src: cri-dockerd.config
dest: /etc/cri-dockerd.yaml
when: cridockerd_install_version.stderr != ""
- name: Unpackage cri-dockerd binary
command: |
tar --strip-components=1 -C /usr/local/bin/ -xvf /tmp/kubekey/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.{{ binary_type.stdout }}.tgz
when: cridockerd_install_version.stderr != ""
- name: Generate cri-dockerd Service file
template:
src: cri-dockerd.service
dest: /etc/systemd/system/cri-dockerd.service
when: cridockerd_install_version.stderr != ""
- name: Start cri-dockerd service
command: |
systemctl daemon-reload && systemctl start cri-dockerd.service && systemctl enable cri-dockerd.service
when: cridockerd_install_version.stderr != ""

View File

@ -0,0 +1,40 @@
---
- name: Check if docker is installed
ignore_errors: true
command: docker --version
register: docker_install_version
- name: Sync docker binary to remote
copy:
src: "{{ work_dir }}/kubekey/docker/{{ docker_version }}/{{ binary_type.stdout }}/docker-{{ docker_version }}.tgz"
dest: "/tmp/kubekey/docker-{{ docker_version }}.tgz"
when: docker_install_version.stderr != ""
- name: Unpackage docker binary
command: |
tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ docker_version }}.tgz --wildcards docker/*
when: docker_install_version.stderr != ""
- name: Generate docker config file
template:
src: docker.config
dest: /etc/docker/daemon.json
when: docker_install_version.stderr != ""
- name: Generate docker service file
copy:
src: docker.service
dest: /etc/systemd/system/docker.service
when: docker_install_version.stderr != ""
- name: Generate containerd service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
when: docker_install_version.stderr != ""
- name: Start docker service
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
when: docker_install_version.stderr != ""

View File

@ -0,0 +1,19 @@
---
# install crictl
- include_tasks: install_crictl.yaml
# install docker
- include_tasks: install_docker.yaml
when: cri.container_manager == "docker"
# install containerd
- include_tasks: install_containerd.yaml
when: cri.container_manager == "containerd"
# install cridockerd
- include_tasks: install_cridockerd.yaml
when:
- cri.container_manager == "docker"
- kube_version|version:'>=v1.24.0'

View File

@ -0,0 +1,76 @@
version = 2
root = "{{ cri.containerd.data_root|default_if_none:'/var/lib/containerd' }}"
state = "/run/containerd"
[grpc]
address = "/run/containerd/containerd.sock"
uid = 0
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
[ttrpc]
address = ""
uid = 0
gid = 0
[debug]
address = ""
uid = 0
gid = 0
level = ""
[metrics]
address = ""
grpc_histogram = false
[cgroup]
path = ""
[timeouts]
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "{{ cri.sandbox_image }}"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = {% if (cri.cgroup_driver=="systemd") %}true{% else %}false{% endif %}
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
max_conf_num = 1
conf_template = ""
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
{% if (registry.mirrors|defined) %}
endpoint = {{ registry.mirrors|to_json|safe }}
{% else %}
endpoint = ["https://registry-1.docker.io"]
{% endif %}
{% for ir in registry.insecure_registries %}
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ ir }}"]
endpoint = ["http://{{ ir }}"]
{% endfor %}
{% if (registry.auths|length > 0) %}
[plugins."io.containerd.grpc.v1.cri".registry.configs]
{% for ir in registry.auths %}
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ ir.repo }}".auth]
username = "{{ ir.username }}"
password = "{{ ir.password }}"
{% if (ir.ca_file|defined) %}
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ ir.repo }}".tls]
ca_file = "{{ ir.ca_file }}"
cert_file = "{{ ir.crt_file }}"
key_file = "{{ ir.key_file }}"
insecure_skip_verify = {{ ir.skip_ssl }}
{% endif %}
{% endfor %}
{% endif %}
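# The mirror/auth loops above are driven by a `registry` variable that is not
# defined in this role's defaults. An illustrative shape (host names and
# credentials are hypothetical; ca_file/cert_file/key_file are optional):
#   registry:
#     mirrors: ["https://registry-1.docker.io"]
#     insecure_registries: ["dockerhub.kubekey.local"]
#     auths:
#       - repo: dockerhub.kubekey.local
#         username: admin
#         password: Harbor12345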

View File

@ -0,0 +1,5 @@
runtime-endpoint: {{ cri.container_runtime_endpoint }}
image-endpoint: {{ cri.container_runtime_endpoint }}
timeout: 5
debug: false
pull-image-on-create: false

View File

@ -0,0 +1,19 @@
{
"log-opts": {
"max-size": "5m",
"max-file":"3"
},
{% if (cri.docker.data_root|defined) %}
"data-root": "{{ cri.docker.data_root }}",
{% endif %}
{% if (registry.mirrors|defined) %}
"registry-mirrors": {{ registry.mirrors|to_json|safe }},
{% endif %}
{% if (registry.insecure_registries|defined) %}
"insecure-registries": {{ registry.insecure_registries|to_json|safe }},
{% endif %}
{% if (cri.docker.bridge_ip|defined) %}
"bip": "{{ cri.docker.bridge_ip }}",
{% endif %}
"exec-opts": ["native.cgroupdriver={{ cri.cgroup_driver }}"]
}

View File

@ -0,0 +1,24 @@
etcd:
# endpoints: ["https://172.1.1.1:2379"]
# initial cluster state when etcd is installed from binary. support: new, existing
state: new
# env config
env:
election_timeout: 5000
heartbeat_interval: 250
compaction_retention: 8
snapshot_count: 10000
data_dir: /var/lib/etcd
# metrics: basic
# quota_backend_bytes: 100
# max_request_bytes: 100
# max_snapshots: 100
# max_wals: 5
# log_level: info
# unsupported_arch: arm64
# backup config
backup:
backup_dir: /var/lib/etcd-backup
keep_backup_number: 5
# etcd_backup_script: /usr/local/bin/kube-scripts/backup-etcd.sh
on_calendar: "*-*-* *:00/30:00"

View File

@ -0,0 +1,5 @@
[Unit]
Description=Backup ETCD
[Service]
Type=oneshot
ExecStart=/usr/local/bin/kube-scripts/backup-etcd.sh

View File

@ -0,0 +1,16 @@
[Unit]
Description=etcd
After=network.target
[Service]
User=root
Type=notify
EnvironmentFile=/etc/etcd.env
ExecStart=/usr/local/bin/etcd
NotifyAccess=all
RestartSec=10s
LimitNOFILE=40000
Restart=always
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,29 @@
---
- name: Generate default backup etcd script
template:
src: "backup.sh"
dest: "/usr/local/bin/kube-scripts/backup-etcd.sh"
mode: 0755
when:
- "! etcd.backup.etcd_backup_script|defined"
- name: Sync custom backup etcd script
template:
src: "{{ etcd.backup.etcd_backup_script }}"
dest: "/usr/local/bin/kube-scripts/backup-etcd.sh"
mode: 0755
when: etcd.backup.etcd_backup_script|defined
- name: Generate backup etcd service
copy:
src: "backup.service"
dest: "/etc/systemd/system/backup-etcd.service"
- name: Generate backup etcd timer
template:
src: "backup.timer"
dest: "/etc/systemd/system/backup-etcd.timer"
- name: Enable etcd timer
command: |
systemctl daemon-reload && systemctl enable --now backup-etcd.timer

View File

@ -0,0 +1,38 @@
---
- name: Sync etcd binary to node
copy:
src: "{{ work_dir }}/kubekey/etcd/{{ etcd_version }}/{{ binary_type.stdout }}/etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
dest: "/tmp/kubekey/etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
- name: Extract etcd binary
command: |
tar --strip-components=1 -C /usr/local/bin/ -xvf /tmp/kubekey/etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}.tar.gz \
--wildcards etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}/etcd*
- name: Sync ca file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/root.crt"
dest: "/etc/ssl/etcd/ssl/ca.crt"
- name: Sync etcd cert file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/etcd.crt"
dest: "/etc/ssl/etcd/ssl/server.crt"
- name: Sync etcd key file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/etcd.key"
dest: "/etc/ssl/etcd/ssl/server.key"
- name: Generate etcd env file
template:
src: "etcd.env"
dest: "/etc/etcd.env"
- name: Generate etcd systemd service file
copy:
src: "etcd.service"
dest: "/etc/systemd/system/etcd.service"
- name: Start etcd service
command: systemctl daemon-reload && systemctl start etcd && systemctl enable etcd

View File

@ -0,0 +1,26 @@
---
- name: Check if etcd is installed
ignore_errors: true
command: etcd --version
run_once: true
register: etcd_install_version
- name: Init etcd
when: etcd_install_version.stderr != ""
block:
- name: Add etcd user
command: |
useradd -M -c 'Etcd user' -s /sbin/nologin -r etcd || :
- name: Create etcd directories
command: |
if [ ! -d "{{ item }}" ]; then
mkdir -p {{ item }} && chown -R etcd {{ item }}
fi
loop:
- "/var/lib/etcd"
- include_tasks: install_etcd.yaml
when: etcd_install_version.stderr != ""
- include_tasks: backup_etcd.yaml
when: etcd_install_version.stderr != ""

View File

@ -0,0 +1,33 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
ETCDCTL_PATH='/usr/local/bin/etcdctl'
ENDPOINTS='https://{{ internal_ipv4 }}:2379'
ETCD_DATA_DIR="{{ etcd.env.data_dir }}"
BACKUP_DIR="{{ etcd.backup.backup_dir }}/etcd-$(date +%Y-%m-%d-%H-%M-%S)"
KEEPBACKUPNUMBER='{{ etcd.backup.keep_backup_number }}'
((KEEPBACKUPNUMBER++))
ETCDCTL_CERT="/etc/ssl/etcd/ssl/server.crt"
ETCDCTL_KEY="/etc/ssl/etcd/ssl/server.key"
ETCDCTL_CA_FILE="/etc/ssl/etcd/ssl/ca.crt"
[ ! -d $BACKUP_DIR ] && mkdir -p $BACKUP_DIR
export ETCDCTL_API=2;$ETCDCTL_PATH backup --data-dir $ETCD_DATA_DIR --backup-dir $BACKUP_DIR
sleep 3
{
export ETCDCTL_API=3;$ETCDCTL_PATH --endpoints="$ENDPOINTS" snapshot save $BACKUP_DIR/snapshot.db \
--cacert="$ETCDCTL_CA_FILE" \
--cert="$ETCDCTL_CERT" \
--key="$ETCDCTL_KEY"
} > /dev/null
sleep 3
cd $BACKUP_DIR/../ && ls -lt |awk '{if(NR > '$KEEPBACKUPNUMBER'){print "rm -rf "$9}}'|sh
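# Restore sketch (illustrative, not executed by this script): stop etcd first,
# then rebuild the data dir from the v3 snapshot taken above. <timestamp> is a
# placeholder for one of the dated backup directories, and the target
# --data-dir must not already exist.
#   ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot restore \
#     {{ etcd.backup.backup_dir }}/etcd-<timestamp>/snapshot.db \
#     --data-dir {{ etcd.env.data_dir }}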

View File

@ -0,0 +1,7 @@
[Unit]
Description=Timer to backup ETCD
[Timer]
OnCalendar={{ etcd.backup.on_calendar }}
Unit=backup-etcd.service
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,53 @@
ETCD_DATA_DIR={{ etcd.env.data_dir }}
ETCD_ADVERTISE_CLIENT_URLS={{ internal_ipv4|stringformat:"https://%s:2379" }}
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ internal_ipv4|stringformat:"https://%s:2380" }}
ETCD_INITIAL_CLUSTER_STATE={{ etcd.state }}
ETCD_LISTEN_CLIENT_URLS={{ internal_ipv4|stringformat:"https://%s:2379" }},https://127.0.0.1:2379
ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
ETCD_LISTEN_PEER_URLS={{ internal_ipv4|stringformat:"https://%s:2380" }}
ETCD_NAME={{ inventory_name }}
ETCD_PROXY=off
ETCD_ENABLE_V2=true
ETCD_INITIAL_CLUSTER={% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}{{ hv.inventory_name }}={{ hv.internal_ipv4|stringformat:"https://%s:2380" }}{% if (not forloop.Last) %},{% endif %}{% endfor %}
ETCD_ELECTION_TIMEOUT={{ etcd.env.election_timeout }}
ETCD_HEARTBEAT_INTERVAL={{ etcd.env.heartbeat_interval }}
ETCD_AUTO_COMPACTION_RETENTION={{ etcd.env.compaction_retention }}
ETCD_SNAPSHOT_COUNT={{ etcd.env.snapshot_count }}
{% if (etcd.env.metrics|defined) %}
ETCD_METRICS={{ etcd.env.metrics }}
{% endif %}
{% if (etcd.env.quota_backend_bytes|defined) %}
ETCD_QUOTA_BACKEND_BYTES={{ etcd.env.quota_backend_bytes }}
{% endif %}
{% if (etcd.env.max_request_bytes|defined) %}
ETCD_MAX_REQUEST_BYTES={{ etcd.env.max_request_bytes }}
{% endif %}
{% if (etcd.env.max_snapshots|defined) %}
ETCD_MAX_SNAPSHOTS={{ etcd.env.max_snapshots }}
{% endif %}
{% if (etcd.env.max_wals|defined) %}
ETCD_MAX_WALS={{ etcd.env.max_wals }}
{% endif %}
{% if (etcd.env.log_level|defined) %}
ETCD_LOG_LEVEL={{ etcd.env.log_level }}
{% endif %}
{% if (etcd.env.unsupported_arch|defined) %}
ETCD_UNSUPPORTED_ARCH={{ etcd.env.unsupported_arch }}
{% endif %}
# TLS settings
ETCD_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.crt
ETCD_CERT_FILE=/etc/ssl/etcd/ssl/server.crt
ETCD_KEY_FILE=/etc/ssl/etcd/ssl/server.key
ETCD_CLIENT_CERT_AUTH=true
ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.crt
ETCD_PEER_CERT_FILE=/etc/ssl/etcd/ssl/server.crt
ETCD_PEER_KEY_FILE=/etc/ssl/etcd/ssl/server.key
ETCD_PEER_CLIENT_CERT_AUTH=true
# CLI settings
ETCDCTL_ENDPOINTS=https://127.0.0.1:2379
ETCDCTL_CACERT=/etc/ssl/etcd/ssl/ca.crt
ETCDCTL_CERT=/etc/ssl/etcd/ssl/server.crt
ETCDCTL_KEY=/etc/ssl/etcd/ssl/server.key

View File

@ -0,0 +1,44 @@
image_registry:
# registry type. support: harbor, registry
type: harbor
# Virtual IP for registry high availability; the VIP must be reachable and not already in use.
# ha_vip: 192.168.122.59
harbor:
admin_password: Harbor12345
registry:
version: 2
config:
storage: nfs
nfs_dir: /share/registry
storage:
filesystem:
rootdirectory: /var/lib/registry
# nfs_mount: /repository/registry # if set, rootdirectory is mounted from the NFS server at nfs_mount.
# azure:
# accountname: accountname
# accountkey: base64encodedaccountkey
# container: containername
# gcs:
# bucket: bucketname
# keyfile: /path/to/keyfile
# credentials:
# type: service_account
# project_id: project_id_string
# private_key_id: private_key_id_string
# private_key: private_key_string
# client_email: client@example.com
# client_id: client_id_string
# auth_uri: http://example.com/auth_uri
# token_uri: http://example.com/token_uri
# auth_provider_x509_cert_url: http://example.com/provider_cert_url
# client_x509_cert_url: http://example.com/client_cert_url
# rootdirectory: /gcs/object/name/prefix
# s3:
# accesskey: awsaccesskey
# secretkey: awssecretkey
# region: us-west-1
# regionendpoint: http://myobjects.local
# bucket: bucketname
# keyid: mykeyid
# rootdirectory: /s3/object/name/prefix

View File

@ -0,0 +1,26 @@
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,47 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
# After=network-online.target firewalld.service containerd.service
# Wants=network-online.target
# Requires=docker.socket containerd.service
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/local/bin/dockerd --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,40 @@
---
- name: Check if docker is installed
ignore_errors: true
command: docker --version
register: docker_install_version
- name: Sync docker binary to remote
copy:
src: "{{ work_dir }}/kubekey/docker/{{ docker_version }}/{{ binary_type.stdout }}/docker-{{ docker_version }}.tgz"
dest: "/tmp/kubekey/docker-{{ docker_version }}.tgz"
when: docker_install_version.stderr != ""
- name: Generate docker config file
template:
src: "docker.config"
dest: "/etc/docker/daemon.json"
when: docker_install_version.stderr != ""
- name: Unpackage docker binary
command: |
    tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ docker_version }}.tgz --wildcards 'docker/*'
when: docker_install_version.stderr != ""
- name: Generate docker service file
copy:
src: "docker.service"
dest: "/etc/systemd/system/docker.service"
when: docker_install_version.stderr != ""
- name: Generate containerd service file
copy:
src: "containerd.service"
dest: "/etc/systemd/system/containerd.service"
when: docker_install_version.stderr != ""
- name: Start docker service
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
when: docker_install_version.stderr != ""
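
Every task above keys off the registered probe: it runs only when `docker --version` wrote to stderr. A minimal shell sketch of that guard, assuming an illustrative version and path:

# capture only the probe's stderr; non-empty means docker is missing or broken
err=$(docker --version 2>&1 >/dev/null)
if [ -n "$err" ]; then   # mirrors: when docker_install_version.stderr != ""
  tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-24.0.6.tgz --wildcards 'docker/*'
fi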

View File

@@ -0,0 +1,13 @@
---
- name: Check if docker-compose is installed
ignore_errors: true
command: docker-compose --version
register: dockercompose_install_version
- name: Sync docker-compose to remote
copy:
src: "{{ work_dir }}/kubekey/image-registry/docker-compose/{{ dockercompose_version }}/{{ binary_type.stdout }}/docker-compose"
dest: "/usr/local/bin/docker-compose"
mode: 0755
when:
- dockercompose_install_version.stderr != ""

View File

@@ -0,0 +1,57 @@
---
- name: Check if the image registry is installed
ignore_errors: true
command: systemctl status harbor.service
register: image_registry_service
- name: Sync harbor package to remote
copy:
src: "{{ work_dir }}/kubekey/image-registry/harbor/{{ harbor_version }}/{{ binary_type.stdout }}/harbor-offline-installer-{{ harbor_version }}.tgz"
dest: "/opt/harbor/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz"
when: image_registry_service.stderr != ""
- name: Untar harbor package
command: |
cd /opt/harbor/{{ harbor_version }}/ && tar -zxvf harbor-offline-installer-{{ harbor_version }}.tgz
when: image_registry_service.stderr != ""
- name: Sync image registry cert file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
dest: "/opt/harbor/{{ harbor_version }}/ssl/server.crt"
when: image_registry_service.stderr != ""
- name: Sync image registry key file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.key"
dest: "/opt/harbor/{{ harbor_version }}/ssl/server.key"
when: image_registry_service.stderr != ""
- name: Generate harbor config
template:
src: "harbor.config"
dest: "/opt/harbor/{{ harbor_version }}/harbor/harbor.yml"
when: image_registry_service.stderr != ""
- name: Generate keepalived docker compose
template:
src: "harbor_keepalived.docker-compose"
dest: "/opt/harbor/{{ harbor_version }}/harbor/docker-compose-keepalived.yml"
when:
- image_registry.ha_vip | defined
- image_registry_service.stderr != ""
- name: Install harbor
command: |
cd /opt/harbor/{{ harbor_version }}/harbor && /bin/bash install.sh
when: image_registry_service.stderr != ""
- name: Register harbor service
template:
src: "harbor.service"
dest: "/etc/systemd/system/harbor.service"
when: image_registry_service.stderr != ""
- name: Start harbor service
command: systemctl daemon-reload && systemctl start harbor.service && systemctl enable harbor.service
when: image_registry_service.stderr != ""
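
Once the service is running, one hedged way to confirm the deployment is Harbor's health endpoint (the admin password here is a placeholder):

# each Harbor component should report healthy after install.sh completes
curl -sk -u "admin:Harbor12345" https://localhost/api/v2.0/health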

View File

@@ -0,0 +1,19 @@
---
- name: Sync keepalived image to remote
copy:
src: "{{ work_dir }}/kubekey/image-registry/keepalived/{{ keepalived_version }}/{{ binary_type.stdout }}/keepalived-{{ keepalived_version }}-linux-{{ binary_type.stdout }}.tgz"
dest: "/opt/keepalived/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-{{ binary_type.stdout }}.tgz"
- name: Load keepalived image
  command: |
    docker load -i /opt/keepalived/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-{{ binary_type.stdout }}.tgz
- name: Sync keepalived config to remote
  template:
    src: "keepalived.config"
    dest: "/opt/keepalived/{{ keepalived_version }}/keepalived.conf"
- name: Sync healthcheck shell to remote
  template:
    src: "keepalived.healthcheck"
    dest: "/opt/keepalived/{{ keepalived_version }}/healthcheck.sh"

View File

@@ -0,0 +1,65 @@
---
- name: Check if the image registry is installed
ignore_errors: true
command: systemctl status registry.service
register: image_registry_service
- name: Sync registry image to remote
copy:
src: "{{ work_dir }}/kubekey/image-registry/registry/{{ registry_version }}/{{ binary_type.stdout }}/registry-{{ registry_version }}-linux-{{ binary_type.stdout }}.tgz"
dest: "/opt/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-{{ binary_type.stdout }}.tgz"
when: image_registry_service.stderr != ""
- name: Mount NFS dir
  command: |
    if [ "{{ os.release.ID_LIKE }}" == "debian" ]; then
      apt update && apt install -y nfs-common
    elif [ "{{ os.release.ID_LIKE }}" == "rhel fedora" ]; then
      yum update && yum install -y nfs-utils
    fi
    {% set hv = inventory_hosts[groups['nfs']|first] %}
    mount -t nfs {{ hv.internal_ipv4 }}:{{ image_registry.registry.storage.filesystem.nfs_mount }} {{ image_registry.registry.storage.filesystem.rootdirectory }}
  when:
    - image_registry.registry.storage.filesystem.nfs_mount | defined
    - groups['nfs']|length == 1
    - image_registry_service.stderr != ""
- name: Load registry image
command: |
docker load -i /opt/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-{{ binary_type.stdout }}.tgz
when: image_registry_service.stderr != ""
- name: Sync image registry cert file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
dest: "/opt/registry/{{ registry_version }}/ssl/server.crt"
when: image_registry_service.stderr != ""
- name: Sync image registry key file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/image_registry.key"
dest: "/opt/registry/{{ registry_version }}/ssl/server.key"
when: image_registry_service.stderr != ""
- name: Generate registry docker compose
template:
src: "registry.docker-compose"
dest: "/opt/registry/{{ registry_version }}/docker-compose.yml"
when: image_registry_service.stderr != ""
- name: Generate registry config
template:
src: "registry.config"
dest: "/opt/registry/{{ registry_version }}/config.yml"
when: image_registry_service.stderr != ""
- name: Register registry service
copy:
src: "registry.service"
dest: "/etc/systemd/system/registry.service"
when: image_registry_service.stderr != ""
- name: Start registry service
command: systemctl daemon-reload && systemctl start registry.service && systemctl enable registry.service
when: image_registry_service.stderr != ""
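
For reference, with illustrative values (NFS host 10.0.0.5, the nfs_mount path from the config comments, a made-up rootdirectory), the Mount NFS dir task above reduces to roughly:

yum install -y nfs-utils       # or: apt install -y nfs-common on debian-like hosts
mount -t nfs 10.0.0.5:/repository/registry /storage/registry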

View File

@@ -0,0 +1,51 @@
---
- name: Check if there are images to load
ignore_errors: true
command: |
ls {{ work_dir }}/kubekey/images/
register: local_images_dir
- name: Sync oras to remote
copy:
src: "{{ work_dir }}/kubekey/oras/{{ oras_version }}/{{ binary_type.stdout }}/oras_{{ oras_version|slice:'1:' }}_linux_{{ binary_type.stdout }}.tar.gz"
dest: "/tmp/kubekey/oras_{{ oras_version|slice:'1:' }}_linux_{{ binary_type.stdout }}.tar.gz"
when: local_images_dir.stderr == ""
- name: Unpackage oras binary
command: tar -zxvf /tmp/kubekey/oras_{{ oras_version|slice:'1:' }}_linux_{{ binary_type.stdout }}.tar.gz -C /usr/local/bin oras
when: local_images_dir.stderr == ""
- name: Sync images package to remote
copy:
src: "{{ work_dir }}/kubekey/images/"
dest: "/tmp/kubekey/images/"
when: local_images_dir.stderr == ""
- name: Sync images to registry
  command: |
    for dir in /tmp/kubekey/images/*; do
      if [ ! -d "$dir" ]; then
        # only process directories
        continue
      fi
      IFS='=' read -ra array <<< "${dir##*/}"
      if [ "${#array[@]}" -gt 3 ]; then
        project=${array[1]}
        dest_image=$(echo "${array[@]:2:${#array[@]}-3}" | tr ' ' '/')
        tag=${array[-1]}
      else
        echo "unsupported image"
        exit 1
      fi
      # create the project if it does not exist yet
      http_code=$(curl -Iks -u "admin:{{ image_registry.harbor.admin_password }}" "https://localhost/api/v2.0/projects?project_name=${project}" | grep HTTP | awk '{print $2}')
      if [ "$http_code" == "404" ]; then
        curl -u "admin:{{ image_registry.harbor.admin_password }}" -k -X POST -H "Content-Type: application/json" "https://localhost/api/v2.0/projects" -d "{ \"project_name\": \"${project}\", \"public\": true}"
      fi
      oras cp --to-username admin --to-password {{ image_registry.harbor.admin_password }} ${dir##*/} localhost/${project}/${dest_image}:${tag}
    done
when: local_images_dir.stderr == ""
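
The loop assumes directory names that encode source registry, project, image path, and tag joined by '='. A sketch of how the parsing decomposes one hypothetical name:

name='docker.io=kubesphere=kube=apiserver=v1.29.0'   # hypothetical directory name
IFS='=' read -ra array <<< "$name"
echo "project:    ${array[1]}"                                          # kubesphere
echo "dest_image: $(echo "${array[@]:2:${#array[@]}-3}" | tr ' ' '/')"  # kube/apiserver
echo "tag:        ${array[-1]}"                                         # v1.29.0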

View File

@@ -0,0 +1,16 @@
---
- include_tasks: install_docker.yaml
- include_tasks: install_docker_compose.yaml
- include_tasks: install_keepalived.yaml
when: image_registry.ha_vip | defined
- name: Install image registry
block:
- include_tasks: install_registry.yaml
when: image_registry.type == 'registry'
- include_tasks: install_harbor.yaml
when: image_registry.type == 'harbor'
- include_tasks: load_images.yaml

View File

@@ -0,0 +1,19 @@
{
"log-opts": {
"max-size": "5m",
"max-file":"3"
},
{% if (cri.docker.data_root|defined) %}
"data-root": {{ cri.docker.data_root }},
{% endif %}
{% if (registry.mirrors|defined) %}
"registry-mirrors": {{ registry.mirrors|to_json|safe }},
{% endif %}
{% if (registry.insecure_registries|defined) %}
"insecure-registries": {{ registry.insecure_registries|to_json|safe }},
{% endif %}
{% if (cri.docker.bridge_ip|defined) %}
"bip": "{{ cri.docker.bridge_ip }}",
{% endif %}
"exec-opts": ["native.cgroupdriver=systemd"]
}
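
Assuming illustrative values (a data root of /var/lib/docker and one mirror), the template above renders to a daemon.json along these lines; a shell heredoc sketch:

cat > /etc/docker/daemon.json <<'EOF'
{
  "log-opts": { "max-size": "5m", "max-file": "3" },
  "data-root": "/var/lib/docker",
  "registry-mirrors": ["https://mirror.example.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF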

View File

@@ -0,0 +1,311 @@
# Configuration file of Harbor
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: {{ internal_ipv4 }}
# http related config
http:
# port for http, default is 80. If https enabled, this port will redirect to https port
port: 80
# https related config
https:
# https port for harbor, default is 443
port: 443
# The path of cert and key files for nginx
certificate: /opt/harbor/{{ harbor_version }}/ssl/server.crt
private_key: /opt/harbor/{{ harbor_version }}/ssl/server.key
# enable strong ssl ciphers (default: false)
# strong_ssl_ciphers: false
# # Uncomment following will enable tls communication between all harbor components
# internal_tls:
# # set enabled to true means internal tls is enabled
# enabled: true
# # put your cert and key files on dir
# dir: /etc/harbor/tls/internal
# Uncomment external_url if you want to enable external proxy
# And when it is enabled, the hostname will no longer be used
# external_url: https://reg.mydomain.com:8433
# The initial password of Harbor admin
# It only takes effect the first time Harbor is installed.
# Remember to change the admin password from the UI after launching Harbor.
harbor_admin_password: {{ image_registry.harbor.admin_password }}
# Harbor DB configuration
database:
# The password for the root user of Harbor DB. Change this before any production use.
password: root123
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
max_idle_conns: 100
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 1024 for postgres of harbor.
max_open_conns: 900
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
conn_max_lifetime: 5m
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
conn_max_idle_time: 0
# The default data volume
data_volume: /data
# Harbor Storage settings by default is using /data dir on local filesystem
# Uncomment storage_service setting If you want to using external storage
# storage_service:
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
# # of registry's containers. This is usually needed when the user hosts an internal storage with a self-signed certificate.
# ca_bundle:
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
# filesystem:
# maxthreads: 100
# # set disable to true when you want to disable registry redirect
# redirect:
# disable: false
# Trivy configuration
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
# 12 hours and published as a new release to GitHub.
trivy:
# ignoreUnfixed The flag to display only fixed vulnerabilities
ignore_unfixed: false
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
#
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
skip_update: false
#
# skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
# `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
skip_java_db_update: false
#
# The offline_scan option prevents Trivy from sending API requests to identify dependencies.
# Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
# For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
# exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
# It would work if all the dependencies are in local.
# This option doesn't affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
offline_scan: false
#
# Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
security_check: vuln
#
# insecure The flag to skip verifying registry certificate
insecure: false
# github_token The GitHub access token to download Trivy DB
#
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
# https://docs.github.com/rest/overview/resources-in-the-rest-api#rate-limiting
#
# You can create a GitHub token by following the instructions in
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
#
# github_token: xxx
jobservice:
# Maximum number of job workers in job service
max_job_workers: 10
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
job_loggers:
- STD_OUTPUT
- FILE
# - DB
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
logger_sweeper_duration: 1 #days
notification:
# Maximum retry count for webhook job
webhook_job_max_retry: 3
# HTTP client timeout for webhook job
webhook_job_http_client_timeout: 3 #seconds
# Log configurations
log:
# options are debug, info, warning, error, fatal
level: info
# configs for logs in local storage
local:
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
rotate_count: 50
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# are all valid.
rotate_size: 200M
# The directory on your host that store log
location: /var/log/harbor
# Uncomment following lines to enable external syslog endpoint.
# external_endpoint:
# # protocol used to transmit log to external endpoint, options is tcp or udp
# protocol: tcp
# # The host of external endpoint
# host: localhost
# # Port of external endpoint
# port: 5140
# This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.10.0
# Uncomment external_database if using external database.
# external_database:
# harbor:
# host: harbor_db_host
# port: harbor_db_port
# db_name: harbor_db_name
# username: harbor_db_username
# password: harbor_db_password
# ssl_mode: disable
# max_idle_conns: 2
# max_open_conns: 0
# Uncomment redis if you need to customize the redis db
# redis:
# # db_index 0 is for core, it's unchangeable
# # registry_db_index: 1
# # jobservice_db_index: 2
# # trivy_db_index: 5
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
# # harbor_db_index: 6
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
# # cache_layer_db_index: 7
# Uncomment external_redis if using external Redis server
# external_redis:
# # support redis, redis+sentinel
# # host for redis: <host_redis>:<port_redis>
# # host for redis+sentinel:
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
# host: redis:6379
# password:
# # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
# # there's a known issue when using external redis username ref:https://github.com/goharbor/harbor/issues/18892
# # if you care about the image pull/push performance, please refer to this https://github.com/goharbor/harbor/wiki/Harbor-FAQs#external-redis-username-password-usage
# # username:
# # sentinel_master_set must be set to support redis+sentinel
# #sentinel_master_set:
# # db_index 0 is for core, it's unchangeable
# registry_db_index: 1
# jobservice_db_index: 2
# trivy_db_index: 5
# idle_timeout_seconds: 30
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
# # harbor_db_index: 6
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
# # cache_layer_db_index: 7
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
# uaa:
# ca_file: /path/to/ca
# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components don't need to connect to each other via http proxy.
# Remove a component from the `components` array if you want to disable the proxy
# for it. If you want to use the proxy for replication, you MUST enable the proxy
# for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add a domain to the `no_proxy` field when you want to disable the proxy
# for some special registry.
proxy:
http_proxy:
https_proxy:
no_proxy:
components:
- core
- jobservice
- trivy
# metric:
# enabled: false
# port: 9090
# path: /metrics
# Trace related config
# only one trace provider (jaeger or otel) can be enabled at a time,
# and when using jaeger as the provider, it can only be enabled in agent mode or collector mode.
# if using jaeger collector mode, uncomment endpoint (and username, password if needed)
# if using jaeger agent mode, uncomment agent_host and agent_port
# trace:
# enabled: true
# # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
# sample_rate: 1
# # # namespace used to differentiate different harbor services
# # namespace:
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
# # attributes:
# # application: harbor
# # # jaeger should be 1.26 or newer.
# # jaeger:
# # endpoint: http://hostname:14268/api/traces
# # username:
# # password:
# # agent_host: hostname
# # # export trace data by jaeger.thrift in compact mode
# # agent_port: 6831
# # otel:
# # endpoint: hostname:4318
# # url_path: /v1/traces
# # compression: false
# # insecure: true
# # # timeout is in seconds
# # timeout: 10
# Enable purge _upload directories
upload_purging:
enabled: true
# remove files in _upload directories which exist for a period of time, default is one week.
age: 168h
# the interval of the purge operations
interval: 24h
dryrun: false
# Cache layer configurations
# If this feature enabled, harbor will cache the resource
# `project/project_metadata/repository/artifact/manifest` in the redis
# which can especially help to improve the performance of high concurrent
# manifest pulling.
# NOTICE
# If you are deploying Harbor in HA mode, make sure that all the harbor
# instances have the same behaviour, all with caching enabled or disabled,
# otherwise it can lead to potential data inconsistency.
cache:
# not enabled by default
enabled: false
# keep cache for one day by default
expire_hours: 24
# Harbor core configurations
# Uncomment to enable the following harbor core related configuration items.
# core:
# # The provider for updating project quota (usage); there are 2 options, redis or db.
# # By default it is implemented by db, but you can switch the update to redis, which
# # can improve the performance of highly concurrent pushes to the same project
# # and reduce database connection spikes and occupancy.
# # Redis introduces some delay before quota usage is updated for display, so only
# # switch the provider to redis if you run into db connection spikes around
# # the scenario of highly concurrent pushes to the same project; there is no improvement for other scenes.
# quota_update_provider: redis # Or db

View File

@@ -0,0 +1,12 @@
[Unit]
Description=harbor
After=docker.service systemd-networkd.service systemd-resolved.service
Requires=docker.service
[Service]
Type=simple
ExecStart=/usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ harbor_version }}/harbor/docker-compose.yml {% if (image_registry.ha_vip | defined) %}-f /opt/harbor/{{ harbor_version }}/harbor/docker-compose-keepalived.yml {% endif %}up
ExecStop=/usr/local/bin/docker-compose -p harbor down
Restart=on-failure
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,26 @@
---
version: '2.3'
services:
keepalived:
    image: osixia/keepalived:{{ keepalived_version }}
container_name: keepalived
restart: always
dns_search: .
cap_drop:
- ALL
cap_add:
- CHOWN
- DAC_OVERRIDE
- SETGID
- SETUID
depends_on:
- proxy
volumes:
      - type: bind
        source: /opt/keepalived/{{ keepalived_version }}/keepalived.conf
        target: /container/service/keepalived/assets/keepalived.conf
      - type: bind
        source: /opt/keepalived/{{ keepalived_version }}/healthcheck.sh
        target: /etc/keepalived/healthcheck.sh
networks:
- harbor

View File

@@ -0,0 +1,31 @@
vrrp_script healthcheck {
script "/etc/keepalived/healthcheck.sh"
interval 10
fall 2
rise 2
timeout 5
init_fail
}
global_defs {
script_user root
router_id harbor-ha
enable_script_security
lvs_sync_daemon ens3 VI_1
}
vrrp_instance VI_1 {
state BACKUP
interface ens3
virtual_router_id 31
priority 50
advert_int 1
authentication {
auth_type PASS
auth_pass k8s-test
}
virtual_ipaddress {
{{ image_registry.ha_vip }}
}
track_script {
healthcheck
}
}
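
When this instance wins the election, the VIP shows up as a secondary address on the tracked interface; a hedged spot check (interface and VIP are illustrative):

ip addr show ens3 | grep -w 192.168.1.100 && echo "vip held on this node"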

View File

@@ -0,0 +1,17 @@
#!/bin/bash
{% if (image_registry.type=='registry') %}
# registry service
service=registry:5000
{% else %}
# harbor service
service=harbor:80
{% endif %}
nc -zv -w 2 ${service%:*} ${service#*:} > /dev/null 2>&1
if [ $? -eq 0 ]; then
exit 0
else
exit 1
fi
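
The same probe can be run by hand; the host and port here are illustrative:

# exit 0 only when the endpoint accepts a TCP connection within 2 seconds
nc -zv -w 2 registry 5000 && echo healthy || echo unhealthy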

View File

@@ -0,0 +1,218 @@
version: 0.1
log:
accesslog:
disabled: true
level: info
formatter: text
fields:
service: registry
environment: staging
# hooks:
# - type: mail
# disabled: true
# levels:
# - panic
# options:
# smtp:
# addr: mail.example.com:25
# username: mailuser
# password: password
# insecure: true
# from: sender@example.com
# to:
# - errors@example.com
storage:
{% if (image_registry.registry.storage.filesystem|length != 0) %}
  filesystem:
    rootdirectory: {{ image_registry.registry.storage.filesystem.rootdirectory }}
    maxthreads: 100
{% endif %}
{% if (image_registry.registry.storage.azure|length != 0) %}
  azure:
    accountname: {{ image_registry.registry.storage.azure.accountname }}
    accountkey: {{ image_registry.registry.storage.azure.accountkey }}
    container: {{ image_registry.registry.storage.azure.container }}
{% endif %}
{% if (image_registry.registry.storage.gcs|length != 0) %}
  gcs:
    bucket: {{ image_registry.registry.storage.gcs.bucket }}
    keyfile: {{ image_registry.registry.storage.gcs.keyfile }}
    credentials:
      type: service_account
      project_id: {{ image_registry.registry.storage.gcs.credentials.project_id }}
      private_key_id: {{ image_registry.registry.storage.gcs.credentials.private_key_id }}
      private_key: {{ image_registry.registry.storage.gcs.credentials.private_key }}
      client_email: {{ image_registry.registry.storage.gcs.credentials.client_email }}
      client_id: {{ image_registry.registry.storage.gcs.credentials.client_id }}
      auth_uri: {{ image_registry.registry.storage.gcs.credentials.auth_uri }}
      token_uri: {{ image_registry.registry.storage.gcs.credentials.token_uri }}
      auth_provider_x509_cert_url: {{ image_registry.registry.storage.gcs.credentials.auth_provider_x509_cert_url }}
      client_x509_cert_url: {{ image_registry.registry.storage.gcs.credentials.client_x509_cert_url }}
    rootdirectory: {{ image_registry.registry.storage.gcs.rootdirectory }}
{% endif %}
{% if (image_registry.registry.storage.s3|length != 0) %}
  s3:
    accesskey: {{ image_registry.registry.storage.s3.accesskey }}
    secretkey: {{ image_registry.registry.storage.s3.secretkey }}
    region: {{ image_registry.registry.storage.s3.region }}
    regionendpoint: {{ image_registry.registry.storage.s3.regionendpoint }}
    forcepathstyle: true
    accelerate: false
    bucket: {{ image_registry.registry.storage.s3.bucket }}
    encrypt: true
    keyid: {{ image_registry.registry.storage.s3.keyid }}
    secure: true
    v4auth: true
    chunksize: 5242880
    multipartcopychunksize: 33554432
    multipartcopymaxconcurrency: 100
    multipartcopythresholdsize: 33554432
    rootdirectory: {{ image_registry.registry.storage.s3.rootdirectory }}
    usedualstack: false
    loglevel: debug
{% endif %}
  # inmemory: # This driver takes no parameters; only one storage driver may be active at a time
delete:
enabled: false
redirect:
disable: false
cache:
    blobdescriptor: inmemory
blobdescriptorsize: 10000
maintenance:
uploadpurging:
enabled: true
age: 168h
interval: 24h
dryrun: false
readonly:
enabled: false
#auth:
# silly:
# realm: silly-realm
# service: silly-service
# token:
# autoredirect: true
# realm: token-realm
# service: token-service
# issuer: registry-token-issuer
# rootcertbundle: /root/certs/bundle
# htpasswd:
# realm: basic-realm
# path: /path/to/htpasswd
#middleware:
# registry:
# - name: ARegistryMiddleware
# options:
# foo: bar
# repository:
# - name: ARepositoryMiddleware
# options:
# foo: bar
# storage:
# - name: cloudfront
# options:
# baseurl: https://my.cloudfronted.domain.com/
# privatekey: /path/to/pem
# keypairid: cloudfrontkeypairid
# duration: 3000s
# ipfilteredby: awsregion
# awsregion: us-east-1, use-east-2
# updatefrequency: 12h
# iprangesurl: https://ip-ranges.amazonaws.com/ip-ranges.json
# - name: redirect
# options:
# baseurl: https://example.com/
http:
  addr: :5000
# prefix: /my/nested/registry/
# host: https://myregistryaddress.org:5000
secret: asecretforlocaldevelopment
relativeurls: false
draintimeout: 60s
tls:
certificate: /etc/registry/ssl/server.crt
key: /etc/registry/ssl/server.key
# clientcas:
# - /path/to/ca.pem
# - /path/to/another/ca.pem
# letsencrypt:
# cachefile: /path/to/cache-file
# email: emailused@letsencrypt.com
# hosts: [myregistryaddress.org]
# directoryurl: https://acme-v02.api.letsencrypt.org/directory
# debug:
# addr: localhost:5001
# prometheus:
# enabled: true
# path: /metrics
headers:
X-Content-Type-Options: [nosniff]
http2:
disabled: false
h2c:
enabled: false
#notifications:
# events:
# includereferences: true
# endpoints:
# - name: alistener
# disabled: false
# url: https://my.listener.com/event
# headers: <http.Header>
# timeout: 1s
# threshold: 10
# backoff: 1s
# ignoredmediatypes:
# - application/octet-stream
# ignore:
# mediatypes:
# - application/octet-stream
# actions:
# - pull
#redis:
# addr: localhost:6379
# password: asecret
# db: 0
# dialtimeout: 10ms
# readtimeout: 10ms
# writetimeout: 10ms
# pool:
# maxidle: 16
# maxactive: 64
# idletimeout: 300s
# tls:
# enabled: false
health:
storagedriver:
enabled: true
interval: 10s
threshold: 3
# file:
# - file: /path/to/checked/file
# interval: 10s
# http:
# - uri: http://server.to.check/must/return/200
# headers:
# Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==]
# statuscode: 200
# timeout: 3s
# interval: 10s
# threshold: 3
# tcp:
# - addr: redis-server.domain.com:6379
# timeout: 3s
# interval: 10s
## threshold: 3
#proxy:
# remoteurl: https://registry-1.docker.io
# username: [username]
# password: [password]
# ttl: 168h
#validation:
# manifests:
# urls:
# allow:
# - ^https?://([^/]+\.)*example\.com/
# deny:
# - ^https?://www\.example\.com/
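
With this config in place and 443 published to the registry's port 5000 (as in the compose file below), a hedged smoke test of the running registry:

curl -sk https://localhost/v2/          # returns '{}' with HTTP 200 when serving
curl -sk https://localhost/v2/_catalog  # lists repositories pushed so far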

View File

@@ -0,0 +1,54 @@
---
version: '2.3'
services:
registry:
image: registry:{{ registry_version }}
container_name: registry
restart: always
dns_search: .
cap_drop:
- ALL
cap_add:
- CHOWN
- DAC_OVERRIDE
- SETGID
- SETUID
volumes:
- type: bind
source: /opt/registry/{{ registry_version }}/ssl/
target: /etc/registry/ssl/
- type: bind
source: /opt/registry/{{ registry_version }}/config.yml
target: /etc/docker/registry/config.yml
    ports:
- 443:5000
networks:
- registry
{% if (image_registry.ha_vip | defined) %}
keepalived:
    image: osixia/keepalived:{{ keepalived_version }}
container_name: keepalived
restart: always
dns_search: .
cap_drop:
- ALL
cap_add:
- CHOWN
- DAC_OVERRIDE
- SETGID
- SETUID
depends_on:
- registry
volumes:
      - type: bind
        source: /opt/keepalived/{{ keepalived_version }}/keepalived.conf
        target: /container/service/keepalived/assets/keepalived.conf
      - type: bind
        source: /opt/keepalived/{{ keepalived_version }}/healthcheck.sh
        target: /etc/keepalived/healthcheck.sh
networks:
- registry
{% endif %}
networks:
registry:
external: false

View File

@@ -0,0 +1,12 @@
[Unit]
Description=registry
After=docker.service systemd-networkd.service systemd-resolved.service
Requires=docker.service
[Service]
Type=simple
ExecStart=/usr/local/bin/docker-compose -p registry -f /opt/registry/{{ registry_version }}/docker-compose.yml up
ExecStop=/usr/local/bin/docker-compose -p registry down
Restart=on-failure
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,161 @@
kubernetes:
cluster_name: cluster.local
# support: flannel, calico
kube_network_plugin: calico
# the minimal version of kubernetes to be installed.
kube_version_min_required: v1.19.10
# the image repository of kubernetes.
image_repository: "registry.k8s.io"
  # the minimal memory size required on each kube_worker node. (unit: MB)
  # each node's memory should be greater than or equal to minimal_node_memory_mb.
minimal_node_memory_mb: 10
# the maximum number of pods that can be run on each node.
max_pods: 110
audit: false
security_enhancement: "{{ security_enhancement|default_if_none:false }}"
networking:
dns_domain: cluster.local
    # it supports two comma-separated values, e.g. value1,value2.
    # the first value is the ipv4_cidr, the second is the ipv6_cidr.
pod_cidr: 10.233.64.0/18
service_cidr: 10.233.0.0/18
dns_image: "registry.k8s.io/coredns/coredns:v1.11.1"
dns_cache_image: "kubesphere/k8s-dns-node-cache:1.22.20"
dns_service_ip: "{{ kubernetes.networking.service_cidr|ip_range|slice:':3'|last }}"
# Specify a stable IP address or DNS name for the control plane.
# control_plane_endpoint: lb.kubesphere.local
apiserver:
port: 6443
certSANs: []
extra_args:
bind-address: 0.0.0.0
feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
controller_manager:
# Set the Pod CIDR size of a node.
kube_network_node_prefix: 24
extra_args:
feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
scheduler:
extra_args:
feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
kube_proxy:
enabled: true
# support ipvs and iptables
mode: "ipvs"
config:
iptables:
masqueradeAll: false
masqueradeBit: 14
minSyncPeriod: 0s
syncPeriod: 30s
kubelet:
max_pod: 110
pod_pids_limit: 10000
feature_gates: {}
container_log_max_size: 5Mi
container_log_max_files: 3
extra_args: {}
coredns:
dns_etc_hosts: []
# the config for zones
zone_configs:
    # DNS zones to match. the default port is 53. the format is as follows:
    # .: all dns zones.
    # example.com: match *.example.com using a dns server on port 53
    # example.com:54: match *.example.com using a dns server on port 54
- zones: [".:53"]
additional_configs:
- errors
- ready
- prometheus :9153
- loop
- reload
- loadbalance
cache: 30
kubernetes:
zones:
- "{{ kubernetes.networking.dns_domain }}"
# rewrite performs internal message rewriting.
# rewrite:
# # specify multiple rules and an incoming query matches multiple rules.
# # continue: if the rewrite rule is not matched, the next rule will be matched.
# # stop: if the rewrite rule is not matched, the next rule will not be matched.
# - rule: continue
# # support: type, name, class, edns0, ttl, cname
# # type: the type field of the request will be rewritten. FROM/TO must be a DNS record type (A, MX, etc.).
# # name: the query name in the request is rewritten; by default this is a full match of the name
# # class: the class of the message will be rewritten.
# # edns0: an EDNS0 option can be appended to the request as described below in the EDNS0 Options section.
# # ttl: the TTL value in the response is rewritten.
# # cname: the CNAME target if the response has a CNAME record
# field: name
# # this optional element can be specified for a name or ttl field.
# # exact: the name must be exactly the same as the value.
# # prefix: the name must start with the value.
# # suffix: the name must end with the value.
# # substring: the name must contain the value.
# # regex: the name must match the value.
# type: exact
# value: "example.com example2.com"
# # for field name further options are possible controlling the response rewrites.
    # # answer auto: the names in the response are rewritten in a best effort manner.
    # # answer name FROM TO: the query name in the response is rewritten matching the FROM regex pattern.
    # # answer value FROM TO: the names in the response are rewritten matching the FROM regex pattern.
# options: ""
forward:
# the base domain to match for the request to be forwarded.
- from: "."
# the destination endpoints to forward to. The TO syntax allows you to specify a protocol
to: ["/etc/resolv.conf"]
# a space-separated list of domains to exclude from forwarding.
except: []
# use TCP even when the request comes in over UDP.
force_tcp: false
# try first using UDP even when the request comes in over TCP.
# If response is truncated (TC flag set in response) then do another attempt over TCP.
prefer_udp: false
# the number of subsequent failed health checks that are needed before considering an upstream to be down
# If 0, the upstream will never be marked as down (nor health checked).
# max_fails: 2
# expire (cached) connections after this time,
# expire: 10s
# define the TLS properties for TLS connection.
# tls:
# # the path to the certificate file.
# cert_file: ""
# # the path to the key file.
# key_file: ""
# # the path to the CA certificate file.
# ca_file: ""
# # allows you to set a server name in the TLS configuration
# tls_servername: ""
# specifies the policy to use for selecting upstream servers. The default is random.
# random: a policy that implements random upstream selection.
# round_robin: a policy that selects hosts based on round robin ordering.
# sequential: a policy that selects hosts based on sequential ordering.
# policy: "random"
# configure the behaviour of health checking of the upstream servers
# format: DURATION [no_rec] [domain FQDN]
# <duration>: use a different duration for health checking, the default duration is 0.5s.
    # no_rec: optional argument that sets the RecursionDesired flag of the dns query used in health checking to false. the flag defaults to true.
# domain FQDN: set the domain name used for health checks to FQDN. If not configured, the domain name used for health checks is .
# health_check: ""
# limit the number of concurrent queries to MAX.
max_concurrent: 1000
kube_vip:
enabled: false
    # support: BGP, ARP
mode: BGP
image: plndr/kube-vip:v0.7.2
haproxy:
enabled: false
health_port: 8081
image: library/haproxy:2.9.6-alpine
etcd: # todo should apply zone variable
  # It is possible to deploy etcd with the following methods.
# external: Deploy etcd cluster with external etcd cluster.
# internal: Deploy etcd cluster by static pod.
deployment_type: external
image: "k8s.gcr.io/etcd:3.5.0"
custom_label: {}

View File

@@ -0,0 +1,123 @@
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk,
# so drop them.
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core
resources: ["endpoints", "services", "services/status"]
- level: None
users: ["system:unsecured"]
namespaces: ["kube-system"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["configmaps"]
- level: None
users: ["kubelet"] # legacy kubelet identity
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
userGroups: ["system:nodes"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
users:
- system:kube-controller-manager
- system:kube-scheduler
- system:serviceaccount:kube-system:endpoint-controller
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["endpoints"]
- level: None
users: ["system:apiserver"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
# Don't log HPA fetching metrics.
- level: None
users:
- system:kube-controller-manager
verbs: ["get", "list"]
resources:
- group: "metrics.k8s.io"
# Don't log these read-only URLs.
- level: None
nonResourceURLs:
- /healthz*
- /version
- /swagger*
# Don't log events requests.
- level: None
resources:
- group: "" # core
resources: ["events"]
# Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
# so only log at the Metadata level.
- level: Metadata
resources:
- group: "" # core
resources: ["secrets", "configmaps", "serviceaccounts/token"]
- group: authentication.k8s.io
resources: ["tokenreviews"]
omitStages:
- "RequestReceived"
# Get responses can be large; skip them.
- level: Request
verbs: ["get", "list", "watch"]
resources:
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "settings.k8s.io"
- group: "storage.k8s.io"
omitStages:
- "RequestReceived"
# Default level for known APIs
- level: RequestResponse
resources:
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "settings.k8s.io"
- group: "storage.k8s.io"
omitStages:
- "RequestReceived"
# Default level for all other requests.
- level: Metadata
omitStages:
- "RequestReceived"

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Config
clusters:
- name: kube-auditing
cluster:
server: https://SHOULD_BE_REPLACED:6443/audit/webhook/event
insecure-skip-tls-verify: true
contexts:
- context:
cluster: kube-auditing
user: ""
name: default-context
current-context: default-context
preferences: {}
users: []

View File

@@ -0,0 +1,15 @@
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=http://kubernetes.io/docs/
[Service]
CPUAccounting=true
MemoryAccounting=true
ExecStart=/usr/local/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,21 @@
---
- name: Generate coredns config
template:
src: dns/coredns.deployment
dest: /etc/kubernetes/coredns.yaml
- name: Apply coredns config
command: "kubectl apply -f /etc/kubernetes/coredns.yaml"
- name: Get coredns service ip
command: |
/usr/local/bin/kubectl get svc -n kube-system coredns -o jsonpath='{.spec.clusterIP}'
register: core_dns_ip
- name: Generate nodelocaldns deployment
template:
    src: dns/nodelocaldns.deployment
dest: /etc/kubernetes/nodelocaldns.yaml
- name: Apply nodelocaldns deployment
command: "kubectl apply -f /etc/kubernetes/nodelocaldns.yaml"

View File

@@ -0,0 +1,15 @@
---
- name: Generate haproxy config
template:
src: haproxy/haproxy.cfg
dest: /etc/kubekey/haproxy/haproxy.cfg
- name: Get md5 for haproxy config
command: |
    md5sum /etc/kubekey/haproxy/haproxy.cfg | cut -d ' ' -f1
register: cfg_md5
- name: Generate haproxy manifest
template:
src: haproxy/haproxy.yaml
dest: /etc/kubernetes/manifests/haproxy.yaml

View File

@@ -0,0 +1,31 @@
---
# install with static pod: https://kube-vip.io/docs/installation/static/
- name: Get interface for ipv4
command: |
    ip route | grep ' {{ internal_ipv4 }} ' | grep 'proto kernel scope link src' | sed -e 's/^.*dev.//' -e 's/.proto.*//' | uniq
register: interface
- name: Should ipv4 interface not be empty
  assert:
    that: interface.stdout != ""
    fail_msg: "{{ internal_ipv4 }} cannot be found in any network interface."
- name: Generate kubevip manifest
template:
src: "kubevip/kubevip.{{ kubernetes.kube_vip.mode }}"
dest: "/etc/kubernetes/manifests/kubevip.yaml"
- name: Update kubelet config
command: |
sed -i 's#server:.*#server: https://127.0.0.1:{{ kubernetes.apiserver.port }}#g' /etc/kubernetes/kubelet.conf
systemctl restart kubelet
- name: Update kube-proxy config
command: |
set -o pipefail && /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf get configmap kube-proxy -n kube-system -o yaml \
| sed 's#server:.*#server: https://127.0.0.1:{{ kubernetes.apiserver.port }}#g' \
| /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf replace -f -
/usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0
- name: Update hosts file
command: |
    sed -i 's#.* {{ kubernetes.control_plane_endpoint }}#127.0.0.1 {{ kubernetes.control_plane_endpoint }}#g' /etc/hosts
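
An illustrative run of the interface probe at the top of this file (address and device are made up):

ip route | grep ' 192.168.1.10 ' | grep 'proto kernel scope link src' \
  | sed -e 's/^.*dev.//' -e 's/.proto.*//' | uniq
# -> eth0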

View File

@@ -0,0 +1,76 @@
---
- name: Add kube user
command: |
useradd -M -c 'Kubernetes user' -s /sbin/nologin -r kube || :
- name: Create kube directories
command: |
if [ ! -d "{{ item.path }}" ]; then
mkdir -p {{ item.path }} && chown kube -R {{ item.chown }}
fi
loop:
- {path: "/usr/local/bin", chown: "/usr/local/bin"}
- {path: "/etc/kubernetes", chown: "/etc/kubernetes"}
- {path: "/etc/kubernetes/pki", chown: "/etc/kubernetes/pki"}
- {path: "/etc/kubernetes/manifests", chown: "/etc/kubernetes/manifests"}
- {path: "/usr/local/bin/kube-scripts", chown: "/usr/local/bin/kube-scripts"}
- {path: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec", chown: "/usr/libexec/kubernetes"}
- {path: "/etc/cni/net.d", chown: "/etc/cni"}
- {path: "/opt/cni/bin", chown: "/opt/cni"}
- {path: "/var/lib/calico", chown: "/var/lib/calico"}
- name: Sync external etcd config
when:
    - kubernetes.etcd.deployment_type == 'external'
    - groups['etcd']|length > 0
block:
- name: Sync etcd ca file to remote
copy:
src: "{{ work_dir }}/kubekey/pki/root.crt"
dest: "/etc/kubernetes/pki/etcd/ca.crt"
- name: Sync etcd cert files to remote
copy:
src: "{{ work_dir }}/kubekey/pki/etcd.crt"
dest: "/etc/kubernetes/pki/etcd/client.crt"
- name: Sync etcd key files to remote
copy:
src: "{{ work_dir }}/kubekey/pki/etcd.key"
dest: "/etc/kubernetes/pki/etcd/client.key"
- name: Sync audit policy file to remote
copy:
src: "audit"
dest: "/etc/kubernetes/audit/"
when:
- kubernetes.audit
- name: Generate kubeadm init config
template:
src: "kubeadm/{% if (kube_version|version:'>=v1.24.0') %}kubeadm-init.v1beta3{% else %}kubeadm-init.v1beta2{% endif %}"
dest: "/etc/kubernetes/kubeadm-config.yaml"
- name: Init kubernetes cluster
block:
- name: Init kubernetes by kubeadm
command: |
/usr/local/bin/kubeadm init \
--config=/etc/kubernetes/kubeadm-config.yaml \
--ignore-preflight-errors=FileExisting-crictl,ImagePull \
{% if (not kubernetes.kube_proxy.enabled) %}--skip-phases=addon/kube-proxy{% endif %}
rescue:
- name: Reset kubeadm if init failed
command: kubeadm reset -f {% if (cri.cri_socket !="") %}--cri-socket {{ cri.cri_socket }}{% endif %}
- name: Remove master taint
ignore_errors: true
command: |
/usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
/usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
- name: Copy kubeconfig to default dir
command: |
if [ ! -d /root/.kube ]; then
mkdir -p /root/.kube
fi
cp -f /etc/kubernetes/admin.conf /root/.kube/config
when: kube_node_info_important.stderr != ""
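
For reference, when kube_proxy.enabled is false the init command above renders to roughly this (a sketch, not the literal output):

/usr/local/bin/kubeadm init \
  --config=/etc/kubernetes/kubeadm-config.yaml \
  --ignore-preflight-errors=FileExisting-crictl,ImagePull \
  --skip-phases=addon/kube-proxy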

View File

@@ -0,0 +1,69 @@
---
- name: Check if helm is installed
ignore_errors: true
command: helm version
register: helm_install_version
- name: Sync helm to remote
copy:
src: "{{ work_dir }}/kubekey/helm/{{ helm_version }}/{{ binary_type.stdout }}/helm-{{ helm_version }}-linux-{{ binary_type.stdout }}.tar.gz"
dest: "/tmp/kubekey/helm-{{ helm_version }}-linux-{{ binary_type.stdout }}.tar.gz"
when: helm_install_version.stderr != ""
- name: Install helm
command: |
tar --strip-components=1 -zxvf /tmp/kubekey/helm-{{ helm_version }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin linux-{{ binary_type.stdout }}/helm
when: helm_install_version.stderr != ""
- name: Check if kubeadm is installed
ignore_errors: true
command: kubeadm version
register: kubeadm_install_version
- name: Sync kubeadm to remote
copy:
src: "{{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ binary_type.stdout }}/kubeadm"
dest: "/usr/local/bin/kubeadm"
mode: 0755
when: kubeadm_install_version.stderr != ""
- name: Check if kubectl is installed
ignore_errors: true
command: kubectl version
register: kubectl_install_version
- name: Sync kubectl to remote
copy:
src: "{{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ binary_type.stdout }}/kubectl"
dest: "/usr/local/bin/kubectl"
mode: 0755
when: kubectl_install_version.stderr != ""
- name: Check if kubelet is installed
ignore_errors: true
command: systemctl status kubelet
register: kubelet_install_version
- name: Sync kubelet to remote
copy:
src: "{{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ binary_type.stdout }}/kubelet"
dest: "/usr/local/bin/kubelet"
mode: 0755
when: kubelet_install_version.stderr != ""
- name: Sync kubelet env to remote
template:
src: "kubeadm/kubelet.env"
dest: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
when: kubelet_install_version.stderr != ""
- name: Sync kubelet service to remote
copy:
src: "kubelet.service"
dest: "/etc/systemd/system/kubelet.service"
when: kubelet_install_version.stderr != ""
- name: Register kubelet service
command: systemctl daemon-reload && systemctl enable kubelet.service
when: kubelet_install_version.stderr != ""
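
After the sync, a quick sanity pass over the installed binaries (all of these are standard version/status subcommands):

/usr/local/bin/helm version --short
/usr/local/bin/kubeadm version -o short
/usr/local/bin/kubectl version --client
systemctl is-enabled kubelet.service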

Some files were not shown because too many files have changed in this diff.