feat: Installation refinement steps (#2536)
Signed-off-by: joyceliu <joyceliu@yunify.com>
This commit is contained in: parent 61d0bb2b51, commit 25f3de2cde
@@ -158,10 +158,14 @@ mv $tmpfile /etc/security/limits.conf

# ------------------------ 5. Firewall Configurations ---------------------------

systemctl stop firewalld 1>/dev/null 2>/dev/null
systemctl disable firewalld 1>/dev/null 2>/dev/null
systemctl stop ufw 1>/dev/null 2>/dev/null
systemctl disable ufw 1>/dev/null 2>/dev/null
if systemctl is-active firewalld --quiet; then
systemctl stop firewalld 1>/dev/null 2>/dev/null
systemctl disable firewalld 1>/dev/null 2>/dev/null
fi
if systemctl is-active ufw --quiet; then
systemctl stop ufw 1>/dev/null 2>/dev/null
systemctl disable ufw 1>/dev/null 2>/dev/null
fi

# ------------------------ 6. System Module Settings ----------------------------

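The refined firewall step only acts when the corresponding unit is actually running, so the script no longer errors on hosts that ship without firewalld or ufw. A minimal bash sketch of the same idempotent pattern (service names are the ones from the hunk; the loop itself is only illustrative):

```bash
# Stop and disable a host firewall only when it is currently active;
# silently skip services that are absent or already inactive.
for svc in firewalld ufw; do
  if systemctl is-active "$svc" --quiet; then
    systemctl stop "$svc" 1>/dev/null 2>/dev/null
    systemctl disable "$svc" 1>/dev/null 2>/dev/null
  fi
done
```
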
@@ -251,7 +255,7 @@ cat >>/etc/hosts<<EOF
EOF

sync
echo 3 > /proc/sys/vm/drop_caches
# echo 3 > /proc/sys/vm/drop_caches

# Make sure the iptables utility doesn't use the nftables backend.
{{- if and .internal_ipv4 (.internal_ipv4 | ne "") }}

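For context, `sync` flushes dirty pages and `echo 3 > /proc/sys/vm/drop_caches` asks the kernel to drop the page cache plus dentries and inodes; the hunk keeps the sync and comments the cache drop out. A hedged sketch of a guarded form, should it ever be re-enabled (path and value exactly as in the script):

```bash
# Flush dirty data first, then drop clean caches only where the knob exists.
sync
if [ -w /proc/sys/vm/drop_caches ]; then
  echo 3 > /proc/sys/vm/drop_caches
fi
```
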
@@ -4,6 +4,7 @@
- all
vars_files:
- vars/create_cluster.yaml
- vars/create_cluster_kubernetes.yaml

- import_playbook: hook/pre_install.yaml

@@ -30,19 +31,6 @@
- init/init-artifact
- init/init-cert

# image registry
- hosts:
- image_registry
gather_facts: true
roles:
- install/image-registry

- hosts:
- localhost
roles:
- init/init-artifact
- init/init-cert

# init os
- hosts:
- etcd

@@ -76,13 +64,20 @@
gather_facts: true
roles:
- install/cri
- install/kubernetes

- hosts:
- kube_control_plane
roles:
- role: install/certs
when: .renew_certs.enabled
- kubernetes/pre-kubernetes
- kubernetes/init-kubernetes
- kubernetes/join-kubernetes
- role: kubernetes/certs
when:
- .kubernetes.renew_certs.enabled
- .groups.kube_control_plane | default list | has .inventory_name
post_tasks:
- name: Add custom label to cluster
command: |
{{- range $k, $v := .kubernetes.custom_label }}
/usr/local/bin/kubectl label --overwrite node {{ $.hostname }} {{ $k }}={{ $v }}
{{- end }}
when: .kubernetes.custom_label | len | lt 0

- hosts:
- kube_control_plane|random

@@ -31,12 +31,6 @@ k8s_registry: |

security_enhancement: false

kubernetes:
etcd:
# It is possible to deploy etcd with three methods.
# external: Deploy etcd cluster with external etcd cluster.
# internal: Deploy etcd cluster by static pod.
deployment_type: external
cri:
# support: containerd,docker
container_manager: docker

@@ -21,8 +21,10 @@ kubernetes:
# the whole service_cidr in cluster. support: ipv4; ipv6; ipv4,ipv6.
service_cidr: 10.233.0.0/18
dns_domain: cluster.local
dns_image: |
{{ .k8s_registry }}/coredns/coredns:v1.8.6
dns_image:
repository: |
{{ .k8s_registry }}/coredns
tag: v1.8.6
dns_cache_image: |
{{ .dockerio_registry }}/kubesphere/k8s-dns-node-cache:1.22.20
dns_service_ip: |

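`dns_image` moves from a single templated string that later had to be split on ':' and '/' to explicit `repository` and `tag` fields. A short shell illustration of why the string form was brittle and how the new fields compose back into an image reference (the registry value here stands in for `{{ .k8s_registry }}` and is only an example):

```bash
# Old style: one string that later gets split apart again (the kubeadm
# templates cut it at the first ':', which goes wrong once a registry
# carries a port such as myregistry:5000/...).
image="registry.k8s.io/coredns/coredns:v1.8.6"
repository="${image%:*}"   # -> registry.k8s.io/coredns/coredns
tag="${image##*:}"         # -> v1.8.6

# New style: repository and tag are separate fields and simply join back up.
repository="registry.k8s.io/coredns"
tag="v1.8.6"
echo "${repository}:${tag}"   # -> registry.k8s.io/coredns:v1.8.6
```
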
@@ -160,19 +162,10 @@ kubernetes:
host: lb.kubesphere.local
port: "{{ .kubernetes.apiserver.port }}"
# support local, kube_vip, haproxy
# when type is local, # if set will write in /etc/hosts.
# - for control-plane: will use 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - for worker: will use {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
type: local
# if set will write in /etc/hosts.
# when type is local, it will write the first node in "kube_control_plane" groups as the control_plane_endpoint's server.
etc_hosts: |
{{- if .kubernetes.control_plane_endpoint.type | eq "local" }}
{{- $internalIPv4 := index .inventory_hosts (.groups.kube_control_plane | default list | first) "internal_ipv4" | default "" }}
{{- $internalIPv6 := index .inventory_hosts (.groups.kube_control_plane | default list | first) "internal_ipv6" | default "" }}
{{- if ne $internalIPv4 "" }}
{{ $internalIPv4 }} {{ .kubernetes.control_plane_endpoint.host }}
{{- else if ne $internalIPv6 "" }}
{{ $internalIPv6 }} {{ .kubernetes.control_plane_endpoint.host }}
{{- end }}
{{- end }}
kube_vip:
# the ip address of node net. usage in node network interface: "eth0"
# address:

@@ -190,7 +183,11 @@ kubernetes:
# It is possible to deploy etcd with three methods.
# external: Deploy etcd cluster with external etcd cluster.
# internal: Deploy etcd cluster by static pod.
# deployment_type: external
image: |
{{ .k8s_registry }}/etcd:3.5.0
deployment_type: external
image:
repository: "{{ .k8s_registry }}"
tag: 3.5.0
custom_label: {}
# if auto renew kubernetes certs
renew_certs:
enabled: true

@@ -5,15 +5,18 @@
src: |
{{ .binary_dir }}/pki/root.crt
dest: /etc/kubernetes/pki/etcd/ca.crt
mode: 0755
- name: Sync etcd cert files to remote
tags: ["certs"]
copy:
src: |
{{ .binary_dir }}/pki/etcd.crt
dest: /etc/kubernetes/pki/etcd/client.crt
mode: 0755
- name: Sync etcd key files to remote
tags: ["certs"]
copy:
src: |
{{ .binary_dir }}/pki/etcd.key
dest: /etc/kubernetes/pki/etcd/client.key
mode: 0755

@@ -1,5 +1,5 @@
etcd:
# endpoints: ["https://172.1.1.1:2379"]
# endpoints: ["https://127.1.1.1:2379"]
# etcd binary
state: new
# env config

@@ -1,43 +0,0 @@
---
- name: Generate kubeadm join config
template:
src: |
{{- if .kube_version | semverCompare ">=v1.24.0" }}
kubeadm/kubeadm-join.v1beta3
{{- else }}
kubeadm/kubeadm-join.v1beta2
{{- end }}
dest: /etc/kubernetes/kubeadm-config.yaml

- name: Sync audit policy file to remote
copy:
src: audit
dest: /etc/kubernetes/audit/
when: .kubernetes.audit

- name: Join kubernetes cluster
block:
- name: Join kubernetes by kubeadm
command: |
/usr/local/bin/kubeadm join --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull
rescue:
- name: Reset kubeadm if join failed
command: kubeadm reset -f {{ if and .cri.cri_socket (ne .cri.cri_socket "") }}--cri-socket {{ .cri.cri_socket }}{{ end }}

- name: Sync kubeconfig to remote
copy:
src: |
{{ .work_dir }}/kubekey/kubeconfig
dest: /root/.kube/config

- name: Set to worker node
when: .groups.kube_worker | default list | has .inventory_name
block:
- name: Remote master taint
ignore_errors: true
command: |
/usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/master=:NoSchedule-
/usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/control-plane=:NoSchedule-
- name: Add work label
command: |
/usr/local/bin/kubectl label --overwrite node {{ .hostname }} node-role.kubernetes.io/worker=

@@ -1,107 +0,0 @@
---
- name: select init node
run_once: true
set_fact:
init_kubernetes_node: |
{{- $initNodes := list -}}
{{- $notInitNodes := list -}}
{{- range .groups.kube_control_plane -}}
{{- if index $.inventory_hosts . "kubernetes_install_service" "stdout" | eq "active" -}}
{{- $initNodes = append $initNodes . -}}
{{- else if index $.inventory_hosts . "kubernetes_install_service" "stdout" | eq "inactive" -}}
{{- $notInitNodes = append $notInitNodes . -}}
{{- end -}}
{{- end -}}
{{- if $initNodes | len | eq 1 -}}
{{ $initNodes | first }}
{{- else if $initNodes | len | lt 1 -}}
{{ index $initNodes (randInt 0 ((sub ($initNodes | len) 1) | int)) }}
{{- else if $notInitNodes | len | eq 1 -}}
{{ $notInitNodes | first }}
{{- else if $notInitNodes | len | lt 1 }}
{{ index $notInitNodes (randInt 0 ((sub ($notInitNodes | len) 1) | int)) }}
{{- end -}}

- include_tasks: high-availability/main.yaml

- include_tasks: install_binaries.yaml

- name: Init kubernetes
when: eq .inventory_name .init_kubernetes_node
block:
- include_tasks: init_kubernetes.yaml
when: .kubernetes_install_service.stdout | eq "inactive"
- include_tasks: deploy_cluster_dns.yaml
- name: Fetch kubeconfig to local
fetch:
src: /etc/kubernetes/admin.conf
dest: |
{{ .work_dir }}/kubekey/kubeconfig
- name: Generate certificate key by kubeadm
command: |
/usr/local/bin/kubeadm init phase upload-certs --upload-certs --config /etc/kubernetes/kubeadm-config.yaml 2>&1 \
| awk '/Using certificate key:/{getline; print}'
register: kubeadm_cert_result
- name: Set_Fact certificate key to all hosts
set_fact:
kubeadm_cert: |
{{ .kubeadm_cert_result.stdout }}
- name: Generate kubeadm token
block:
- name: Generate token by kubeadm
command: /usr/local/bin/kubeadm token create
register: kubeadm_token_result
- name: Set_Fact token to all hosts
set_fact:
kubeadm_token: |
{{ .kubeadm_token_result.stdout }}

- name: Join kubernetes
when: ne .inventory_name .init_kubernetes_node
block:
# HAProxy is deployed as a static Pod, which starts only after Kubelet is running.
# Therefore, the control plane must be reachable before HAProxy starts (e.g., by configuring /etc/hosts).
- name: Write tmp dns to /etc/hosts
command: |
cat >> /etc/hosts << EOF
# haproxy dns for kubekey begin
{{- $internalIPv4 := index .inventory_hosts (.groups.kube_control_plane | default list | first) "internal_ipv4" | default "" }}
{{- $internalIPv6 := index .inventory_hosts (.groups.kube_control_plane | default list | first) "internal_ipv6" | default "" }}
{{- if ne $internalIPv4 "" }}
{{ $internalIPv4 }} {{ .kubernetes.control_plane_endpoint.host }}
{{- else if ne $internalIPv6 "" }}
{{ $internalIPv6 }} {{ .kubernetes.control_plane_endpoint.host }}
{{- end }}
# haproxy dns for kubekey end
EOF
when: .kubernetes.control_plane_endpoint.type | eq "haproxy"
- include_tasks: join_kubernetes.yaml
when: .kubernetes_install_service.stdout | eq ""
- name: Replace haproxy dns to /etc/hosts
when: .kubernetes.control_plane_endpoint.type | eq "haproxy"
block:
- name: Replace control_plane by local hosts
when: .groups.kube_control_plane | default list | has .inventory_name
command: |
new_dns="# haproxy dns for kubekey begin
{{- if ne .internal_ipv4 "" }}
{{ .internal_ipv4 }} {{ .kubernetes.control_plane_endpoint.host }}
{{- else if ne .internal_ipv6 "" }}
{{ .internal_ipv6 }} {{ .kubernetes.control_plane_endpoint.host }}
{{- end }}
# haproxy dns for kubekey end"
sed -i '/# haproxy dns for kubekey begin/,/# haproxy dns for kubekey end/c\'"$new_dns" /etc/hosts
- name: Replace worker by haproxy hosts
when: .groups.worker | default list | has .inventory_name
command: |
new_dns="# haproxy dns for kubekey begin
{{ .kubernetes.control_plane_endpoint.haproxy.address }} {{ .kubernetes.control_plane_endpoint.host }}
# haproxy dns for kubekey end"
sed -i '/# haproxy dns for kubekey begin/,/# haproxy dns for kubekey end/c\'"$new_dns" /etc/hosts

- name: Add custom label to cluster
command: |
{{- range $k, $v := .kubernetes.custom_label }}
/usr/local/bin/kubectl label --overwrite node {{ $.hostname }} {{ $k }}={{ $v }}
{{- end }}
when: .kubernetes.custom_label | len | lt 0

@@ -1,262 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch

---
apiVersion: v1
kind: Service
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
addonmanager.kubernetes.io/mode: Reconcile
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
createdby: 'kubekey'
spec:
clusterIP: {{ .kubernetes.networking.dns_service_ip }}
selector:
k8s-app: kube-dns
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP

---
apiVersion: apps/v1
kind: Deployment
metadata:
name: "coredns"
namespace: kube-system
labels:
k8s-app: "kube-dns"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "coredns"
spec:
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
maxSurge: 10%
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
createdby: 'kubekey'
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
priorityClassName: system-cluster-critical
serviceAccountName: coredns
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
k8s-app: kube-dns
topologyKey: "kubernetes.io/hostname"
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: In
values:
- ""
containers:
- name: coredns
image: "{{ .kubernetes.networking.dns_image }}"
imagePullPolicy: IfNotPresent
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 300Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 10
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns

---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
{{- range .kubernetes.coredns.zone_configs }}
{{ .zones | join " " }} {
cache {{ .cache }}
{{- range .additional_configs }}
{{ . }}
{{- end }}

{{- range .rewrite }}
rewrite {{ .rule }} {
{{ .field }} {{ .type }} {{ .value }}
{{ .options }}
}
{{- end }}

health {
lameduck 5s
}

{{- if .kubernetes.zones | len | lt 0 }}
kubernetes {{ .kubernetes.zones | join " " }} in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
{{- end }}

{{- range .forward }}
forward {{ .from }} {{ .to | join " " }} {
{{- if .except | len | lt 0 }}
except {{ .except | join " " }}
{{- end }}
{{- if .force_tcp }}
force_tcp
{{- end }}
{{- if .prefer_udp }}
prefer_udp
{{- end }}
{{- if .max_fails }}
max_fails {{ .max_fails }}
{{- end }}
{{- if .expire }}
expire {{ .expire }}
{{- end }}
{{- if .tls }}
tls {{ .tls.cert_file }} {{ .tls.key_file }} {{ .tls.ca_file }}
{{- end }}
{{- if .tls_servername }}
tls_servername {{ .tls_servername }}
{{- end }}
{{- if .policy }}
policy {{ .policy }}
{{- end }}
{{- if .health_check }}
health_check {{ .health_check }}
{{- end }}
{{- if .max_concurrent }}
max_concurrent {{ .max_concurrent }}
{{- end }}
}
{{- end }}

{{- if $.kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
hosts /etc/coredns/hosts {
fallthrough
}
{{- end }}
}
{{- end }}

{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
hosts: |
{{- range .kubernetes.coredns.dns_etc_hosts }}
{{ . }}
{{- end }}
{{- end }}

@@ -1,11 +1,5 @@
renew_certs:
enabled: false
is_docker: |
{{- if .cri.container_manager | eq "docker" }}
true
{{- else }}
false
{{- end }}
is_kubeadm_alpha: |
{{- if .kube_version | semverCompare "<v1.20.0" }}
true

@@ -15,11 +15,7 @@ if [ $(getCertValidDays) -lt 30 ]; then
echo "## Renewing certificates managed by kubeadm ##"
${kubeadmCerts} renew all
echo "## Restarting control plane pods managed by kubeadm ##"
{{- if .renew_certs.is_docker }}
$(which docker | grep docker) ps -af 'name=k8s_POD_(kube-apiserver|kube-controller-manager|kube-scheduler|etcd)-*' -q | /usr/bin/xargs $(which docker | grep docker) rm -f
{{- else }}
$(which crictl | grep crictl) pods --namespace kube-system --name 'kube-scheduler-*|kube-controller-manager-*|kube-apiserver-*|etcd-*' -q | /usr/bin/xargs $(which crictl | grep crictl) rmp -f
{{- end }}
echo "## Updating /root/.kube/config ##"
cp /etc/kubernetes/admin.conf /root/.kube/config
fi

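The hunk above relies on a `getCertValidDays` helper (defined earlier in the script, not shown here) and on `${kubeadmCerts}` resolving to the right kubeadm subcommand. A hedged bash sketch of how such an expiry check is commonly done with openssl; the certificate path, the 30-day threshold, and the plain `kubeadm certs renew all` call are illustrative assumptions, not the script's actual code:

```bash
# Illustrative only: days until the apiserver certificate expires.
getCertValidDays() {
  local cert=/etc/kubernetes/pki/apiserver.crt
  local end_epoch now_epoch
  end_epoch=$(date -d "$(openssl x509 -enddate -noout -in "$cert" | cut -d= -f2)" +%s)
  now_epoch=$(date +%s)
  echo $(( (end_epoch - now_epoch) / 86400 ))
}

if [ "$(getCertValidDays)" -lt 30 ]; then
  kubeadm certs renew all
fi
```
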
@@ -1,17 +1,19 @@
---
- name: Generate coredns config
template:
src: dns/coredns.deployment
src: dns/coredns.yaml
dest: /etc/kubernetes/coredns.yaml

# change clusterIP for service
# change configmap for coredns
- name: Apply coredns config
command: |
kubectl delete svc kube-dns -n kube-system
kubectl apply -f /etc/kubernetes/coredns.yaml
kubectl apply -f /etc/kubernetes/coredns.yaml && kubectl rollout restart deployment -n kube-system coredns

- name: Generate nodelocaldns deployment
template:
src: dns/nodelocaldns.daemonset
src: dns/nodelocaldns.yaml
dest: /etc/kubernetes/nodelocaldns.yaml

- name: Apply coredns deployment

@@ -1,48 +1,27 @@
---
- name: Add kube user
command: |
useradd -M -c 'Kubernetes user' -s /sbin/nologin -r kube || :

- name: Create kube directories
command: |
if [ ! -d "{{ .item.path }}" ]; then
mkdir -p {{ .item.path }} && chown kube -R {{ .item.chown }}
fi
loop:
- {path: "/usr/local/bin", chown: "/usr/local/bin"}
- {path: "/etc/kubernetes", chown: "/etc/kubernetes"}
- {path: "/etc/kubernetes/pki", chown: "/etc/kubernetes/pki"}
- {path: "/etc/kubernetes/manifests", chown: "/etc/kubernetes/manifests"}
- {path: "/usr/local/bin/kube-scripts", chown: "/usr/local/bin/kube-scripts"}
- {path: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec", chown: "/usr/libexec/kubernetes"}
- {path: "/etc/cni/net.d", chown: "/etc/cni"}
- {path: "/opt/cni/bin", chown: "/opt/cni"}
- {path: "/var/lib/calico", chown: "/var/lib/calico"}

- name: Sync external etcd config
when: and (.kubernetes.etcd.deployment_type | eq "external") (.groups.etcd | default list | len | lt 0)
when:
- and (.kubernetes.etcd.deployment_type | eq "external") (.groups.etcd | default list | len | lt 0)
- .groups.kube_control_plane | default list | has .inventory_name
block:
- name: Sync etcd ca file to remote
copy:
src: |
{{ .work_dir }}/kubekey/pki/root.crt
dest: /etc/kubernetes/pki/etcd/ca.crt
mode: 0755
- name: Sync etcd cert files to remote
copy:
src: |
{{ .work_dir }}/kubekey/pki/etcd.crt
dest: /etc/kubernetes/pki/etcd/client.crt
mode: 0755
- name: Sync etcd key files to remote
copy:
src: |
{{ .work_dir }}/kubekey/pki/etcd.key
dest: /etc/kubernetes/pki/etcd/client.key

- name: Sync audit policy file to remote
copy:
src: audit
dest: /etc/kubernetes/audit/
when: .kubernetes.audit
mode: 0755

- name: Generate kubeadm init config
template:

@@ -71,15 +50,9 @@
- .kubernetes.control_plane_endpoint.type | eq "haproxy"

- name: Init kubernetes cluster
block:
- name: Init kubernetes by kubeadm
command: |
/usr/local/bin/kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull {{ if not .kubernetes.kube_proxy.enabled }}--skip-phases=addon/kube-proxy{{ end }}
rescue:
- name: Reset kubeadm if init failed
command: |
kubeadm reset -f {{ if and .cri.cri_socket (ne .cri.cri_socket "") }}--cri-socket {{ .cri.cri_socket }}{{ end }}

command: |
/usr/local/bin/kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull {{ if not .kubernetes.kube_proxy.enabled }}--skip-phases=addon/kube-proxy{{ end }}

- name: Copy kubeconfig to default dir
command: |
if [ ! -d /root/.kube ]; then

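The reworked init task drops the `rescue:` branch, so a failed `kubeadm init` no longer triggers an automatic `kubeadm reset`. In plain shell, the behaviour that was removed amounted to roughly the sketch below (flags copied from the hunk; the reset fallback is the part that is gone):

```bash
# Old behaviour, approximately: try to init, and reset the node on failure
# so that a later retry starts from a clean state.
if ! /usr/local/bin/kubeadm init \
      --config=/etc/kubernetes/kubeadm-config.yaml \
      --ignore-preflight-errors=FileExisting-crictl,ImagePull; then
  kubeadm reset -f
fi
```
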
@@ -0,0 +1,63 @@
---
- name: select init node
run_once: true
set_fact:
init_kubernetes_node: |
{{- $initNodes := list -}}
{{- $notInitNodes := list -}}
{{- range .groups.kube_control_plane -}}
{{- if index $.inventory_hosts . "kubernetes_install_service" "stdout" | eq "active" -}}
{{- $initNodes = append $initNodes . -}}
{{- else if index $.inventory_hosts . "kubernetes_install_service" "stdout" | eq "inactive" -}}
{{- $notInitNodes = append $notInitNodes . -}}
{{- end -}}
{{- end -}}
{{- if $initNodes | len | eq 1 -}}
{{ $initNodes | first }}
{{- else if $initNodes | len | lt 1 -}}
{{ index $initNodes (randInt 0 ((sub ($initNodes | len) 1) | int)) }}
{{- else if $notInitNodes | len | eq 1 -}}
{{ $notInitNodes | first }}
{{- else if $notInitNodes | len | lt 1 }}
{{ index $notInitNodes (randInt 0 ((sub ($notInitNodes | len) 1) | int)) }}
{{- end -}}

- name: Set custom hosts to /etc/hosts
when: eq .kubernetes.control_plane_endpoint.type "local"
command: |
sed -i ':a;$!{N;ba};s@# kubekey custom BEGIN.*# kubekey custom END@@' /etc/hosts
cat >> /etc/hosts <<EOF
# kubekey custom BEGIN
127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# kubekey custom END
EOF

- name: Init kubernetes
when: eq .inventory_name .init_kubernetes_node
block:
- include_tasks: init_kubernetes.yaml
when: .kubernetes_install_service.stdout | eq "inactive"
- include_tasks: deploy_cluster_dns.yaml
- name: Fetch kubeconfig to local
fetch:
src: /etc/kubernetes/admin.conf
dest: |
{{ .work_dir }}/kubekey/kubeconfig
- name: Generate certificate key by kubeadm
command: |
/usr/local/bin/kubeadm init phase upload-certs --upload-certs --config=/etc/kubernetes/kubeadm-config.yaml 2>&1 \
| awk '/Using certificate key:/{getline; print}'
register: kubeadm_cert_result
- name: Set_Fact certificate key to all hosts
set_fact:
kubeadm_cert: |
{{ .kubeadm_cert_result.stdout }}
- name: Generate kubeadm token
block:
- name: Generate token by kubeadm
command: /usr/local/bin/kubeadm token create
register: kubeadm_token_result
- name: Set_Fact token to all hosts
set_fact:
kubeadm_token: |
{{ .kubeadm_token_result.stdout }}

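The `sed` line plus the heredoc in this new task form a managed-block pattern for /etc/hosts: slurp the file, delete whatever sits between the kubekey markers, then append a fresh block. A standalone bash sketch of the same idea (marker text and the default endpoint host come from this change; 127.0.0.1 is the literal value the init node writes for itself):

```bash
# Drop any previously written kubekey block, then append the current mapping.
sed -i ':a;$!{N;ba};s@# kubekey custom BEGIN.*# kubekey custom END@@' /etc/hosts
cat >> /etc/hosts <<EOF
# kubekey custom BEGIN
127.0.0.1 lb.kubesphere.local
# kubekey custom END
EOF
```
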
@@ -0,0 +1,115 @@
---
apiVersion: v1
kind: Service
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
addonmanager.kubernetes.io/mode: Reconcile
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
createdby: 'kubekey'
spec:
clusterIP: {{ .kubernetes.networking.dns_service_ip }}
selector:
k8s-app: kube-dns
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP

---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
{{- range .kubernetes.coredns.zone_configs }}
{{ .zones | join " " }} {
cache {{ .cache }}
{{- range .additional_configs }}
{{ . }}
{{- end }}

{{- range .rewrite }}
rewrite {{ .rule }} {
{{ .field }} {{ .type }} {{ .value }}
{{ .options }}
}
{{- end }}

health {
lameduck 5s
}

{{- if .kubernetes.zones | len | lt 0 }}
kubernetes {{ .kubernetes.zones | join " " }} in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
{{- end }}

{{- range .forward }}
forward {{ .from }} {{ .to | join " " }} {
{{- if .except | len | lt 0 }}
except {{ .except | join " " }}
{{- end }}
{{- if .force_tcp }}
force_tcp
{{- end }}
{{- if .prefer_udp }}
prefer_udp
{{- end }}
{{- if .max_fails }}
max_fails {{ .max_fails }}
{{- end }}
{{- if .expire }}
expire {{ .expire }}
{{- end }}
{{- if .tls }}
tls {{ .tls.cert_file }} {{ .tls.key_file }} {{ .tls.ca_file }}
{{- end }}
{{- if .tls_servername }}
tls_servername {{ .tls_servername }}
{{- end }}
{{- if .policy }}
policy {{ .policy }}
{{- end }}
{{- if .health_check }}
health_check {{ .health_check }}
{{- end }}
{{- if .max_concurrent }}
max_concurrent {{ .max_concurrent }}
{{- end }}
}
{{- end }}

{{- if $.kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
hosts /etc/coredns/hosts {
fallthrough
}
{{- end }}
}
{{- end }}

{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
hosts: |
{{- range .kubernetes.coredns.dns_etc_hosts }}
{{ . }}
{{- end }}
{{- end }}

@@ -6,8 +6,8 @@ kind: ClusterConfiguration
etcd:
{{- if .kubernetes.etcd.deployment_type | eq "internal" }}
local:
imageRepository: {{ slice (.kubernetes.etcd.image | splitList ":" | first | splitList "/") 1 (.kubernetes.etcd.image | splitList ":" | first | splitList "/" | len) | join "/" }}
imageTag: {{ .kubernetes.etcd.image | splitList ":" | last }}
imageRepository: {{ .kubernetes.etcd.image.repository }}
imageTag: {{ .kubernetes.etcd.image.tag }}
serverCertSANs:
{{- range .groups.etcd | default list }}
- {{ index $.inventory_hosts . "internal_ipv4" }}

@@ -24,8 +24,8 @@ etcd:
{{- end }}
dns:
type: CoreDNS
imageRepository: {{ slice (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/") 1 (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/" | len) | join "/" }}
imageTag: {{ .kubernetes.networking.dns_image | splitList ":" | last }}
imageRepository: {{ .kubernetes.networking.dns_image.repository }}
imageTag: {{ .kubernetes.networking.dns_image.tag }}
imageRepository: {{ .kubernetes.image_repository }}
kubernetesVersion: {{ .kube_version }}
certificatesDir: /etc/kubernetes/pki

@@ -6,8 +6,8 @@ kind: ClusterConfiguration
etcd:
{{- if .kubernetes.etcd.deployment_type | eq "internal" }}
local:
imageRepository: {{ slice (.kubernetes.etcd.image | splitList ":" | first | splitList "/") 1 (.kubernetes.etcd.image | splitList ":" | first | splitList "/" | len) | join "/" }}
imageTag: {{ .kubernetes.etcd.image | splitList ":" | last }}
imageRepository: {{ .kubernetes.etcd.image.repository }}
imageTag: {{ .kubernetes.etcd.image.tag }}
serverCertSANs:
{{- range .groups.etcd | default list }}
- {{ index $.inventory_hosts . "internal_ipv4" }}

@@ -23,8 +23,8 @@ etcd:
keyFile: /etc/kubernetes/pki/etcd/client.key
{{- end }}
dns:
imageRepository: {{ slice (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/") 1 (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/" | len) | join "/" }}
imageTag: {{ .kubernetes.networking.dns_image | splitList ":" | last }}
imageRepository: {{ .kubernetes.networking.dns_image.repository }}
imageTag: {{ .kubernetes.networking.dns_image.tag }}
imageRepository: {{ .kubernetes.image_repository }}
kubernetesVersion: {{ .kube_version }}
certificatesDir: /etc/kubernetes/pki

@@ -0,0 +1,59 @@
---
- name: Set init_kubernetes_node hosts to /etc/hosts
when: eq .kubernetes.control_plane_endpoint.type "local"
command: |
sed -i ':a;$!{N;ba};s@# kubekey custom BEGIN.*# kubekey custom END@@' /etc/hosts
cat >> /etc/hosts <<EOF
# kubekey custom BEGIN
{{- if and (index .inventory_hosts .init_kubernetes_node "internal_ipv4") (ne (index .inventory_hosts .init_kubernetes_node "internal_ipv4") "") }}
{{ index .inventory_hosts .init_kubernetes_node "internal_ipv4" }} {{ .kubernetes.control_plane_endpoint.host }}
{{- end }}
{{- if and (index .inventory_hosts .init_kubernetes_node "internal_ipv6") (ne (index .inventory_hosts .init_kubernetes_node "internal_ipv6") "") }}
{{ index .inventory_hosts .init_kubernetes_node "internal_ipv6" }} {{ .kubernetes.control_plane_endpoint.host }}
{{- end }}
# kubekey custom END
EOF

- name: Generate kubeadm join config
template:
src: |
{{- if .kube_version | semverCompare ">=v1.24.0" }}
kubeadm/kubeadm-join.v1beta3
{{- else }}
kubeadm/kubeadm-join.v1beta2
{{- end }}
dest: /etc/kubernetes/kubeadm-config.yaml

- name: Join kubernetes cluster
command: |
/usr/local/bin/kubeadm join --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull

- name: Sync kubeconfig to remote
copy:
src: |
{{ .work_dir }}/kubekey/kubeconfig
dest: /root/.kube/config

- name: Set to worker node
when: .groups.kube_worker | default list | has .inventory_name
block:
- name: Remote master taint
ignore_errors: true
command: |
/usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/master=:NoSchedule-
/usr/local/bin/kubectl taint nodes {{ .hostname }} node-role.kubernetes.io/control-plane=:NoSchedule-
- name: Add work label
command: |
/usr/local/bin/kubectl label --overwrite node {{ .hostname }} node-role.kubernetes.io/worker=

- name: Set change custom hosts to /etc/hosts
when:
- eq .kubernetes.control_plane_endpoint.type "local"
- .groups.kube_control_plane | default list | has .inventory_name
command: |
sed -i ':a;$!{N;ba};s@# kubekey custom BEGIN.*# kubekey custom END@@' /etc/hosts
cat >> /etc/hosts <<EOF
# kubekey custom BEGIN
127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# kubekey custom END
EOF

@@ -0,0 +1,42 @@
- name: Join kubernetes
when: ne .inventory_name .init_kubernetes_node
block:
# HAProxy is deployed as a static Pod, which starts only after Kubelet is running.
# Therefore, the control plane must be reachable before HAProxy starts (e.g., by configuring /etc/hosts).
- name: Write tmp dns to /etc/hosts
command: |
cat >> /etc/hosts << EOF
# haproxy dns for kubekey begin
{{- $internalIPv4 := index .inventory_hosts (.groups.kube_control_plane | default list | first) "internal_ipv4" | default "" }}
{{- $internalIPv6 := index .inventory_hosts (.groups.kube_control_plane | default list | first) "internal_ipv6" | default "" }}
{{- if ne $internalIPv4 "" }}
{{ $internalIPv4 }} {{ .kubernetes.control_plane_endpoint.host }}
{{- else if ne $internalIPv6 "" }}
{{ $internalIPv6 }} {{ .kubernetes.control_plane_endpoint.host }}
{{- end }}
# haproxy dns for kubekey end
EOF
when: .kubernetes.control_plane_endpoint.type | eq "haproxy"
- include_tasks: join_kubernetes.yaml
when: .kubernetes_install_service.stdout | eq "inactive"
- name: Replace haproxy dns to /etc/hosts
when: .kubernetes.control_plane_endpoint.type | eq "haproxy"
block:
- name: Replace control_plane by local hosts
when: .groups.kube_control_plane | default list | has .inventory_name
command: |
new_dns="# haproxy dns for kubekey begin
{{- if ne .internal_ipv4 "" }}
{{ .internal_ipv4 }} {{ .kubernetes.control_plane_endpoint.host }}
{{- else if ne .internal_ipv6 "" }}
{{ .internal_ipv6 }} {{ .kubernetes.control_plane_endpoint.host }}
{{- end }}
# haproxy dns for kubekey end"
sed -i '/# haproxy dns for kubekey begin/,/# haproxy dns for kubekey end/c\'"$new_dns" /etc/hosts
- name: Replace worker by haproxy hosts
when: .groups.worker | default list | has .inventory_name
command: |
new_dns="# haproxy dns for kubekey begin
{{ .kubernetes.control_plane_endpoint.haproxy.address }} {{ .kubernetes.control_plane_endpoint.host }}
# haproxy dns for kubekey end"
sed -i '/# haproxy dns for kubekey begin/,/# haproxy dns for kubekey end/c\'"$new_dns" /etc/hosts

@@ -21,5 +21,5 @@
- name: Generate kube_vip manifest
template:
src: |
kubevip/kubevip.{{ .kubernetes.kube_vip.mode }}
kubevip/kubevip.{{ .kubernetes.control_plane_endpoint.kube_vip.mode }}
dest: /etc/kubernetes/manifests/kubevip.yaml

@@ -1,12 +1,3 @@
- name: Set custom hosts to /etc/hosts
when: and .kubernetes.control_plane_endpoint.etc_hosts (.kubernetes.control_plane_endpoint.etc_hosts | ne "")
command: |
cat >> /etc/hosts <<EOF
# Custom by kubekey begin
{{ .kubernetes.control_plane_endpoint.etc_hosts }}
# Custom by kubekey end
EOF

# install with static pod: https://kube-vip.io/docs/installation/static/
- include_tasks: high-availability/kube_vip.yaml
when:

@@ -32,6 +32,7 @@
ignore_errors: true
command: kubectl version --short
register: kubectl_install_version
register_type: yaml
- name: Sync kubectl to remote
when: |
or (.kubectl_install_version.stderr | ne "") ((get .kubectl_install_version.stdout "Server Version") | ne .kube_version)

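With `register_type: yaml`, the registered output of `kubectl version --short` can be read as a map, so the `when:` clause compares the parsed `Server Version` field against `.kube_version` instead of grepping raw text. A rough shell equivalent of that comparison (the desired version below is just a placeholder):

```bash
# Re-sync kubectl when it is missing/broken or does not match the target version.
kube_version="v1.24.0"
current="$(kubectl version --short 2>/dev/null | awk '/Server Version:/ {print $3}')"
if [ -z "$current" ] || [ "$current" != "$kube_version" ]; then
  echo "kubectl needs to be synced for ${kube_version}"
fi
```
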
@@ -56,7 +57,7 @@
mode: 0755
- name: Sync kubelet env to remote
template:
src: kubeadm/kubelet.env
src: kubelet/kubelet.env
dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
- name: Sync kubelet service to remote
copy:

@@ -0,0 +1,29 @@
- include_tasks: install_binaries.yaml

- include_tasks: high-availability/main.yaml

# - name: Add kube user
# command: |
# useradd -M -c 'Kubernetes user' -s /sbin/nologin -r kube || :

- name: Create kube directories
command: |
if [ ! -d "{{ .item.path }}" ]; then
mkdir -p {{ .item.path }} && chown kube -R {{ .item.chown }}
fi
loop:
- {path: "/usr/local/bin", chown: "/usr/local/bin"}
- {path: "/etc/kubernetes", chown: "/etc/kubernetes"}
- {path: "/etc/kubernetes/pki", chown: "/etc/kubernetes/pki"}
- {path: "/etc/kubernetes/manifests", chown: "/etc/kubernetes/manifests"}
- {path: "/usr/local/bin/kube-scripts", chown: "/usr/local/bin/kube-scripts"}
- {path: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec", chown: "/usr/libexec/kubernetes"}
- {path: "/etc/cni/net.d", chown: "/etc/cni"}
- {path: "/opt/cni/bin", chown: "/opt/cni"}
- {path: "/var/lib/calico", chown: "/var/lib/calico"}

- name: Sync audit policy file to remote
copy:
src: audit
dest: /etc/kubernetes/audit/
when: .kubernetes.audit

@@ -38,8 +38,8 @@ spec:
- name: lb_port
value: "6443"
- name: address
value: {{ .kubernetes.kube_vip.address }}
image: {{ .kubernetes.kube_vip.image }}
value: {{ .kubernetes.control_plane_endpoint.kube_vip.address }}
image: {{ .kubernetes.control_plane_endpoint.kube_vip.image }}
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}

@@ -54,10 +54,10 @@ spec:
- name: lb_fwdmethod
value: local
- name: address
value: {{ .kubernetes.kube_vip.address }}
value: {{ .kubernetes.control_plane_endpoint.kube_vip.address }}
- name: prometheus_server
value: :2112
image: {{ .kubernetes.kube_vip.image }}
image: {{ .kubernetes.control_plane_endpoint.kube_vip.image }}
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}