Mirror of https://github.com/kubesphere/kubekey.git (synced 2025-12-25 17:12:50 +00:00)
fix: Make the /schema/config POST endpoint more robust. (#2699)
Signed-off-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
parent 0f40e29791
commit 4c72031a74
@@ -81,7 +81,7 @@ spec:
# - docker.io/kubesphere/kube-proxy:{{ .kube_version }}
# - docker.io/kubesphere/kube-scheduler:{{ .kube_version }}
# - docker.io/kubesphere/pause:3.6
# - quay.io/tigera/operator:v1.36.5
# - quay.io/tigera/operator:v1.28.5
# - docker.io/calico/ctl:v3.24.5
# - docker.io/calico/typha:v3.24.5
# - docker.io/calico/apiserver:v3.24.5

@@ -81,7 +81,7 @@ spec:
# - docker.io/kubesphere/kube-proxy:{{ .kube_version }}
# - docker.io/kubesphere/kube-scheduler:{{ .kube_version }}
# - docker.io/kubesphere/pause:3.6
# - quay.io/tigera/operator:v1.36.5
# - quay.io/tigera/operator:v1.29.3
# - docker.io/calico/ctl:v3.25.1
# - docker.io/calico/typha:v3.25.1
# - docker.io/calico/apiserver:v3.25.1

@@ -81,7 +81,7 @@ spec:
# - docker.io/kubesphere/kube-proxy:{{ .kube_version }}
# - docker.io/kubesphere/kube-scheduler:{{ .kube_version }}
# - docker.io/kubesphere/pause:3.6
# - quay.io/tigera/operator:v1.36.5
# - quay.io/tigera/operator:v1.29.3
# - docker.io/calico/ctl:v3.25.1
# - docker.io/calico/typha:v3.25.1
# - docker.io/calico/apiserver:v3.25.1

@@ -81,7 +81,7 @@ spec:
# - docker.io/kubesphere/kube-proxy:{{ .kube_version }}
# - docker.io/kubesphere/kube-scheduler:{{ .kube_version }}
# - docker.io/kubesphere/pause:3.7
# - quay.io/tigera/operator:v1.36.5
# - quay.io/tigera/operator:v1.30.4
# - docker.io/calico/ctl:v3.26.1
# - docker.io/calico/typha:v3.26.1
# - docker.io/calico/apiserver:v3.26.1

@@ -81,7 +81,7 @@ spec:
# - docker.io/kubesphere/kube-proxy:{{ .kube_version }}
# - docker.io/kubesphere/kube-scheduler:{{ .kube_version }}
# - docker.io/kubesphere/pause:3.7
# - quay.io/tigera/operator:v1.36.5
# - quay.io/tigera/operator:v1.30.4
# - docker.io/calico/ctl:v3.26.1
# - docker.io/calico/typha:v3.26.1
# - docker.io/calico/apiserver:v3.26.1

@@ -81,7 +81,7 @@ spec:
# - docker.io/kubesphere/kube-proxy:{{ .kube_version }}
# - docker.io/kubesphere/kube-scheduler:{{ .kube_version }}
# - docker.io/kubesphere/pause:3.8
# - quay.io/tigera/operator:v1.36.5
# - quay.io/tigera/operator:v1.34.5
# - docker.io/calico/ctl:v3.28.2
# - docker.io/calico/typha:v3.28.2
# - docker.io/calico/apiserver:v3.28.2

@@ -81,7 +81,7 @@ spec:
# - docker.io/kubesphere/kube-proxy:{{ .kube_version }}
# - docker.io/kubesphere/kube-scheduler:{{ .kube_version }}
# - docker.io/kubesphere/pause:3.8
# - quay.io/tigera/operator:v1.36.5
# - quay.io/tigera/operator:v1.34.5
# - docker.io/calico/ctl:v3.28.2
# - docker.io/calico/typha:v3.28.2
# - docker.io/calico/apiserver:v3.28.2

@@ -81,7 +81,7 @@ spec:
# - docker.io/kubesphere/kube-proxy:{{ .kube_version }}
# - docker.io/kubesphere/kube-scheduler:{{ .kube_version }}
# - docker.io/kubesphere/pause:3.8
# - quay.io/tigera/operator:v1.36.5
# - quay.io/tigera/operator:v1.34.5
# - docker.io/calico/ctl:v3.28.2
# - docker.io/calico/typha:v3.28.2
# - docker.io/calico/apiserver:v3.28.2

@@ -81,7 +81,7 @@ spec:
# - docker.io/kubesphere/kube-proxy:{{ .kube_version }}
# - docker.io/kubesphere/kube-scheduler:{{ .kube_version }}
# - docker.io/kubesphere/pause:3.8
# - quay.io/tigera/operator:v1.36.5
# - quay.io/tigera/operator:v1.34.5
# - docker.io/calico/ctl:v3.28.2
# - docker.io/calico/typha:v3.28.2
# - docker.io/calico/apiserver:v3.28.2

@@ -9,7 +9,7 @@ spec:
# should be greater than or equal to kube_version_min_required.
kube_version: {{ .kube_version }}
# helm binary
helm_version: v3.13.3
helm_version: v3.14.3
# etcd binary
etcd_version: v3.5.11
# ========== image registry ==========

@@ -81,7 +81,7 @@ spec:
# - docker.io/kubesphere/kube-proxy:{{ .kube_version }}
# - docker.io/kubesphere/kube-scheduler:{{ .kube_version }}
# - docker.io/kubesphere/pause:3.8
# - quay.io/tigera/operator:v1.36.5
# - quay.io/tigera/operator:v1.34.5
# - docker.io/calico/ctl:v3.28.2
# - docker.io/calico/typha:v3.28.2
# - docker.io/calico/apiserver:v3.28.2

@@ -9,7 +9,7 @@ spec:
# should be greater than or equal to kube_version_min_required.
kube_version: {{ .kube_version }}
# helm binary
helm_version: v3.13.3
helm_version: v3.18.5
# etcd binary
etcd_version: v3.5.11
# ========== image registry ==========

@@ -81,7 +81,7 @@ spec:
# - docker.io/kubesphere/kube-proxy:{{ .kube_version }}
# - docker.io/kubesphere/kube-scheduler:{{ .kube_version }}
# - docker.io/kubesphere/pause:3.9
# - quay.io/tigera/operator:v1.36.5
# - quay.io/tigera/operator:v1.34.5
# - docker.io/calico/ctl:v3.28.2
# - docker.io/calico/typha:v3.28.2
# - docker.io/calico/apiserver:v3.28.2
@@ -1,4 +1,6 @@
---
- import_playbook: hook/default.yaml

# load defaults vars
- hosts:
- all

@@ -6,8 +8,6 @@
- vars/common.yaml
- vars/kubernetes.yaml

- import_playbook: hook/pre_install.yaml

# precheck
- hosts:
- localhost

@@ -99,5 +99,3 @@
- or (.add_nodes | default list | empty) (.add_nodes | default list | has .inventory_hostname)
- .groups.kube_control_plane | default list | has .inventory_hostname
- .kubernetes.certs.renew

- import_playbook: hook/post_install.yaml

@@ -1,5 +1,5 @@
---
- import_playbook: hook/pre_install.yaml
- import_playbook: hook/default.yaml

- hosts:
- localhost

@@ -10,5 +10,3 @@
- name: Export artifact
command: |
cd {{ .binary_dir }} && tar -czvf {{ .artifact_file }} *

- import_playbook: hook/post_install.yaml
@@ -1,15 +1,19 @@
---
- import_playbook: hook/pre_install.yaml
- import_playbook: hook/default.yaml

- hosts:
- localhost
tags: ["always"]
pre_tasks:
- name: Image | Download container images
tags: ["always"]
image:
pull:
images_dir: >-
{{ .binary_dir }}/images/
manifests: "{{ .image_manifests | toJson }}"
when:
- .image_manifests | default list | empty | not
roles:
- role: init/init-artifact
tags: ["always"]
- role: init/init-cert
tags: ["always"]
- role: install/image-registry
tags: ["always"]

- import_playbook: hook/post_install.yaml

@@ -1,12 +1,12 @@
---
# load defaults vars
- import_playbook: hook/default.yaml

- hosts:
- all
vars_files:
- vars/certs_renew.yaml

- import_playbook: hook/pre_install.yaml

- hosts:
- localhost
tags: ["certs"]

@@ -18,5 +18,3 @@
tags: ["certs"]
roles:
- role: certs/renew

- import_playbook: hook/post_install.yaml
@@ -1,4 +1,7 @@
---
- import_playbook: hook/default.yaml
- import_playbook: hook/pre_install.yaml

# load defaults vars
- hosts:
- all

@@ -6,8 +9,6 @@
- vars/common.yaml
- vars/kubernetes.yaml

- import_playbook: hook/pre_install.yaml

# precheck
- hosts:
- localhost

@@ -1,4 +1,6 @@
---
- import_playbook: hook/default.yaml

# load defaults vars
- hosts:
- all

@@ -6,8 +8,6 @@
- vars/common.yaml
- vars/kubernetes.yaml

- import_playbook: hook/pre_install.yaml

- hosts:
- k8s_cluster
roles:

@@ -37,5 +37,3 @@
- role: uninstall/image-registry
when:
- .deleteImageRegistry

- import_playbook: hook/post_install.yaml
@@ -1,4 +1,6 @@
---
- import_playbook: hook/default.yaml

# load defaults vars
- hosts:
- all

@@ -6,8 +8,6 @@
- vars/common.yaml
- vars/kubernetes.yaml

- import_playbook: hook/pre_install.yaml

- hosts:
- kube_control_plane
gather_facts: true

@@ -94,5 +94,3 @@
when:
- .deleteImageRegistry
- .delete_nodes | default list | has .inventory_hostname

- import_playbook: hook/post_install.yaml

@@ -1,4 +1,6 @@
---
- import_playbook: hook/default.yaml

# load defaults vars
- hosts:
- all

@@ -6,11 +8,7 @@
- vars/common.yaml
- vars/kubernetes.yaml

- import_playbook: hook/pre_install.yaml

- hosts:
- image_registry
roles:
- role: uninstall/image-registry

- import_playbook: hook/post_install.yaml
- role: uninstall/image-registry
@@ -0,0 +1,34 @@
---
- hosts:
- all
gather_facts: true
tags: ["always"]
vars:
architectures:
amd64:
- amd64
- x86_64
arm64:
- arm64
- aarch64
tasks:
- name: Get os arch for each node
tags: ["always"]
set_fact:
binary_type: >-
{{- if .architectures.amd64 | has .os.architecture -}}
amd64
{{- else if .architectures.arm64 | has .os.architecture -}}
arm64
{{- end -}}

- hosts:
- all
tags: ["always"]
vars:
# work_dir: default is <current_dir>/kubekey
binary_dir: >-
{{ .work_dir }}/kubekey
scripts_dir: >-
{{ .binary_dir }}/scripts
tmp_dir: /tmp/kubekey
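The new playbook added above (it appears to be the hook/default.yaml that the other playbooks now import) gathers facts once for all hosts, maps the reported machine architecture to a binary_type of amd64 or arm64, and defines the shared binary_dir, scripts_dir and tmp_dir variables. As an illustration only, here is a minimal Go sketch of the same architecture normalization; normalizeArch and the surrounding program are hypothetical, not kubekey code:

    // Illustrative only: mirrors the set_fact mapping in the playbook above.
    package main

    import "fmt"

    // aliases per binary_type, matching the playbook's "architectures" var.
    var architectures = map[string][]string{
    	"amd64": {"amd64", "x86_64"},
    	"arm64": {"arm64", "aarch64"},
    }

    // normalizeArch returns the binary_type for a reported machine
    // architecture, or "" when the value is not recognized.
    func normalizeArch(osArch string) string {
    	for binaryType, aliases := range architectures {
    		for _, alias := range aliases {
    			if alias == osArch {
    				return binaryType
    			}
    		}
    	}
    	return ""
    }

    func main() {
    	fmt.Println(normalizeArch("x86_64"))  // amd64
    	fmt.Println(normalizeArch("aarch64")) // arm64
    }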
@@ -1,36 +1,4 @@
---
- hosts:
- all
gather_facts: true
vars:
architectures:
amd64:
- amd64
- x86_64
arm64:
- arm64
- aarch64
tasks:
- name: Get os arch for each node
set_fact:
binary_type: >-
{{- if .architectures.amd64 | has .os.architecture -}}
amd64
{{- else if .architectures.arm64 | has .os.architecture -}}
arm64
{{- end -}}

- hosts:
- all
vars:
# work_dir: default is <current_dir>/kubekey
binary_dir: >-
{{ .work_dir }}/kubekey
scripts_dir: >-
{{ .binary_dir }}/scripts
tmp_dir: /tmp/kubekey


- name: Execute pre install scripts
hosts:
- all
@@ -1,22 +0,0 @@
- name: Check Connect
hosts:
- all
tasks:
- name: get host info
ignore_errors: true
setup: {}
- name: set result
run_once: true
vars:
architectures:
amd64:
- amd64
- x86_64
arm64:
- arm64
- aarch64
result: |
{{- range $k,$v := .hostvars }}
{{ $k }}: {{ if $.architectures.amd64 | has ($v.os.architecture | default "") }}amd64{{ else if $.architectures.arm64 | has .os.architecture }}arm64{{ else }}{{ $v.os.architecture | default "" }}{{ end }}
{{- end }}
@@ -1,5 +1,5 @@
---
- import_playbook: hook/pre_install.yaml
- import_playbook: hook/default.yaml

- hosts:
- localhost

@@ -14,5 +14,3 @@
- nfs
roles:
- init/init-os

- import_playbook: hook/post_install.yaml

@@ -1,5 +1,5 @@
---
- import_playbook: hook/pre_install.yaml
- import_playbook: hook/default.yaml

- hosts:
- localhost

@@ -13,5 +13,3 @@
roles:
- init/init-os
- install/image-registry

- import_playbook: hook/post_install.yaml

@@ -1,5 +1,5 @@
---
- import_playbook: hook/pre_install.yaml
- import_playbook: hook/default.yaml

- hosts:
- localhost

@@ -17,5 +17,3 @@
roles:
- role: precheck/env_check
tags: ["always"]

- import_playbook: hook/post_install.yaml
@@ -1,6 +1,5 @@
---
- name: Artifact | Extract artifact archive to working directory
tags: ["always"]
command: |
if [ -f "{{ .artifact_file }}" ]; then
mkdir -p {{ .binary_dir }}

@@ -16,7 +15,7 @@
# Download Helm and CNI binaries
- include_tasks: download_helm.yaml
# Download remote images to the local images directory
- name: Download container images
- name: Artifact | Download container images
image:
pull:
images_dir: >-

@@ -26,7 +25,6 @@
- .image_manifests | default list | empty | not

- name: Artifact | Set ownership of working directory to sudo user
tags: ["always"]
ignore_errors: true
command: |
chown -R ${SUDO_UID}:${SUDO_GID} {{ .work_dir }}
@@ -59,15 +59,16 @@ state = "/run/containerd"
{{- if or (.cri.registry.auths | empty | not) (.groups.image_registry | default list | empty | not) }}
[plugins."io.containerd.grpc.v1.cri".registry.configs]
{{- end }}
{{- if .groups.image_registry | default list | empty | not }}
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .image_registry.auth.registry }}".auth]
username = "{{ .image_registry.auth.username }}"
password = "{{ .image_registry.auth.password }}"
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .image_registry.auth.registry }}".tls]
{{- if .groups.image_registry | default list | empty | not }}
insecure_skip_verify = true
ca_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/ca.crt"
cert_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.crt"
key_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.key"
{{- end }}
insecure_skip_verify = {{ .image_registry.auth.skip_ssl | default true }}
{{- if .cri.registry.auths | empty | not }}
{{- range .cri.registry.auths }}
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .repo }}".auth]
@@ -41,3 +41,4 @@ image_registry:
{{- end -}}
username: admin
password: Harbor12345
insecure: true
@@ -9,9 +9,11 @@
{{- if .cri.registry.mirrors }}
"registry-mirrors": {{ .cri.registry.mirrors | toJson }},
{{- end }}
{{- if .cri.registry.insecure_registries }}
"insecure-registries": {{ .cri.registry.insecure_registries | toJson }},
{{- end }}
{{- $insecure_registries := .cri.registry.insecure_registries | default list -}}
{{- if .image_registry.auth.skip_ssl -}}
{{- $insecure_registries = append $insecure_registries .image_registry.auth.registry -}}
{{- end -}}
"insecure-registries": {{ $insecure_registries | toJson }},
{{- if .cri.docker.bridge_ip }}
"bip": "{{ .cri.docker.bridge_ip }}",
{{- end }}
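The reworked daemon.json template above now builds a single insecure-registries list: it starts from .cri.registry.insecure_registries and, when .image_registry.auth.skip_ssl is set, appends the private registry address before rendering the JSON. A minimal Go sketch of that same decision follows; the values (including the example registry name) are assumptions for illustration, not kubekey code:

    // Illustrative only: the decision the daemon.json template makes.
    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	insecure := []string{"mirror.internal:5000"} // stands in for .cri.registry.insecure_registries
    	registry := "registry.example.local"         // stands in for .image_registry.auth.registry
    	skipSSL := true                              // stands in for .image_registry.auth.skip_ssl
    	if skipSSL {
    		// append the private registry so docker accepts it without TLS verification
    		insecure = append(insecure, registry)
    	}
    	out, _ := json.Marshal(map[string]any{"insecure-registries": insecure})
    	fmt.Println(string(out))
    }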
@@ -125,7 +125,7 @@ func (o *CommonOptions) Run(ctx context.Context, playbook *kkcorev1.Playbook) er
return errors.Wrapf(err, "failed to create local dir %q for playbook %q", o.Workdir, ctrlclient.ObjectKeyFromObject(playbook))
}
}
restconfig := &rest.Config{QPS: 100, Burst: 200}
restconfig := &rest.Config{}
if err := proxy.RestConfig(filepath.Join(o.Workdir, _const.RuntimeDir), restconfig); err != nil {
return err
}

@@ -24,7 +24,7 @@ func newWebCommand() *cobra.Command {
Short: "start a http server with web UI.",
RunE: func(cmd *cobra.Command, args []string) error {
// Initialize REST config for Kubernetes client
restconfig := &rest.Config{QPS: 100, Burst: 200}
restconfig := &rest.Config{}
if err := proxy.RestConfig(filepath.Join(o.Workdir, _const.RuntimeDir), restconfig); err != nil {
return err
}
@@ -62,7 +62,7 @@ func (e *taskExecutor) Exec(ctx context.Context) error {
failedMsg := "\n"
for _, result := range e.task.Status.HostResults {
if result.Error != "" {
failedMsg += fmt.Sprintf("[%s]: %s\n", result.Host, result.StdErr)
failedMsg += fmt.Sprintf("[%s]: %s: %s\n", result.Host, result.StdErr, result.Error)
}
}
return errors.Errorf("task [%s](%s) run failed: %s", e.task.Spec.Name, ctrlclient.ObjectKeyFromObject(e.task), failedMsg)

@@ -181,7 +181,7 @@ func (f *project) dealImportPlaybook(p kkprojectv1.Play, basePlaybook string) er
return nil
}

// dealVarsFiles handles the "var_files" argument in a play
// dealVarsFiles handles the "vars_files" argument in a play
func (f *project) dealVarsFiles(p *kkprojectv1.Play, basePlaybook string) error {
for _, varsFileStr := range p.VarsFiles {
// load vars from vars_files
@@ -57,6 +57,8 @@ func RestConfig(runtimedir string, restconfig *rest.Config) error {
if err != nil {
return err
}
restconfig.QPS = 500
restconfig.Burst = 200
restconfig.TLSClientConfig = rest.TLSClientConfig{}

restconfig.Transport = transport
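With this change the client-side rate limits live in proxy.RestConfig itself: the two call sites shown earlier now pass a bare &rest.Config{}, and RestConfig sets QPS to 500 and Burst to 200 in one place. A minimal sketch of that pattern, assuming only the client-go rest.Config type; newRESTConfig below is a hypothetical stand-in for proxy.RestConfig, not the real function:

    // Illustrative only: centralizing rate limits in one constructor.
    package main

    import (
    	"fmt"

    	"k8s.io/client-go/rest"
    )

    // newRESTConfig fills in the shared defaults on a caller-provided config,
    // mirroring how proxy.RestConfig now sets QPS and Burst itself.
    func newRESTConfig(cfg *rest.Config) {
    	cfg.QPS = 500   // client-side queries per second
    	cfg.Burst = 200 // maximum burst above the sustained QPS
    }

    func main() {
    	cfg := &rest.Config{} // callers no longer hard-code QPS/Burst here
    	newRESTConfig(cfg)
    	fmt.Println(cfg.QPS, cfg.Burst)
    }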
@@ -21,12 +21,10 @@ var MergeRemoteVariable = func(data map[string]any, hostnames ...string) MergeFu
if _, ok := vv.value.Hosts[hostname]; !ok {
return errors.Errorf("when merge source is remote. HostName %s not exist", hostname)
}

// Only set RemoteVars if it is currently empty to avoid overwriting existing remote variables.
if hv := vv.value.Hosts[hostname]; len(hv.RemoteVars) == 0 {
hv.RemoteVars = data
vv.value.Hosts[hostname] = hv
}
// always update remote variable
hv := vv.value.Hosts[hostname]
hv.RemoteVars = data
vv.value.Hosts[hostname] = hv
}

return nil
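The MergeRemoteVariable change drops the "only set when empty" guard, so freshly gathered remote facts always replace whatever is cached for the host. A simplified sketch of the new behaviour, using stand-in types rather than kubekey's real variable structures:

    // Illustrative only: simplified model of the always-overwrite merge.
    package main

    import "fmt"

    type host struct {
    	RemoteVars map[string]any
    }

    func mergeRemote(hosts map[string]host, hostname string, data map[string]any) error {
    	hv, ok := hosts[hostname]
    	if !ok {
    		return fmt.Errorf("hostname %s not exist", hostname)
    	}
    	// always update the remote variables
    	// (previously: only when len(hv.RemoteVars) == 0)
    	hv.RemoteVars = data
    	hosts[hostname] = hv
    	return nil
    }

    func main() {
    	hosts := map[string]host{"node1": {RemoteVars: map[string]any{"os": "old"}}}
    	_ = mergeRemote(hosts, "node1", map[string]any{"os": "new"})
    	fmt.Println(hosts["node1"].RemoteVars["os"]) // new
    }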
@@ -2,9 +2,11 @@ package handler

import (
"context"
"fmt"
"os"
"path/filepath"
"sync"
"time"

"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"

@@ -62,7 +64,8 @@ func (m *manager) executor(playbook *kkcorev1.Playbook, client ctrlclient.Client
m.addPlaybook(playbook, cancel)
// Execute the playbook and write output to the log file
if err := executor.NewPlaybookExecutor(ctx, client, playbook, file).Exec(ctx); err != nil {
klog.ErrorS(err, "failed to exec playbook", "playbook", playbook.Name)
// recode to log file
fmt.Fprintf(file, "%s [Playbook %s] ERROR: %v\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(playbook), err)
}
// Remove the playbook from the playbookManager after execution
m.deletePlaybook(playbook)
@@ -84,17 +84,12 @@ func (h ResourceHandler) PostConfig(request *restful.Request, response *restful.
}

// Open config file for reading and writing.
configFile, err := os.OpenFile(filepath.Join(h.rootPath, api.SchemaConfigFile), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
_ = response.WriteError(http.StatusInternalServerError, err)
return
}
defer configFile.Close()

// Decode old config if present.
if err := json.NewDecoder(configFile).Decode(&oldConfig); err != nil && err != io.EOF {
_ = response.WriteError(http.StatusInternalServerError, err)
return
if oldConfigFile, err := os.ReadFile(filepath.Join(h.rootPath, api.SchemaConfigFile)); err == nil {
// Decode old config if present.
if err := json.Unmarshal(oldConfigFile, &oldConfig); err != nil && !errors.Is(err, io.EOF) {
_ = response.WriteError(http.StatusInternalServerError, err)
return
}
}

namespace := query.DefaultString(request.QueryParameter("cluster"), "default")
@@ -104,9 +99,20 @@ func (h ResourceHandler) PostConfig(request *restful.Request, response *restful.

// Iterate over new config and trigger precheck playbooks if config changed.
for fileName, newVal := range newConfig {
// if config is not change skip it
if reflect.DeepEqual(newVal, oldConfig[fileName]) {
continue
}
// if playbook has created should skip it.
playbookList := &kkcorev1.PlaybookList{}
if err := h.client.List(request.Request.Context(), playbookList, ctrlclient.InNamespace(namespace),
ctrlclient.MatchingLabels{"install." + api.SchemaLabelSubfix: fileName}); err != nil {
_ = response.WriteError(http.StatusInternalServerError, err)
return
}
if len(playbookList.Items) > 0 {
continue
}
schemaInfo, err := os.ReadFile(filepath.Join(h.rootPath, fileName))
if err != nil {
_ = response.WriteError(http.StatusInternalServerError, err)
@@ -168,7 +174,7 @@ func (h ResourceHandler) PostConfig(request *restful.Request, response *restful.
}

// Write new config to file.
if _, err := configFile.Write(bodyBytes); err != nil {
if err := os.WriteFile(filepath.Join(h.rootPath, api.SchemaConfigFile), bodyBytes, 0644); err != nil {
_ = response.WriteError(http.StatusInternalServerError, err)
return
}
@@ -177,7 +183,7 @@ func (h ResourceHandler) PostConfig(request *restful.Request, response *restful.
if len(preCheckResult) > 0 {
_ = response.WriteHeaderAndEntity(http.StatusUnprocessableEntity, api.Result{Message: api.ResultFailed, Result: preCheckResult})
} else {
_ = response.WriteEntity(api.SUCCESS)
_ = response.WriteEntity(api.SUCCESS.SetResult(newConfig))
}
}
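Taken together, the PostConfig hunks are what the commit title refers to. The old handler opened the config file with os.O_TRUNC before decoding it, which emptied the previous contents before they could be read and left an open handle for the later write; the new flow reads the old config with os.ReadFile (tolerating a missing file), compares entries, skips files that already have an install playbook, and writes the new body back with os.WriteFile. A minimal sketch of that read-then-write pattern, with a hypothetical loadAndStoreConfig helper standing in for the full handler (which also diffs entries and writes HTTP responses):

    // Illustrative only: read the old config without truncating it, then
    // write the new body back as a whole file.
    package main

    import (
    	"encoding/json"
    	"fmt"
    	"os"
    )

    // loadAndStoreConfig reads the previous config if the file exists, then
    // writes the new body in one call, so a missing or empty file is not an
    // error and the old contents are never truncated before being read.
    func loadAndStoreConfig(path string, newBody []byte) (map[string]any, error) {
    	oldConfig := map[string]any{}
    	if data, err := os.ReadFile(path); err == nil && len(data) > 0 {
    		if err := json.Unmarshal(data, &oldConfig); err != nil {
    			return nil, err
    		}
    	}
    	if err := os.WriteFile(path, newBody, 0644); err != nil {
    		return nil, err
    	}
    	return oldConfig, nil
    }

    func main() {
    	old, err := loadAndStoreConfig("/tmp/schema-config.json", []byte(`{"a":1}`))
    	fmt.Println(old, err)
    }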