fix: clearer errors (#2694)

Signed-off-by: joyceliu <joyceliu@yunify.com>
Author: liujian, 2025-08-12 11:07:45 +08:00 (committed by GitHub)
Parent: 48b7c3b34b
Commit: 0f40e29791
24 changed files with 333 additions and 308 deletions

View File

@@ -49,14 +49,14 @@ type InventoryHost map[string]runtime.RawExtension
// InventoryGroup of Inventory
type InventoryGroup struct {
Groups []string `json:"groups,omitempty"`
Hosts []string `json:"hosts,omitempty"`
Hosts []string `json:"hosts"`
Vars runtime.RawExtension `json:"vars,omitempty"`
}
// InventorySpec of Inventory
type InventorySpec struct {
// Hosts is all nodes
Hosts InventoryHost `json:"hosts,omitempty"`
Hosts InventoryHost `json:"hosts"`
// Vars for all host. the priority for vars is: host vars > group vars > inventory vars
// +optional
// +kubebuilder:pruning:PreserveUnknownFields
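
Dropping omitempty from the hosts tags means a nil slice is now serialized explicitly as null, while an empty slice serializes as []. A minimal sketch of that standard encoding/json behavior (struct trimmed to the fields shown above); this is also why the Patch handler further down normalizes nil group hosts before applying JSON patches:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of the updated InventoryGroup: Hosts no longer carries omitempty.
type InventoryGroup struct {
	Groups []string `json:"groups,omitempty"`
	Hosts  []string `json:"hosts"`
}

func main() {
	var nilHosts InventoryGroup                    // Hosts left nil
	withEmpty := InventoryGroup{Hosts: []string{}} // Hosts set to an empty slice

	a, _ := json.Marshal(nilHosts)
	b, _ := json.Marshal(withEmpty)
	fmt.Println(string(a)) // {"hosts":null}
	fmt.Println(string(b)) // {"hosts":[]}
}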

View File

@@ -3,8 +3,8 @@
- hosts:
- all
vars_files:
- vars/create_cluster.yaml
- vars/create_cluster_kubernetes.yaml
- vars/common.yaml
- vars/kubernetes.yaml
- import_playbook: hook/pre_install.yaml

View File

@@ -3,8 +3,8 @@
- hosts:
- all
vars_files:
- vars/create_cluster.yaml
- vars/create_cluster_kubernetes.yaml
- vars/common.yaml
- vars/kubernetes.yaml
- import_playbook: hook/pre_install.yaml
@@ -34,7 +34,7 @@
- hosts:
- etcd
- k8s_cluster
- registry
- image_registry
- nfs
roles:
- init/init-os

View File

@@ -3,7 +3,8 @@
- hosts:
- all
vars_files:
- vars/delete_cluster.yaml
- vars/common.yaml
- vars/kubernetes.yaml
- import_playbook: hook/pre_install.yaml

View File

@@ -3,7 +3,8 @@
- hosts:
- all
vars_files:
- vars/delete_cluster.yaml
- vars/common.yaml
- vars/kubernetes.yaml
- import_playbook: hook/pre_install.yaml

View File

@@ -3,7 +3,8 @@
- hosts:
- all
vars_files:
- vars/delete_cluster.yaml
- vars/common.yaml
- vars/kubernetes.yaml
- import_playbook: hook/pre_install.yaml

View File

@@ -11,6 +11,7 @@
- image_registry
gather_facts: true
roles:
- init/init-os
- install/image-registry
- import_playbook: hook/post_install.yaml

View File

@@ -0,0 +1,45 @@
# The global registry used for all images. Leave empty to use default registries.
global_registry: ""
# The registry to use for docker.io images.
dockerio_registry: >-
{{- if .global_registry | empty | not -}}
{{ .global_registry }}
{{- else -}}
docker.io
{{- end -}}
# The registry to use for quay.io images.
quayio_registry: >-
{{- if .global_registry | empty | not -}}
{{ .global_registry }}
{{- else -}}
quay.io
{{- end -}}
# The registry to use for ghcr.io images.
ghcrio_registry: >-
{{- if .global_registry | empty | not -}}
{{ .global_registry }}
{{- else -}}
ghcr.io
{{- end -}}
# Enable or disable security enhancement features.
security_enhancement: false
# Set to true to remove the container runtime interface (CRI) such as containerd or Docker from target nodes.
deleteCRI: false
# Set to true to uninstall etcd from target nodes.
deleteETCD: false
# Set to true to remove local DNS entries managed by Kubekey from the specified files.
deleteDNS: false
# Set to true to uninstall the image registry from target nodes.
deleteImageRegistry: false
# List of local DNS files to clean up if deleteDNS is enabled.
localDNS:
- /etc/hosts
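
The *_registry values above are Go-template expressions that fall back to the upstream registry when global_registry is empty. A minimal sketch of how one of them renders, using text/template with a hand-rolled empty helper standing in for the sprig-style function set KubeKey's template engine provides:

package main

import (
	"fmt"
	"strings"
	"text/template"
)

// The same fallback expression as dockerio_registry above.
const dockerioRegistry = `{{- if .global_registry | empty | not -}}
{{ .global_registry }}
{{- else -}}
docker.io
{{- end -}}`

// renderRegistry renders the expression with a minimal "empty" helper.
func renderRegistry(globalRegistry string) (string, error) {
	funcs := template.FuncMap{
		"empty": func(v any) bool {
			s, ok := v.(string)
			return !ok || s == ""
		},
	}
	t, err := template.New("dockerio_registry").Funcs(funcs).Parse(dockerioRegistry)
	if err != nil {
		return "", err
	}
	var out strings.Builder
	if err := t.Execute(&out, map[string]any{"global_registry": globalRegistry}); err != nil {
		return "", err
	}
	return out.String(), nil
}

func main() {
	fmt.Println(renderRegistry(""))             // docker.io <nil>
	fmt.Println(renderRegistry("harbor.local")) // harbor.local <nil>
}

With global_registry set, every *_registry value above collapses to that single endpoint; left empty, each image keeps its default upstream registry.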

View File

@@ -1,29 +0,0 @@
# global_registry for all image
global_registry: ""
# dockerio_registry for docker.io image.
dockerio_registry: >-
{{- if .global_registry | empty | not -}}
{{ .global_registry }}
{{- else -}}
docker.io
{{- end -}}
# quayio_registry for quay.io image.
quayio_registry: >-
{{- if .global_registry | empty | not -}}
{{ .global_registry }}
{{- else -}}
quay.io
{{- end -}}
# ghcrio_registry for ghcr.io image.
ghcrio_registry: >-
{{- if .global_registry | empty | not -}}
{{ .global_registry }}
{{- else -}}
ghcr.io
{{- end -}}
security_enhancement: false
cri:
# support: containerd,docker
container_manager: docker

View File

@@ -1,229 +0,0 @@
kubernetes:
cluster_name: kubekey
# support: flannel, calico
kube_network_plugin: calico
# the image repository of kubernetes.
image_repository: >-
{{ .dockerio_registry }}/kubesphere
# memory size for each kube_worker node.(unit kB)
# should be greater than or equal to minimal_node_memory_mb.
minimal_node_memory_mb: 10
# the maximum number of pods that can be run on each node.
max_pods: 110
audit: false
networking:
# the whole pod_cidr in cluster. support: ipv4; ipv6; ipv4,ipv6.
pod_cidr: 10.233.64.0/18
# the subnet ipv4 pod_cidr in each node.
ipv4_mask_size: 24
# the subnet ipv6 pod_cidr in each node.
ipv6_mask_size: 64
# the whole service_cidr in cluster. support: ipv4; ipv6; ipv4,ipv6.
service_cidr: 10.233.0.0/18
dns_domain: cluster.local
dns_image:
registry: >-
{{ .dockerio_registry }}
repository: >-
coredns
tag: 1.8.6
dns_cache_image:
registry: >-
{{ .dockerio_registry }}
repository: kubesphere/k8s-dns-node-cache
tag: 1.22.20
dns_service_ip: >-
{{ index (.kubernetes.networking.service_cidr | ipInCIDR) 2 }}
# nodelocaldns bind ip
clusterDNS: 169.254.25.10
apiserver:
port: 6443
certSANs: []
extra_args:
# feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
controller_manager:
extra_args:
# feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
scheduler:
extra_args:
# feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
kube_proxy:
enabled: true
# support ipvs and iptables
mode: "ipvs"
config:
iptables:
masqueradeAll: false
masqueradeBit: 14
minSyncPeriod: 0s
syncPeriod: 30s
kubelet:
max_pod: 110
pod_pids_limit: 10000
# feature_gates:
container_log_max_size: 5Mi
container_log_max_files: 3
# extra_args:
coredns:
dns_etc_hosts: []
# the config for zones
zone_configs:
# DNS zones to match. default use port of 53. the format like this.
# .: all dns zone.
# example.com: match *.example.com use dns server with port 53
# example.com:54: match *.example.com use dns server with port 54
- zones: [".:53"]
additional_configs:
- errors
- ready
- prometheus :9153
- loop
- reload
- loadbalance
cache: 30
kubernetes:
zones:
- "{{ .kubernetes.networking.dns_domain }}"
# rewrite performs internal message rewriting.
# rewrite:
# # specify multiple rules and an incoming query matches multiple rules.
# # continue: if the rewrite rule is not matched, the next rule will be matched.
# # stop: if the rewrite rule is not matched, the next rule will not be matched.
# - rule: continue
# # support: type, name, class, edns0, ttl, cname
# # type: the type field of the request will be rewritten. FROM/TO must be a DNS record type (A, MX, etc.).
# # name: the query name in the request is rewritten; by default this is a full match of the name
# # class: the class of the message will be rewritten.
# # edns0: an EDNS0 option can be appended to the request as described below in the EDNS0 Options section.
# # ttl: the TTL value in the response is rewritten.
# # cname: the CNAME target if the response has a CNAME record
# field: name
# # this optional element can be specified for a name or ttl field.
# # exact: the name must be exactly the same as the value.
# # prefix: the name must start with the value.
# # suffix: the name must end with the value.
# # substring: the name must contain the value.
# # regex: the name must match the value.
# type: exact
# value: "example.com example2.com"
# # for field name further options are possible controlling the response rewrites.
# # answer auto: the names in the response is rewritten in a best effort manner.
# # answer name FROM TO: the query name in the response is rewritten matching the from regex pattern.
# # answer value FROM TO: the names in the response is rewritten matching the from regex pattern.
# options: ""
forward:
# the base domain to match for the request to be forwarded.
- from: "."
# the destination endpoints to forward to. The TO syntax allows you to specify a protocol
to: ["/etc/resolv.conf"]
# a space-separated list of domains to exclude from forwarding.
except: []
# use TCP even when the request comes in over UDP.
force_tcp: false
# try first using UDP even when the request comes in over TCP.
# If response is truncated (TC flag set in response) then do another attempt over TCP.
prefer_udp: false
# the number of subsequent failed health checks that are needed before considering an upstream to be down
# If 0, the upstream will never be marked as down (nor health checked).
# max_fails: 2
# expire (cached) connections after this time,
# expire: 10s
# define the TLS properties for TLS connection.
# tls:
# # the path to the certificate file.
# cert_file: ""
# # the path to the key file.
# key_file: ""
# # the path to the CA certificate file.
# ca_file: ""
# # allows you to set a server name in the TLS configuration
# tls_servername: ""
# specifies the policy to use for selecting upstream servers. The default is random.
# random: a policy that implements random upstream selection.
# round_robin: a policy that selects hosts based on round robin ordering.
# sequential: a policy that selects hosts based on sequential ordering.
# policy: "random"
# configure the behaviour of health checking of the upstream servers
# format: DURATION [no_rec] [domain FQDN]
# <duration>: use a different duration for health checking, the default duration is 0.5s.
# no_rec:optional argument that sets the RecursionDesired-flag of the dns-query used in health checking to false. The flag is default true.
# domain FQDN: set the domain name used for health checks to FQDN. If not configured, the domain name used for health checks is .
# health_check: ""
# limit the number of concurrent queries to MAX.
max_concurrent: 1000
# Specify a stable IP address or DNS name for the control plane.
# To achieve high availability in a cluster, it is recommended to set control_plane_endpoint to a DNS domain name when deploying the cluster. You can choose from the following options:
# 1. When a DNS domain name is available:
# Set control_plane_endpoint to the DNS domain name, and configure the domain name to point to all control_plane node IPs.
# 2. When a DNS domain name is not available:
# Set control_plane_endpoint to a DNS domain name that can be extended later. Add the DNS domain name resolution to the localDNS file on each node with the format:
#{{ vip }} {{ control_plane_endpoint }}
# - When a VIP is available:
# Deploy kube-vip on the control_plane nodes to map the VIP to the actual control_plane node IPs.
# - When a VIP is not available:
# Deploy HAProxy on the worker nodes. Map a fixed IP address (e.g., 127.0.0.2) as the VIP and route this VIP to all control_plane node IPs.
#
# Non-High-Availability Scenario: (Installation is not provided; parameters are provided for users to configure manually.)
# In this case, set the VIP to one of the control_plane nodes.
control_plane_endpoint:
host: lb.kubesphere.local
port: "{{ .kubernetes.apiserver.port }}"
# support local, kube_vip, haproxy
# when type is local, # if set will write in localDNS file.
# - for control-plane: will use 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - for worker: will use {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
type: local
kube_vip:
# the ip address of node net. usage in node network interface: "eth0"
# address:
# support ARP or BGP
mode: ARP
image:
registry: >-
{{ .dockerio_registry }}
repository: plndr/kube-vip
tag: v0.7.2
haproxy:
# the ip address in node network interface: "lo"
address: 127.0.0.1
health_port: 8081
image:
registry: >-
{{ .dockerio_registry }}
repository: library/haproxy
tag: 2.9.6-alpine
etcd:
# It is possible to deploy etcd with three methods.
# external: Deploy etcd cluster with external etcd cluster.
# internal: Deploy etcd cluster by static pod.
deployment_type: external
image:
registry: >-
{{ .dockerio_registry }}
repository: kubesphere/etcd
tag: 3.5.0
# custom_labels: {}
# if auto renew kubernetes certs
certs:
# Certificate Authority (CA) files in Kubernetes come from three sources:
# 1. kubeadm: ca_cert and ca_key set empty. Automatically generated by kubeadm.
# These certificates have a 10-year expiration period and remain unchanged.
# 2. kubekey: ca_cert set to {{ .binary_dir }}/pki/ca.cert and ca_key set to {{ .binary_dir }}/pki/ca.key.
# Automatically generated by kubekey. These certificates also have a 10-year validity period
# and can be modified using `cert.ca_date`.
# 3. custom: ca_cert and ca_key set to existing files. Custom CA files provided manually.
#
# If you want to use custom CA files, specify the absolute paths to your ca_cert and ca_key files below.
# If left empty, the default behavior (kubeadm or kubekey) will be used.
ca_cert: ""
ca_key: ""
# The following fields are for the Kubernetes front-proxy CA certificate and key.
# If you want to use custom front-proxy CA files, specify the absolute paths below.
# If left empty, the default behavior will be used.
front_proxy_cert: ""
front_proxy_key: ""
renew: true
localDNS:
- /etc/hosts

View File

@@ -1,24 +0,0 @@
# Default Kubernetes configuration for deletion playbooks
kubernetes:
# The CNI plugin used by the cluster. Default is "calico"
kube_network_plugin: "calico"
# Whether to delete the container runtime interface (CRI) on target nodes
# Set to true to uninstall CRI (e.g., containerd, docker)
deleteCRI: false
# Whether to delete etcd on target nodes
# Set to true to uninstall etcd
deleteETCD: false
# Whether to delete local DNS entries in the localDNS file created by kubekey
# Set to true to remove kubekey-managed DNS entries from the specified files
deleteDNS: false
# Whether to delete the image registry on target nodes
# Set to true to uninstall the image registry
deleteImageRegistry: false
# List of local DNS files to be cleaned up if deleteDNS is true
localDNS:
- /etc/hosts

View File

@@ -0,0 +1,193 @@
kubernetes:
cluster_name: kubekey
# Supported network plugins: flannel, calico
kube_network_plugin: calico
# The image repository for Kubernetes components.
image_repository: >-
{{ .dockerio_registry }}/kubesphere
# Minimum memory (in MB) required for each kube_worker node.
# This value must be at least minimal_node_memory_mb.
minimal_node_memory_mb: 10
# Maximum number of pods allowed per node.
max_pods: 110
audit: false
networking:
# The complete pod CIDR for the cluster. Supports: ipv4, ipv6, or dual-stack (ipv4,ipv6).
pod_cidr: 10.233.64.0/18
# Subnet mask size for IPv4 pod CIDR on each node.
ipv4_mask_size: 24
# Subnet mask size for IPv6 pod CIDR on each node.
ipv6_mask_size: 64
# The complete service CIDR for the cluster. Supports: ipv4, ipv6, or dual-stack (ipv4,ipv6).
service_cidr: 10.233.0.0/18
dns_domain: cluster.local
dns_image:
registry: >-
{{ .dockerio_registry }}
repository: >-
coredns
tag: 1.8.6
dns_cache_image:
registry: >-
{{ .dockerio_registry }}
repository: kubesphere/k8s-dns-node-cache
tag: 1.22.20
dns_service_ip: >-
{{ index (.kubernetes.networking.service_cidr | ipInCIDR) 2 }}
# The IP address for nodelocaldns to bind.
clusterDNS: 169.254.25.10
apiserver:
port: 6443
certSANs: []
extra_args:
# Example: feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
controller_manager:
extra_args:
# Example: feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
scheduler:
extra_args:
# Example: feature-gates: ExpandCSIVolumes=true,CSIStorageCapacity=true,RotateKubeletServerCertificate=true
kube_proxy:
enabled: true
# Supported proxy modes: ipvs, iptables
mode: "ipvs"
config:
iptables:
masqueradeAll: false
masqueradeBit: 14
minSyncPeriod: 0s
syncPeriod: 30s
kubelet:
max_pod: 110
pod_pids_limit: 10000
# feature_gates:
container_log_max_size: 5Mi
container_log_max_files: 3
# extra_args:
coredns:
dns_etc_hosts: []
# DNS zone configuration
zone_configs:
# Each entry defines DNS zones to match. Default port is 53.
# ".": matches all DNS zones.
# "example.com": matches *.example.com using DNS server on port 53.
# "example.com:54": matches *.example.com using DNS server on port 54.
- zones: [".:53"]
additional_configs:
- errors
- ready
- prometheus :9153
- loop
- reload
- loadbalance
cache: 30
kubernetes:
zones:
- "{{ .kubernetes.networking.dns_domain }}"
# Internal DNS message rewriting can be configured here.
# rewrite:
# - rule: continue
# field: name
# type: exact
# value: "example.com example2.com"
# options: ""
forward:
# Forwarding rules for DNS queries.
- from: "."
# Destination endpoints for forwarding. The TO syntax allows protocol specification.
to: ["/etc/resolv.conf"]
# List of domains to exclude from forwarding.
except: []
# Use TCP for forwarding even if the request was over UDP.
force_tcp: false
# Prefer UDP for forwarding, retry with TCP if response is truncated.
prefer_udp: false
# Number of consecutive failed health checks before marking an upstream as down.
# max_fails: 2
# Time after which cached connections expire.
# expire: 10s
# TLS properties for secure connections can be set here.
# tls:
# cert_file: ""
# key_file: ""
# ca_file: ""
# tls_servername: ""
# Policy for selecting upstream servers: random (default), round_robin, sequential.
# policy: "random"
# Health check configuration for upstream servers.
# health_check: ""
# Maximum number of concurrent DNS queries.
max_concurrent: 1000
# Specify a stable IP address or DNS name for the control plane endpoint.
# For high availability, it is recommended to use a DNS domain name for control_plane_endpoint.
# Options:
# 1. If a DNS domain name is available:
# - Set control_plane_endpoint to the DNS name and configure it to resolve to all control plane node IPs.
# 2. If a DNS domain name is not available:
# - Set control_plane_endpoint to a DNS name that can be added later.
# - Add the DNS name resolution to the localDNS file on each node in the format:
# {{ vip }} {{ control_plane_endpoint }}
# - If a VIP is available:
# Deploy kube-vip on control plane nodes to map the VIP to the actual node IPs.
# - If a VIP is not available:
# Deploy HAProxy on worker nodes. Map a fixed IP (e.g., 127.0.0.2) as the VIP and route it to all control plane node IPs.
#
# Non-HA scenario: (No installation provided; parameters are for manual configuration.)
# In this case, set the VIP to one of the control plane nodes.
control_plane_endpoint:
host: lb.kubesphere.local
port: "{{ .kubernetes.apiserver.port }}"
# Supported types: local, kube_vip, haproxy
# If type is local, the following applies:
# - On control-plane nodes: 127.0.0.1 {{ .kubernetes.control_plane_endpoint.host }}
# - On worker nodes: {{ .init_kubernetes_node }} {{ .kubernetes.control_plane_endpoint.host }}
type: local
kube_vip:
# The IP address of the node's network interface (e.g., "eth0").
# address:
# Supported modes: ARP, BGP
mode: ARP
image:
registry: >-
{{ .dockerio_registry }}
repository: plndr/kube-vip
tag: v0.7.2
haproxy:
# The IP address on the node's "lo" (loopback) interface.
address: 127.0.0.1
health_port: 8081
image:
registry: >-
{{ .dockerio_registry }}
repository: library/haproxy
tag: 2.9.6-alpine
etcd:
# etcd can be deployed in one of the following ways:
# - external: Use an external etcd cluster.
# - internal: Deploy etcd as a static pod.
deployment_type: external
image:
registry: >-
{{ .dockerio_registry }}
repository: kubesphere/etcd
tag: 3.5.0
# custom_labels: {}
# Enable or disable automatic renewal of Kubernetes certificates.
certs:
# Kubernetes Certificate Authority (CA) files can be provided in three ways:
# 1. kubeadm: Leave ca_cert and ca_key empty to let kubeadm generate them automatically.
# These certificates are valid for 10 years and remain unchanged.
# 2. kubekey: Set ca_cert to {{ .binary_dir }}/pki/ca.cert and ca_key to {{ .binary_dir }}/pki/ca.key.
# These are generated by kubekey, valid for 10 years, and can be updated using `cert.ca_date`.
# 3. custom: Provide your own CA files by specifying the absolute paths for ca_cert and ca_key.
#
# To use custom CA files, specify their absolute paths below.
# If left empty, the default behavior (kubeadm or kubekey) will be used.
ca_cert: ""
ca_key: ""
# The following fields are for the Kubernetes front-proxy CA certificate and key.
# To use custom front-proxy CA files, specify their absolute paths below.
# If left empty, the default behavior will be used.
front_proxy_cert: ""
front_proxy_key: ""
renew: true
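
dns_service_ip above is derived from the service CIDR via the ipInCIDR template helper. The helper's enumeration order is not shown in this diff; the sketch below assumes it counts from the network address, in which case index 2 of 10.233.0.0/18 resolves to 10.233.0.2 (if it started from the first usable address it would be 10.233.0.3):

package main

import (
	"fmt"
	"net/netip"
)

// nthAddrInCIDR returns the n-th address inside cidr, counting from the
// network address (assumption: ipInCIDR enumerates the same way).
func nthAddrInCIDR(cidr string, n int) (netip.Addr, error) {
	prefix, err := netip.ParsePrefix(cidr)
	if err != nil {
		return netip.Addr{}, err
	}
	addr := prefix.Masked().Addr()
	for i := 0; i < n; i++ {
		addr = addr.Next()
	}
	if !prefix.Contains(addr) {
		return netip.Addr{}, fmt.Errorf("index %d is outside %s", n, cidr)
	}
	return addr, nil
}

func main() {
	ip, err := nthAddrInCIDR("10.233.0.0/18", 2)
	if err != nil {
		panic(err)
	}
	fmt.Println(ip) // 10.233.0.2 under the enumeration assumption above
}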

View File

@@ -1,6 +1,8 @@
---
- name: OS | Initialize new Kubernetes nodes
when: .kubernetes_install_LoadState.stdout | eq "not-found"
when:
- .groups.k8s_cluster | default list | has .inventory_hostname
- .kubernetes_install_LoadState.stdout | eq "not-found"
block:
- include_tasks: init_repository.yaml
- name: OS | Reset temporary directory
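
The added when clause only runs the OS initialization block for hosts that belong to the k8s_cluster group (a missing group counts as an empty list, per default list) and whose recorded LoadState is "not-found". A rough Go rendering of the same check, with illustrative variable names:

package main

import (
	"fmt"
	"slices"
)

// shouldRunInitOS mirrors the two when conditions: the host must appear in the
// k8s_cluster group, and the recorded service LoadState must be "not-found".
func shouldRunInitOS(groups map[string][]string, host, loadState string) bool {
	return slices.Contains(groups["k8s_cluster"], host) && loadState == "not-found"
}

func main() {
	groups := map[string][]string{
		"k8s_cluster":    {"node1", "node2"},
		"image_registry": {"registry1"},
	}
	fmt.Println(shouldRunInitOS(groups, "node1", "not-found"))     // true
	fmt.Println(shouldRunInitOS(groups, "registry1", "not-found")) // false: not in k8s_cluster
}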

View File

@@ -59,6 +59,8 @@ spec:
vars:
type: object
x-kubernetes-preserve-unknown-fields: true
required:
- hosts
type: object
description: Groups nodes. a group contains repeated nodes
type: object
@@ -73,6 +75,8 @@
> group vars > inventory vars'
type: object
x-kubernetes-preserve-unknown-fields: true
required:
- hosts
type: object
status:
description: InventoryStatus of Inventory

View File

@@ -59,6 +59,8 @@ spec:
vars:
type: object
x-kubernetes-preserve-unknown-fields: true
required:
- hosts
type: object
description: Groups nodes. a group contains repeated nodes
type: object
@@ -73,6 +75,8 @@
> group vars > inventory vars'
type: object
x-kubernetes-preserve-unknown-fields: true
required:
- hosts
type: object
status:
description: InventoryStatus of Inventory

View File

@@ -43,6 +43,7 @@ func init() {
FS: core.BuiltinPlaybook,
basePlaybook: playbook.Spec.Playbook,
Playbook: &kkprojectv1.Playbook{},
config: playbook.Spec.Config.Value(),
}, nil
}
}

View File

@@ -68,6 +68,7 @@ func newGitProject(ctx context.Context, playbook kkcorev1.Playbook, update bool)
FS: os.DirFS(filepath.Join(projectDir, playbook.Spec.Project.Name)),
basePlaybook: playbook.Spec.Playbook,
Playbook: &kkprojectv1.Playbook{},
config: playbook.Spec.Config.Value(),
}, nil
}

View File

@@ -50,5 +50,6 @@ func newLocalProject(playbook kkcorev1.Playbook) (Project, error) {
FS: os.DirFS(projectPath),
basePlaybook: relPath,
Playbook: &kkprojectv1.Playbook{},
config: playbook.Spec.Config.Value(),
}, nil
}

View File

@@ -32,6 +32,7 @@ import (
"gopkg.in/yaml.v3"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
"github.com/kubesphere/kubekey/v4/pkg/variable"
)
@@ -74,10 +75,10 @@ func New(ctx context.Context, playbook kkcorev1.Playbook, update bool) (Project,
// project implements the Project interface using an fs.FS
type project struct {
fs.FS
basePlaybook string
*kkprojectv1.Playbook
config map[string]any
}
// ReadFile reads and returns the contents of the file at the given path
@@ -182,8 +183,12 @@ func (f *project) dealImportPlaybook(p kkprojectv1.Play, basePlaybook string) er
// dealVarsFiles handles the "var_files" argument in a play
func (f *project) dealVarsFiles(p *kkprojectv1.Play, basePlaybook string) error {
for _, varsFile := range p.VarsFiles {
for _, varsFileStr := range p.VarsFiles {
// load vars from vars_files
varsFile, err := tmpl.ParseFunc(f.config, varsFileStr, func(b []byte) string { return string(b) })
if err != nil {
return errors.Errorf("failed to parse varFile %q", varsFileStr)
}
file := f.getPath(GetVarsFilesRelPath(basePlaybook, varsFile))
if file == "" {
return errors.Errorf("failed to find vars_files %q based on %q. it should be:\n %s", varsFile, basePlaybook, PathFormatVarsFile
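
With the playbook config now threaded into the project (the config field added above), each vars_files entry is rendered as a template before its path is resolved. tmpl.ParseFunc is KubeKey's own helper; the sketch below approximates the idea with plain text/template and a hypothetical config key, showing that a literal entry such as vars/common.yaml passes through unchanged while a templated one is expanded:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// renderVarsFile approximates what the tmpl.ParseFunc call above does: the
// vars_files entry is treated as a Go template and rendered against the
// playbook config before the path is looked up.
func renderVarsFile(config map[string]any, entry string) (string, error) {
	t, err := template.New("vars_file").Parse(entry)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, config); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	config := map[string]any{"cluster_flavor": "kubernetes"} // hypothetical config key
	for _, entry := range []string{"vars/common.yaml", "vars/{{ .cluster_flavor }}.yaml"} {
		out, err := renderVarsFile(config, entry)
		if err != nil {
			panic(err)
		}
		fmt.Println(out) // vars/common.yaml, then vars/kubernetes.yaml
	}
}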

View File

@@ -25,6 +25,7 @@ import (
"sync"
"github.com/cockroachdb/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -148,7 +149,11 @@ func (s fileStorage) Watch(_ context.Context, key string, _ apistorage.ListOptio
func (s fileStorage) Get(_ context.Context, key string, _ apistorage.GetOptions, out runtime.Object) error {
data, err := os.ReadFile(key + yamlSuffix)
if err != nil {
return errors.Wrapf(err, "failed to read object file %q", key)
if os.IsNotExist(err) {
// Return a NotFound error with dummy GroupResource and key as name.
return apierrors.NewNotFound(s.resource, key)
}
return err
}
return decode(s.codec, data, out)
@@ -174,6 +179,10 @@ func (s fileStorage) GetList(_ context.Context, key string, opts apistorage.List
// Get the root entries in the directory corresponding to 'key'.
rootEntries, isAllNamespace, err := s.getRootEntries(key)
if err != nil {
if os.IsNotExist(err) {
// Return a NotFound error with dummy GroupResource and key as name.
return apierrors.NewNotFound(s.resource, key)
}
return err
}
@@ -189,6 +198,10 @@
err = s.processResourceFile(key, entry, v, continueKeyMatchRule, resourceVersionMatchRule, &lastKey, opts, listObj)
}
if err != nil {
if os.IsNotExist(err) {
// Return a NotFound error with dummy GroupResource and key as name.
return apierrors.NewNotFound(s.resource, key)
}
return err
}
// Check if we have reached the limit of results requested by the client.
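
The storage layer now translates a missing backing file into a standard Kubernetes NotFound error instead of a wrapped I/O error, so callers can test it with apierrors.IsNotFound. A minimal, self-contained sketch of the pattern (the GroupResource here is illustrative, not the one fileStorage actually carries):

package main

import (
	"fmt"
	"os"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// readObjectFile translates a missing backing file into a Kubernetes NotFound
// error so callers can rely on apierrors.IsNotFound.
func readObjectFile(key string) ([]byte, error) {
	data, err := os.ReadFile(key + ".yaml")
	if err != nil {
		if os.IsNotExist(err) {
			return nil, apierrors.NewNotFound(
				schema.GroupResource{Group: "kubekey.kubesphere.io", Resource: "inventories"}, key)
		}
		return nil, err
	}
	return data, nil
}

func main() {
	_, err := readObjectFile("/nonexistent/default/sample")
	fmt.Println(apierrors.IsNotFound(err)) // true
}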

View File

@@ -46,6 +46,13 @@ type Result struct {
Result any `json:"result"`
}
// SetResult sets the Result field of the Result struct and returns the updated Result.
// This is useful for chaining or for returning a Result with additional data.
func (r Result) SetResult(result any) Result {
r.Result = result
return r
}
// ListResult is a generic struct representing a paginated list response.
// T is a type parameter for the type of items in the list.
// Items contains the list of results, and TotalItems indicates the total number of items available.
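
SetResult uses a value receiver, so it fills in and returns a copy rather than mutating the receiver; handlers can therefore chain off a shared success value. A small sketch under the assumption that api.SUCCESS is such a package-level Result value (the real struct has more fields than shown here):

package main

import "fmt"

// Result trimmed to the field shown in the diff.
type Result struct {
	Result any `json:"result"`
}

// SetResult has a value receiver: it modifies a copy and returns it,
// leaving the receiver untouched.
func (r Result) SetResult(result any) Result {
	r.Result = result
	return r
}

// SUCCESS stands in for the shared api.SUCCESS value handlers chain off.
var SUCCESS = Result{}

func main() {
	resp := SUCCESS.SetResult("playbook has been deleted")
	fmt.Println(resp.Result)    // playbook has been deleted
	fmt.Println(SUCCESS.Result) // <nil>: the shared value was not mutated
}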

View File

@@ -11,6 +11,7 @@ import (
jsonpatch "github.com/evanphx/json-patch"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -81,6 +82,17 @@ func (h *InventoryHandler) Patch(request *restful.Request, response *restful.Res
api.HandleError(response, request, errors.Wrapf(err, "failed to get Inventory %s/%s from cluster", namespace, inventoryName))
return
}
// Pre-process: ensure all groups have hosts as arrays instead of null
// This is necessary because JSON patch operations like "add" with "-" path
// require the target to be an array, not null
if oldInventory.Spec.Groups != nil {
for groupName, group := range oldInventory.Spec.Groups {
if group.Hosts == nil {
group.Hosts = []string{}
oldInventory.Spec.Groups[groupName] = group
}
}
}
// Encode the old inventory object to JSON
oldInventoryJSON, err := runtime.Encode(codec, oldInventory)
if err != nil {
@@ -89,7 +101,7 @@ func (h *InventoryHandler) Patch(request *restful.Request, response *restful.Res
}
// Apply the patch to the old inventory and decode the result
applyPatchAndDecode := func() (*kkcorev1.Inventory, error) {
applyPatchAndDecode := func(objectJSON []byte) (*kkcorev1.Inventory, error) {
var patchedJSON []byte
switch patchType {
case types.JSONPatchType:
@@ -97,13 +109,13 @@ func (h *InventoryHandler) Patch(request *restful.Request, response *restful.Res
if err != nil {
return nil, errors.Wrap(err, "failed to decode JSON patch")
}
patchedJSON, err = patchObj.Apply(oldInventoryJSON)
patchedJSON, err = patchObj.Apply(objectJSON)
if err != nil {
return nil, errors.Wrap(err, "failed to apply JSON patch to old inventory JSON")
}
case types.MergePatchType:
var err error
patchedJSON, err = jsonpatch.MergePatch(oldInventoryJSON, patchBody)
patchedJSON, err = jsonpatch.MergePatch(objectJSON, patchBody)
if err != nil {
return nil, errors.Wrap(err, "failed to apply merge patch to old inventory JSON")
}
@@ -118,7 +130,7 @@ func (h *InventoryHandler) Patch(request *restful.Request, response *restful.Res
return newInventory, nil
}
updatedInventory, err := applyPatchAndDecode()
updatedInventory, err := applyPatchAndDecode(oldInventoryJSON)
if err != nil {
api.HandleError(response, request, errors.Wrap(err, "failed to apply patch and decode inventory"))
return
@@ -264,10 +276,13 @@ func (h *InventoryHandler) Info(request *restful.Request, response *restful.Resp
name := request.PathParameter("inventory")
inventory := &kkcorev1.Inventory{}
err := h.client.Get(request.Request.Context(), ctrlclient.ObjectKey{Namespace: namespace, Name: name}, inventory)
if err != nil {
api.HandleError(response, request, err)
if apierrors.IsNotFound(err) {
_ = response.WriteEntity(api.SUCCESS.SetResult("waiting for inventory to be created"))
} else {
api.HandleError(response, request, err)
}
return
}
@@ -285,7 +300,11 @@ func (h *InventoryHandler) ListHosts(request *restful.Request, response *restful
inventory := &kkcorev1.Inventory{}
err := h.client.Get(request.Request.Context(), ctrlclient.ObjectKey{Namespace: namespace, Name: name}, inventory)
if err != nil {
api.HandleError(response, request, err)
if apierrors.IsNotFound(err) {
_ = response.WriteEntity(api.SUCCESS.SetResult("waiting for inventory to be created"))
} else {
api.HandleError(response, request, err)
}
return
}
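
The nil-to-empty normalization added to Patch above exists because a JSON Patch add with the "-" (append) path needs an existing array target. A short demonstration with the same evanphx/json-patch library the handler uses:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// An append ("-") add operation, as a client might send for group hosts.
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op":"add","path":"/hosts/-","value":"node1"}]`))
	if err != nil {
		panic(err)
	}

	// Against null the add cannot be applied: there is no array to append to.
	_, errNull := patch.Apply([]byte(`{"hosts":null}`))
	fmt.Println("hosts=null:", errNull)

	// Against an empty array the same patch succeeds.
	out, errEmpty := patch.Apply([]byte(`{"hosts":[]}`))
	fmt.Println("hosts=[]:", string(out), errEmpty) // {"hosts":["node1"]} <nil>
}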

View File

@@ -263,7 +263,7 @@ func (h *PlaybookHandler) Delete(request *restful.Request, response *restful.Res
err := h.client.Get(request.Request.Context(), ctrlclient.ObjectKey{Namespace: namespace, Name: name}, playbook)
if err != nil {
if apierrors.IsNotFound(err) {
_ = response.WriteEntity(api.SUCCESS)
_ = response.WriteEntity(api.SUCCESS.SetResult("playbook has deleted"))
} else {
api.HandleError(response, request, err)
}
@@ -273,7 +273,11 @@ func (h *PlaybookHandler) Delete(request *restful.Request, response *restful.Res
playbookManager.stopPlaybook(playbook)
// Delete the playbook resource.
if err := h.client.Delete(request.Request.Context(), playbook); err != nil {
api.HandleError(response, request, err)
if apierrors.IsNotFound(err) {
_ = response.WriteEntity(api.SUCCESS.SetResult("playbook has deleted"))
} else {
api.HandleError(response, request, err)
}
return
}
// Delete related log file and directory.
@@ -284,7 +288,11 @@ func (h *PlaybookHandler) Delete(request *restful.Request, response *restful.Res
"playbook.name": playbook.Name,
"playbook.uid": string(playbook.UID),
}); err != nil {
api.HandleError(response, request, err)
if apierrors.IsNotFound(err) {
_ = response.WriteEntity(api.SUCCESS.SetResult("playbook has deleted"))
} else {
api.HandleError(response, request, err)
}
return
}
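
Treating NotFound as success makes Delete idempotent: repeating the request after the playbook is gone still returns a success payload. controller-runtime ships an equivalent idiom; the sketch below is an alternative phrasing of the same idea, not the code in this PR:

package example

import (
	"context"

	kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// deletePlaybookObject expresses the handler's idempotency with controller-runtime's
// IgnoreNotFound helper: a NotFound from Delete means the object is already gone
// and is not treated as an error.
func deletePlaybookObject(ctx context.Context, cl ctrlclient.Client, pb *kkcorev1.Playbook) error {
	if err := cl.Delete(ctx, pb); ctrlclient.IgnoreNotFound(err) != nil {
		return err
	}
	return nil
}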

View File

@@ -55,7 +55,7 @@ func (h ResourceHandler) ConfigInfo(request *restful.Request, response *restful.
file, err := os.Open(filepath.Join(h.rootPath, api.SchemaConfigFile))
if err != nil {
if os.IsNotExist(err) {
_ = response.WriteError(http.StatusNotFound, err)
_ = response.WriteEntity(api.SUCCESS.SetResult("waiting for config to be created"))
} else {
_ = response.WriteError(http.StatusInternalServerError, err)
}