Mirror of https://github.com/kubesphere/kubekey.git (synced 2025-12-25 17:12:50 +00:00)
feat: enhance precheck tasks for image registry and network validation (#2676)
* feat: enhance precheck tasks for image registry and network validation

  - Added a task to ensure successful authentication to the image registry.
  - Updated existing tasks to provide clearer failure messages for required configurations.
  - Improved validation for network interfaces and CIDR configurations, ensuring dual-stack support.
  - Enhanced error handling in the resource handler for playbook creation.

  Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: enhance configuration and query handling

  - Added `-trimpath` flag to Go build configuration for improved binary paths.
  - Updated REST configuration to set QPS and Burst limits for better performance.
  - Refactored query handling to use string types for field and value, improving type consistency.
  - Enhanced error handling in resource configuration updates and improved parsing of request bodies.

  Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: check inventory when it's changed

  Signed-off-by: joyceliu <joyceliu@yunify.com>

* feat: enhance playbook execution and query handling

  - Added a new optional query parameter `promise` to the playbook and inventory endpoints, allowing for asynchronous execution control.
  - Introduced a new result state `ResultPending` to indicate ongoing operations.
  - Refactored the executor function to handle the `promise` parameter, enabling conditional execution of playbooks.
  - Improved error handling and logging during playbook execution.

  Signed-off-by: joyceliu <joyceliu@yunify.com>

---------

Signed-off-by: joyceliu <joyceliu@yunify.com>
This commit is contained in: parent 620b7f56a3, commit 348c9b2d15
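As a quick illustration of the new `promise` query parameter described above, here is a minimal client-side sketch in Go. The host, port, path, and request body are assumptions for illustration only; the diff below only shows that the playbook and inventory handlers read a `promise` query parameter and default it to "true" (asynchronous execution).

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical playbook body; the real API expects a kkcorev1.Playbook entity.
	body := []byte(`{"metadata":{"generateName":"demo-"},"spec":{"playbook":"host_check.yaml"}}`)

	// promise=true (the default) returns immediately while the playbook stays pending;
	// promise=false blocks until playbook execution has finished.
	// The URL below is an assumption for illustration.
	url := "http://localhost:8080/api/v1/namespaces/default/playbooks?promise=false"

	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}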
@ -139,13 +139,18 @@ linters-settings:
|
|||
# List of allowed packages.
|
||||
allow:
|
||||
- $gostd
|
||||
- github.com/Masterminds/sprig
|
||||
- github.com/cockroachdb/errors
|
||||
- github.com/containerd/containerd/images
|
||||
- github.com/emicklei/go-restful-openapi/v2
|
||||
- github.com/emicklei/go-restful/v3
|
||||
- github.com/evanphx/json-patch
|
||||
- github.com/fsnotify/fsnotify
|
||||
- github.com/go-git/go-git
|
||||
- github.com/go-openapi/spec
|
||||
- github.com/google/go-cmp/cmp
|
||||
- github.com/google/gops
|
||||
- github.com/kubesphere/kubekey
|
||||
- github.com/Masterminds/sprig
|
||||
- github.com/opencontainers/image-spec
|
||||
- github.com/pkg/sftp
|
||||
- github.com/schollz/progressbar
|
||||
|
|
@ -154,10 +159,6 @@ linters-settings:
|
|||
- k8s.io
|
||||
- oras.land/oras-go
|
||||
- sigs.k8s.io
|
||||
- github.com/emicklei/go-restful/v3
|
||||
- github.com/google/go-cmp/cmp
|
||||
- github.com/emicklei/go-restful-openapi/v2
|
||||
- github.com/go-openapi/spec
|
||||
forbidigo:
|
||||
# Forbid the following identifiers (list of regexp).
|
||||
# Default: ["^(fmt\\.Print(|f|ln)|print|println)$"]
|
||||
|
|
@ -244,19 +245,30 @@ linters-settings:
|
|||
# oci
|
||||
- pkg: github.com/opencontainers/image-spec/specs-go/v1
|
||||
alias: imagev1
|
||||
# kubekey
|
||||
- pkg: "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
alias: _const
|
||||
- pkg: "github.com/kubesphere/kubekey/api/capkk/infrastructure/v1beta1"
|
||||
alias: capkkinfrav1beta1
|
||||
- pkg: "github.com/kubesphere/kubekey/api/core/v1"
|
||||
alias: kkcorev1
|
||||
- pkg: "github.com/kubesphere/kubekey/api/core/v1alpha1"
|
||||
alias: kkcorev1alpha1
|
||||
- pkg: "github.com/kubesphere/kubekey/api/project/v1"
|
||||
alias: kkprojectv1
|
||||
# Kubernetes
|
||||
- pkg: "k8s.io/api/batch/v1"
|
||||
alias: batchv1
|
||||
- pkg: "k8s.io/api/coordination/v1"
|
||||
alias: coordinationv1
|
||||
- pkg: "k8s.io/api/core/v1"
|
||||
alias: corev1
|
||||
- pkg: "k8s.io/api/batch/v1"
|
||||
alias: batchv1
|
||||
- pkg: "k8s.io/api/rbac/v1"
|
||||
alias: rbacv1
|
||||
- pkg: "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
alias: metav1
|
||||
- pkg: "k8s.io/apimachinery/pkg/api/errors"
|
||||
alias: apierrors
|
||||
- pkg: "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
alias: metav1
|
||||
- pkg: "k8s.io/apimachinery/pkg/util/runtime"
|
||||
alias: utilruntime
|
||||
- pkg: "k8s.io/client-go/tools/cache"
|
||||
|
|
@ -278,24 +290,16 @@ linters-settings:
|
|||
alias: ctrlcontroller
|
||||
- pkg: "sigs.k8s.io/controller-runtime/pkg/finalizer"
|
||||
alias: ctrlfinalizer
|
||||
# kubekey
|
||||
- pkg: "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
alias: _const
|
||||
- pkg: "github.com/kubesphere/kubekey/api/core/v1"
|
||||
alias: kkcorev1
|
||||
- pkg: "github.com/kubesphere/kubekey/api/core/v1alpha1"
|
||||
alias: kkcorev1alpha1
|
||||
- pkg: "github.com/kubesphere/kubekey/api/project/v1"
|
||||
alias: kkprojectv1
|
||||
- pkg: "github.com/kubesphere/kubekey/api/capkk/infrastructure/v1beta1"
|
||||
alias: capkkinfrav1beta1
|
||||
# cluster-api
|
||||
- pkg: "sigs.k8s.io/cluster-api/api/v1beta1"
|
||||
alias: clusterv1beta1
|
||||
- pkg: "sigs.k8s.io/cluster-api/util"
|
||||
alias: clusterutil
|
||||
- pkg: "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
|
||||
alias: kubeadmcpv1beta1
|
||||
- pkg: "sigs.k8s.io/cluster-api/util"
|
||||
alias: clusterutil
|
||||
# common
|
||||
- pkg: "github.com/evanphx/json-patch"
|
||||
alias: jsonpatch
|
||||
nestif:
|
||||
# Minimal complexity of if statements to report.
|
||||
# Default: 5
@@ -19,6 +19,8 @@ builds:
      - arm64
    binary: kk
    ldflags: "{{ .Env.LDFLAGS }}"
    flags:
      - -trimpath
    tags:
      - builtin

@@ -1,18 +1,29 @@
- name: Ensure image registry authentication is successful
  when: .image_registry.auth | empty | not
  run_once: true
  command: |
    HTTP_CODE=$(curl -skLI -w "%{http_code}" -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" "https://{{ .image_registry.auth.registry }}/v2/" -o /dev/null)
    if [[ "$HTTP_CODE" == "200" ]]; then
      echo "Authentication to image registry succeeded."
    else
      echo "Authentication to image registry {{ .image_registry.auth.registry }} failed." >&2
    fi

# image_registry is installed by docker_compose
- name: docker_version and dockercompose_version should not be empty
- name: Ensure docker and docker-compose versions are set for image registry
  when: .groups.image_registry | empty | not
  assert:
    that:
      - .docker_version | empty | not
      - .dockercompose_version | empty | not
    msg: >-
      "docker_version" and "dockercompose_version" should not be empty
      Both "docker_version" and "dockercompose_version" must be specified for the image registry.

- name: keepalived_version should not be empty when image_registry is high availability
- name: Ensure keepalived_version is set for high availability image registry
  when:
    - .image_registry.ha_vip | empty | not
    - .groups.image_registry | len | lt 1
  assert:
    that: .keepalived_version | empty | not
    msg: >-
      "keepalived_version" should not be empty when image_registry is high availability
      "keepalived_version" must be specified when the image registry is configured for high availability.
@@ -1,36 +1,34 @@
---
- name: Should found network interface
- name: Ensure required network interfaces are present
  command: |
    {{- if .internal_ipv4 | empty | not }}
    if [ ! ip -o addr show | grep -q {{ .internal_ipv4 }} ]; then
      echo 'No ipv4 network interface found'
      exit 1
    if ! ip -o addr show | grep -q {{ .internal_ipv4 }}; then
      echo 'IPv4 network interface not found' >&2
    fi
    {{- end }}
    {{- if .internal_ipv6 | empty | not }}
    if [ ! ip -o addr show | grep -q {{ .internal_ipv6 }} ]; then
      echo 'No ipv6 network interface found'
      exit 1
    if ! ip -o addr show | grep -q {{ .internal_ipv6 }}; then
      echo 'IPv6 network interface not found' >&2
    fi
    {{- end }}

# https://kubernetes.io/docs/concepts/services-networking/dual-stack/
- name: Stop if cidr is not valid
- name: Validate CIDR configuration
  run_once: true
  block:
    - name: Stop if pod cidr is not valid
    - name: Validate pod CIDR format
      when: .kubernetes.networking.pod_cidr | empty | not
      assert:
        that: .kubernetes.networking.pod_cidr | splitList "," | len | ge 2
        fail_msg: >-
          kubernetes.networking.pod_cidr should be ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr
    - name: Stop if service cidr is not valid
          kubernetes.networking.pod_cidr must be specified as ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr
    - name: Validate service CIDR format
      when: .kubernetes.networking.service_cidr | empty | not
      assert:
        that: .kubernetes.networking.service_cidr | splitList "," | len | ge 2
        fail_msg: >-
          kubernetes.networking.service_cidr should be ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr
    - name: Stop if pod networking is not support dual-stack
          kubernetes.networking.service_cidr must be specified as ipv4_cidr/ipv6_cidr or ipv4_cidr,ipv6_cidr
    - name: Ensure pod networking supports dual-stack
      when:
        - .kubernetes.networking.pod_cidr | empty | not
        - .kubernetes.networking.pod_cidr | splitList "," | len | eq 2
@@ -40,8 +38,8 @@
        - .kubernetes.networking.pod_cidr | splitList "," | first | ipFamily | eq "IPv4"
        - .kubernetes.networking.pod_cidr | splitList "," | last | ipFamily | eq "IPv6"
      fail_msg: >-
        Kubernetes introduced support for pod dual-stack networking starting from version v1.20.0.
    - name: Stop if service networking is not support dual-stack
        Dual-stack pod networking is supported in Kubernetes version v1.20.0 and above.
    - name: Ensure service networking supports dual-stack
      when:
        - .kubernetes.networking.service_cidr | empty | not
        - .kubernetes.networking.service_cidr | splitList "," | len | eq 2
@@ -51,41 +49,38 @@
        - .kubernetes.networking.service_cidr | splitList "," | first | ipFamily | eq "IPv4"
        - .kubernetes.networking.service_cidr | splitList "," | last | ipFamily | eq "IPv6"
      fail_msg: >-
        Kubernetes introduced support for service dual-stack networking starting from version v1.20.0.
        Dual-stack service networking is supported in Kubernetes version v1.20.0 and above.

- name: Stop if unknown network plugin
- name: Fail if network plugin is unsupported
  run_once: true
  assert:
    that: .cluster_require.require_network_plugin | has .kubernetes.kube_network_plugin
    fail_msg: >-
      kube_network_plugin:"{{ .kubernetes.kube_network_plugin }}" is not supported
      The specified kube_network_plugin "{{ .kubernetes.kube_network_plugin }}" is not supported.
  when: .kubernetes.kube_network_plugin | empty | not

# This assertion will fail on the safe side: One can indeed schedule more pods
# on a node than the CIDR-range has space for when additional pods use the host
# network namespace. It is impossible to ascertain the number of such pods at
# provisioning time, so to establish a guarantee, we factor these out.
# NOTICE: the check blatantly ignores the inet6-case
- name: Guarantee that enough network address space is available for all pods
# Note: This assertion errs on the side of caution. It is technically possible to schedule more pods on a node than the available CIDR range allows, especially if some pods use the host network namespace. Since the number of such pods cannot be determined at provisioning time, this check provides a conservative guarantee.
# Note: This check intentionally ignores the IPv6-only case.
- name: Ensure sufficient network address space for all pods
  run_once: true
  when: .groups.k8s_cluster | default list | has .inventory_hostname
  block:
    - name: Guarantee that enough ipv4 network address space is available for all pods
    - name: Ensure sufficient IPv4 address space for pods
      when: .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | first | ipFamily | eq "IPv4"
      assert:
        that: le (.kubernetes.kubelet.max_pods | default 110) (sub (pow 2 (float64 (sub 32 (.kubernetes.networking.ipv4_mask_size | default 24)))) 2)
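        # e.g. with the defaults above: an ipv4_mask_size of 24 leaves 2^(32-24) - 2 = 254 usable pod addresses per node, which must be >= max_pods (default 110).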
        fail_msg: do not schedule more pods on a node than ipv4 inet addresses are available.
    - name: Guarantee that enough ipv6 network address space is available for all pods
        fail_msg: Do not schedule more pods on a node than there are available IPv4 addresses in the pod CIDR range.
    - name: Ensure sufficient IPv6 address space for pods
      when: .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | last | ipFamily | eq "IPv6"
      assert:
        that: le (.kubernetes.kubelet.max_pods | default 110) (sub (pow 2 (float64 (sub 128 (.kubernetes.networking.ipv4_mask_size | default 64)))) 2)
        fail_msg: do not schedule more pods on a node than ipv6 inet addresses are available.
        fail_msg: Do not schedule more pods on a node than there are available IPv6 addresses in the pod CIDR range.

# https://github.com/alibaba/hybridnet/wiki/Getting-Started#install
- name: Stop install hybridnet if Kubernetes version is not satisfied
- name: Fail if Kubernetes version is too low for hybridnet
  run_once: true
  assert:
    that: .kube_version | semverCompare ">=v1.16.0"
    fail_msg: To install hybridnet in an exist Kubernetes (version > 1.16) cluster
    fail_msg: Hybridnet requires Kubernetes version 1.16 or higher.
  when:
    - .kubernetes.kube_network_plugin | eq "hybridnet"
@@ -125,7 +125,7 @@ func (o *CommonOptions) Run(ctx context.Context, playbook *kkcorev1.Playbook) er
			return errors.Wrapf(err, "failed to create local dir %q for playbook %q", o.Workdir, ctrlclient.ObjectKeyFromObject(playbook))
		}
	}
	restconfig := &rest.Config{}
	restconfig := &rest.Config{QPS: 100, Burst: 200}
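	// Higher client-side QPS/Burst limits keep bulk playbook/task reads and writes from being throttled by the default client rate limiter.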
	if err := proxy.RestConfig(filepath.Join(o.Workdir, _const.RuntimeDir), restconfig); err != nil {
		return err
	}
@@ -24,7 +24,7 @@ func newWebCommand() *cobra.Command {
	Short: "start a http server with web UI.",
	RunE: func(cmd *cobra.Command, args []string) error {
		// Initialize REST config for Kubernetes client
		restconfig := &rest.Config{}
		restconfig := &rest.Config{QPS: 100, Burst: 200}
		if err := proxy.RestConfig(filepath.Join(o.Workdir, _const.RuntimeDir), restconfig); err != nil {
			return err
		}
go.mod
|
|
@ -8,6 +8,7 @@ require (
|
|||
github.com/containerd/containerd v1.7.27
|
||||
github.com/emicklei/go-restful-openapi/v2 v2.11.0
|
||||
github.com/emicklei/go-restful/v3 v3.12.2
|
||||
github.com/evanphx/json-patch v5.7.0+incompatible
|
||||
github.com/fsnotify/fsnotify v1.7.0
|
||||
github.com/go-git/go-git/v5 v5.11.0
|
||||
github.com/go-openapi/spec v0.21.0
|
||||
|
|
|
|||
|
|
@ -22,7 +22,15 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
// convertBytesToMap with split string, only convert line which contain split
|
||||
// convertBytesToMap parses the given byte slice into a map[string]string using the provided split string.
|
||||
// Only lines containing the split string are processed. Each such line is split into key and value at the first occurrence of the split string.
|
||||
// Leading and trailing spaces are trimmed from both key and value.
|
||||
// Example input (split = "="):
|
||||
//
|
||||
// FOO=bar
|
||||
// BAZ = qux
|
||||
//
|
||||
// Result: map[string]string{"FOO": "bar", "BAZ": "qux"}
|
||||
func convertBytesToMap(bs []byte, split string) map[string]string {
|
||||
config := make(map[string]string)
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(bs))
|
||||
|
|
@ -39,8 +47,21 @@ func convertBytesToMap(bs []byte, split string) map[string]string {
|
|||
return config
|
||||
}
|
||||
|
||||
// convertBytesToSlice with split string. only convert line which contain split.
|
||||
// group by empty line
|
||||
// convertBytesToSlice parses the given byte slice into a slice of map[string]string using the provided split string.
|
||||
// Only lines containing the split string are processed. Each such line is split into key and value at the first occurrence of the split string.
|
||||
// Leading and trailing spaces are trimmed from both key and value.
|
||||
// Groups of key-value pairs are separated by empty lines. Each group is stored as a separate map in the resulting slice.
|
||||
// Example input (split = ":"):
|
||||
//
|
||||
// foo: bar
|
||||
// baz: qux
|
||||
//
|
||||
// hello: world
|
||||
//
|
||||
// Result: []map[string]string{
|
||||
// {"foo": "bar", "baz": "qux"},
|
||||
// {"hello": "world"},
|
||||
// }
|
||||
func convertBytesToSlice(bs []byte, split string) []map[string]string {
|
||||
var config []map[string]string
|
||||
currentMap := make(map[string]string)
|
||||
|
|
|
|||
|
|
@ -17,14 +17,21 @@ limitations under the License.
|
|||
package _const
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
|
||||
kkcorev1alpha1 "github.com/kubesphere/kubekey/api/core/v1alpha1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/utils/ptr"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
|
||||
// GetWorkdirFromConfig retrieves the working directory from the provided configuration.
|
||||
|
|
@ -61,3 +68,55 @@ func Host2ProviderID(clusterName, host string) *string {
|
|||
func ProviderID2Host(clusterName string, providerID *string) string {
|
||||
return strings.TrimPrefix(ptr.Deref(providerID, ""), fmt.Sprintf("kk://%s/", clusterName))
|
||||
}
|
||||
|
||||
// NewTestPlaybook creates a fake controller-runtime client, an Inventory resource with the given hosts,
|
||||
// and returns the client and a Playbook resource referencing the created Inventory.
|
||||
// This is intended for use in unit tests.
|
||||
func NewTestPlaybook(hosts []string) (ctrlclient.Client, *kkcorev1.Playbook, error) {
|
||||
// Create a fake client with the required scheme and status subresources.
|
||||
client := fake.NewClientBuilder().
|
||||
WithScheme(Scheme).
|
||||
WithStatusSubresource(&kkcorev1.Playbook{}, &kkcorev1alpha1.Task{}).
|
||||
Build()
|
||||
|
||||
// Convert the slice of hostnames to an InventoryHost map.
|
||||
inventoryHost := make(kkcorev1.InventoryHost)
|
||||
for _, h := range hosts {
|
||||
inventoryHost[h] = runtime.RawExtension{}
|
||||
}
|
||||
|
||||
// Create an Inventory resource with the generated hosts.
|
||||
inventory := &kkcorev1.Inventory{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "test-",
|
||||
Namespace: corev1.NamespaceDefault,
|
||||
},
|
||||
Spec: kkcorev1.InventorySpec{
|
||||
Hosts: inventoryHost,
|
||||
},
|
||||
}
|
||||
|
||||
// Persist the Inventory resource using the fake client.
|
||||
if err := client.Create(context.TODO(), inventory); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Create a Playbook resource that references the created Inventory.
|
||||
playbook := &kkcorev1.Playbook{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "test-",
|
||||
Namespace: corev1.NamespaceDefault,
|
||||
},
|
||||
Spec: kkcorev1.PlaybookSpec{
|
||||
InventoryRef: &corev1.ObjectReference{
|
||||
Name: inventory.Name,
|
||||
Namespace: inventory.Namespace,
|
||||
},
|
||||
},
|
||||
Status: kkcorev1.PlaybookStatus{},
|
||||
}
|
||||
|
||||
return client, playbook, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -37,8 +37,8 @@ var (
|
|||
// the api group, unless you really know what you're doing.
|
||||
Scheme = newScheme()
|
||||
|
||||
// Codecs provides access to encoding and decoding for the scheme
|
||||
Codecs = serializer.NewCodecFactory(Scheme)
|
||||
// CodecFactory provides access to encoding and decoding for the scheme
|
||||
CodecFactory = serializer.NewCodecFactory(Scheme)
|
||||
|
||||
// ParameterCodec handles versioning of objects that are converted to query parameters.
|
||||
ParameterCodec = runtime.NewParameterCodec(Scheme)
|
||||
|
|
|
|||
|
|
@ -1,65 +0,0 @@
|
|||
package _const
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
|
||||
kkcorev1alpha1 "github.com/kubesphere/kubekey/api/core/v1alpha1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
|
||||
// NewTestPlaybook creates a fake controller-runtime client, an Inventory resource with the given hosts,
|
||||
// and returns the client and a Playbook resource referencing the created Inventory.
|
||||
// This is intended for use in unit tests.
|
||||
func NewTestPlaybook(hosts []string) (ctrlclient.Client, *kkcorev1.Playbook, error) {
|
||||
// Create a fake client with the required scheme and status subresources.
|
||||
client := fake.NewClientBuilder().
|
||||
WithScheme(Scheme).
|
||||
WithStatusSubresource(&kkcorev1.Playbook{}, &kkcorev1alpha1.Task{}).
|
||||
Build()
|
||||
|
||||
// Convert the slice of hostnames to an InventoryHost map.
|
||||
inventoryHost := make(kkcorev1.InventoryHost)
|
||||
for _, h := range hosts {
|
||||
inventoryHost[h] = runtime.RawExtension{}
|
||||
}
|
||||
|
||||
// Create an Inventory resource with the generated hosts.
|
||||
inventory := &kkcorev1.Inventory{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "test-",
|
||||
Namespace: corev1.NamespaceDefault,
|
||||
},
|
||||
Spec: kkcorev1.InventorySpec{
|
||||
Hosts: inventoryHost,
|
||||
},
|
||||
}
|
||||
|
||||
// Persist the Inventory resource using the fake client.
|
||||
if err := client.Create(context.TODO(), inventory); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Create a Playbook resource that references the created Inventory.
|
||||
playbook := &kkcorev1.Playbook{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "test-",
|
||||
Namespace: corev1.NamespaceDefault,
|
||||
},
|
||||
Spec: kkcorev1.PlaybookSpec{
|
||||
InventoryRef: &corev1.ObjectReference{
|
||||
Name: inventory.Name,
|
||||
Namespace: inventory.Namespace,
|
||||
},
|
||||
},
|
||||
Status: kkcorev1.PlaybookStatus{},
|
||||
}
|
||||
|
||||
return client, playbook, nil
|
||||
}
|
||||
|
|
@ -10,7 +10,7 @@ import (
|
|||
"github.com/cockroachdb/errors"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/utils"
|
||||
)
|
||||
|
||||
// default function docs: http://masterminds.github.io/sprig
|
||||
|
|
@ -58,7 +58,7 @@ func fromYAML(v string) (any, error) {
|
|||
func ipInCIDR(cidr string) ([]string, error) {
|
||||
var ips = make([]string, 0)
|
||||
for _, s := range strings.Split(cidr, ",") {
|
||||
ips = append(ips, _const.ParseIP(s)...)
|
||||
ips = append(ips, utils.ParseIP(s)...)
|
||||
}
|
||||
|
||||
return ips, nil
|
||||
|
|
|
|||
|
|
@ -102,7 +102,7 @@ func newAPIIResources(gv schema.GroupVersion) *apiResources {
|
|||
minRequestTimeout: defaultMinRequestTimeout,
|
||||
|
||||
typer: _const.Scheme,
|
||||
serializer: _const.Codecs,
|
||||
serializer: _const.CodecFactory,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -287,7 +287,7 @@ func newReqScope(resources *apiResources, o resourceOptions, authz authorizer.Au
|
|||
Namer: meta.NewAccessor(),
|
||||
ClusterScoped: false,
|
||||
},
|
||||
Serializer: _const.Codecs,
|
||||
Serializer: _const.CodecFactory,
|
||||
ParameterCodec: _const.ParameterCodec,
|
||||
Creater: _const.Scheme,
|
||||
Convertor: _const.Scheme,
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
package _const
|
||||
package utils
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package _const
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
|
@@ -0,0 +1,18 @@
package utils

// RemoveDuplicatesInOrder removes duplicate elements from a slice while preserving the original order.
// It works for any slice of comparable type T.
// Example: RemoveDuplicatesInOrder([]int{1,2,2,3}) returns []int{1,2,3}
func RemoveDuplicatesInOrder[T comparable](arr []T) []T {
	encountered := make(map[T]bool)
	result := make([]T, 0, len(arr)) // Preallocate capacity to avoid multiple allocations

	for _, v := range arr {
		if !encountered[v] {
			encountered[v] = true
			result = append(result, v)
		}
	}

	return result
}
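A minimal usage sketch of the helper above, with a hypothetical host list like the inventory groups this diff later de-duplicates; the import path is taken from elsewhere in this diff.

package main

import (
	"fmt"

	"github.com/kubesphere/kubekey/v4/pkg/utils"
)

func main() {
	// Hypothetical host list with duplicates, as might appear in an inventory group.
	hosts := []string{"node1", "node2", "node1", "node3", "node2"}
	fmt.Println(utils.RemoveDuplicatesInOrder(hosts)) // [node1 node2 node3]
}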
@ -31,6 +31,8 @@ const (
|
|||
ResultSucceed = "success"
|
||||
// ResultFailed indicates a failed operation result.
|
||||
ResultFailed = "failed"
|
||||
// ResultPending indicates a pending operation result.
|
||||
ResultPending = "pending"
|
||||
)
|
||||
|
||||
// SUCCESS is a global variable representing a successful operation result with a default success message.
|
||||
|
|
|
|||
|
|
@ -26,46 +26,57 @@ type manager struct {
|
|||
manager map[string]context.CancelFunc // Map of playbook key to its cancel function
|
||||
}
|
||||
|
||||
func (m *manager) executor(playbook *kkcorev1.Playbook, client ctrlclient.Client) error {
|
||||
// Build the log file path for the playbook execution
|
||||
filename := filepath.Join(
|
||||
_const.GetWorkdirFromConfig(playbook.Spec.Config),
|
||||
_const.RuntimeDir,
|
||||
kkcorev1.SchemeGroupVersion.Group,
|
||||
kkcorev1.SchemeGroupVersion.Version,
|
||||
"playbooks",
|
||||
playbook.Namespace,
|
||||
playbook.Name,
|
||||
playbook.Name+".log",
|
||||
)
|
||||
// Ensure the directory for the log file exists
|
||||
if _, err := os.Stat(filepath.Dir(filename)); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return errors.Wrapf(err, "failed to stat playbook dir %q", filepath.Dir(filename))
|
||||
func (m *manager) executor(playbook *kkcorev1.Playbook, client ctrlclient.Client, promise string) error {
|
||||
f := func() error {
|
||||
// Build the log file path for the playbook execution
|
||||
filename := filepath.Join(
|
||||
_const.GetWorkdirFromConfig(playbook.Spec.Config),
|
||||
_const.RuntimeDir,
|
||||
kkcorev1.SchemeGroupVersion.Group,
|
||||
kkcorev1.SchemeGroupVersion.Version,
|
||||
"playbooks",
|
||||
playbook.Namespace,
|
||||
playbook.Name,
|
||||
playbook.Name+".log",
|
||||
)
|
||||
// Ensure the directory for the log file exists
|
||||
if _, err := os.Stat(filepath.Dir(filename)); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return errors.Wrapf(err, "failed to stat playbook dir %q", filepath.Dir(filename))
|
||||
}
|
||||
// If directory does not exist, create it
|
||||
if err := os.MkdirAll(filepath.Dir(filename), os.ModePerm); err != nil {
|
||||
return errors.Wrapf(err, "failed to create playbook dir %q", filepath.Dir(filename))
|
||||
}
|
||||
}
|
||||
// If directory does not exist, create it
|
||||
if err := os.MkdirAll(filepath.Dir(filename), os.ModePerm); err != nil {
|
||||
return errors.Wrapf(err, "failed to create playbook dir %q", filepath.Dir(filename))
|
||||
// Open the log file for writing
|
||||
file, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open log file", "file", filename)
|
||||
}
|
||||
}
|
||||
// Open the log file for writing
|
||||
file, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open log file", "file", filename)
|
||||
}
|
||||
defer file.Close()
|
||||
defer file.Close()
|
||||
|
||||
// Create a cancellable context for playbook execution
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
// Register the playbook and its cancel function in the playbookManager
|
||||
m.addPlaybook(playbook, cancel)
|
||||
// Execute the playbook and write output to the log file
|
||||
if err := executor.NewPlaybookExecutor(ctx, client, playbook, file).Exec(ctx); err != nil {
|
||||
klog.ErrorS(err, "failed to exec playbook", "playbook", playbook.Name)
|
||||
// Create a cancellable context for playbook execution
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
// Register the playbook and its cancel function in the playbookManager
|
||||
m.addPlaybook(playbook, cancel)
|
||||
// Execute the playbook and write output to the log file
|
||||
if err := executor.NewPlaybookExecutor(ctx, client, playbook, file).Exec(ctx); err != nil {
|
||||
klog.ErrorS(err, "failed to exec playbook", "playbook", playbook.Name)
|
||||
}
|
||||
// Remove the playbook from the playbookManager after execution
|
||||
m.deletePlaybook(playbook)
|
||||
return nil
|
||||
}
|
||||
// Remove the playbook from the playbookManager after execution
|
||||
m.deletePlaybook(playbook)
|
||||
return nil
|
||||
if promise == "true" {
|
||||
go func() {
|
||||
if err := f(); err != nil {
|
||||
klog.ErrorS(err, "failed to execute playbook", "playbook", ctrlclient.ObjectKeyFromObject(playbook))
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
return f()
|
||||
}
|
||||
|
||||
// addPlaybook adds a playbook and its cancel function to the manager map.
|
||||
|
|
|
|||
|
|
@ -6,9 +6,14 @@ import (
|
|||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
|
@ -17,6 +22,7 @@ import (
|
|||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/utils"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/variable"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/web/api"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/web/query"
|
||||
|
|
@ -39,43 +45,165 @@ func NewInventoryHandler(workdir string, restconfig *rest.Config, client ctrlcli
|
|||
func (h *InventoryHandler) Post(request *restful.Request, response *restful.Response) {
|
||||
inventory := &kkcorev1.Inventory{}
|
||||
if err := request.ReadEntity(inventory); err != nil {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
}
|
||||
if err := h.client.Create(request.Request.Context(), inventory); err != nil {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
}
|
||||
|
||||
_ = response.WriteEntity(inventory)
|
||||
}
|
||||
|
||||
// Patch updates an existing inventory resource.
|
||||
// It reads the patch data from the request body and applies it to the specified inventory.
|
||||
// Patch updates an existing inventory resource with clear variable definitions and English comments.
|
||||
func (h *InventoryHandler) Patch(request *restful.Request, response *restful.Response) {
|
||||
// Get namespace and inventory name from path parameters
|
||||
namespace := request.PathParameter("namespace")
|
||||
name := request.PathParameter("inventory")
|
||||
data, err := io.ReadAll(request.Request.Body)
|
||||
inventoryName := request.PathParameter("inventory")
|
||||
|
||||
// Read the patch body from the request
|
||||
patchBody, err := io.ReadAll(request.Request.Body)
|
||||
if err != nil {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleError(response, request, errors.Wrap(err, "failed to read patch body from request"))
|
||||
return
|
||||
}
|
||||
patchType := request.HeaderParameter("Content-Type")
|
||||
// Get the Content-Type header to determine patch type
|
||||
contentType := request.HeaderParameter("Content-Type")
|
||||
patchType := types.PatchType(contentType)
|
||||
|
||||
// Get the existing inventory object.
|
||||
inventory := &kkcorev1.Inventory{}
|
||||
if err := h.client.Get(request.Request.Context(), ctrlclient.ObjectKey{Namespace: namespace, Name: name}, inventory); err != nil {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
// Get the codec for encoding/decoding Inventory objects
|
||||
codec := _const.CodecFactory.LegacyCodec(kkcorev1.SchemeGroupVersion)
|
||||
|
||||
// Retrieve the old inventory object from the cluster
|
||||
oldInventory := &kkcorev1.Inventory{}
|
||||
if err := h.client.Get(request.Request.Context(), ctrlclient.ObjectKey{Namespace: namespace, Name: inventoryName}, oldInventory); err != nil {
|
||||
api.HandleError(response, request, errors.Wrapf(err, "failed to get Inventory %s/%s from cluster", namespace, inventoryName))
|
||||
return
|
||||
}
|
||||
// Encode the old inventory object to JSON
|
||||
oldInventoryJSON, err := runtime.Encode(codec, oldInventory)
|
||||
if err != nil {
|
||||
api.HandleError(response, request, errors.Wrap(err, "failed to encode old inventory object to JSON"))
|
||||
return
|
||||
}
|
||||
|
||||
// Apply the patch.
|
||||
if err := h.client.Patch(request.Request.Context(), inventory, ctrlclient.RawPatch(types.PatchType(patchType), data)); err != nil {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
// Apply the patch to the old inventory and decode the result
|
||||
applyPatchAndDecode := func() (*kkcorev1.Inventory, error) {
|
||||
var patchedJSON []byte
|
||||
switch patchType {
|
||||
case types.JSONPatchType:
|
||||
patchObj, err := jsonpatch.DecodePatch(patchBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decode JSON patch")
|
||||
}
|
||||
patchedJSON, err = patchObj.Apply(oldInventoryJSON)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to apply JSON patch to old inventory JSON")
|
||||
}
|
||||
case types.MergePatchType:
|
||||
var err error
|
||||
patchedJSON, err = jsonpatch.MergePatch(oldInventoryJSON, patchBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to apply merge patch to old inventory JSON")
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("unknown Content-Type header for patch: %v", patchType)
|
||||
}
|
||||
newInventory := &kkcorev1.Inventory{}
|
||||
err := runtime.DecodeInto(codec, patchedJSON, newInventory)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decode patched inventory JSON")
|
||||
}
|
||||
return newInventory, nil
|
||||
}
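For reference, the two patch content types accepted above correspond to the standard Kubernetes patch formats. A minimal sketch of the request-body shapes follows; the /spec/hosts path is an assumption for illustration, based on the Inventory spec shown elsewhere in this diff.

package main

import "fmt"

func main() {
	// Content-Type: application/json-patch+json (types.JSONPatchType):
	// an RFC 6902 list of operations applied to the old object.
	jsonPatch := `[{"op": "add", "path": "/spec/hosts/node1", "value": {}}]`

	// Content-Type: application/merge-patch+json (types.MergePatchType):
	// an RFC 7386 partial document merged into the old object.
	mergePatch := `{"spec": {"hosts": {"node1": {}}}}`

	fmt.Println(jsonPatch)
	fmt.Println(mergePatch)
}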
|
||||
|
||||
updatedInventory, err := applyPatchAndDecode()
|
||||
if err != nil {
|
||||
api.HandleError(response, request, errors.Wrap(err, "failed to apply patch and decode inventory"))
|
||||
return
|
||||
}
|
||||
|
||||
_ = response.WriteEntity(inventory)
|
||||
// completeInventory normalizes the inventory groups:
|
||||
// - Synchronizes the "kube_control_plane" group to the "etcd" group.
|
||||
// - Removes duplicate hosts and groups within each group.
|
||||
completeInventory := func(inventory *kkcorev1.Inventory) {
|
||||
// sync kube_control_plane group to etcd group
|
||||
inventory.Spec.Groups["etcd"] = inventory.Spec.Groups["kube_control_plane"]
|
||||
for k, gv := range inventory.Spec.Groups {
|
||||
gv.Hosts = utils.RemoveDuplicatesInOrder(gv.Hosts)
|
||||
gv.Groups = utils.RemoveDuplicatesInOrder(gv.Groups)
|
||||
inventory.Spec.Groups[k] = gv
|
||||
}
|
||||
}
|
||||
completeInventory(updatedInventory)
|
||||
|
||||
// Patch the inventory resource in the cluster
|
||||
if err := h.client.Patch(request.Request.Context(), updatedInventory, ctrlclient.MergeFrom(oldInventory)); err != nil {
|
||||
api.HandleError(response, request, errors.Wrapf(err, "failed to patch Inventory %s/%s in cluster", namespace, inventoryName))
|
||||
return
|
||||
}
|
||||
|
||||
// Create a host-check playbook and set the workdir
|
||||
hostCheckPlaybook := &kkcorev1.Playbook{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "host-check-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: kkcorev1.PlaybookSpec{
|
||||
InventoryRef: &corev1.ObjectReference{
|
||||
Kind: "Inventory",
|
||||
Namespace: namespace,
|
||||
Name: inventoryName,
|
||||
},
|
||||
Playbook: "host_check.yaml",
|
||||
},
|
||||
Status: kkcorev1.PlaybookStatus{
|
||||
Phase: kkcorev1.PlaybookPhasePending,
|
||||
},
|
||||
}
|
||||
// Set the workdir in the playbook's config
|
||||
if err := unstructured.SetNestedField(hostCheckPlaybook.Spec.Config.Value(), h.workdir, _const.Workdir); err != nil {
|
||||
api.HandleError(response, request, errors.Wrap(err, "failed to set workdir in playbook config"))
|
||||
return
|
||||
}
|
||||
// Create the playbook resource in the cluster
|
||||
if err := h.client.Create(request.Request.Context(), hostCheckPlaybook); err != nil {
|
||||
api.HandleError(response, request, errors.Wrap(err, "failed to create host-check playbook in cluster"))
|
||||
return
|
||||
}
|
||||
|
||||
// Execute the playbook asynchronously if "promise" is true (default)
|
||||
if err := playbookManager.executor(hostCheckPlaybook, h.client, query.DefaultString(request.QueryParameter("promise"), "true")); err != nil {
|
||||
api.HandleError(response, request, errors.Wrap(err, "failed to execute host-check playbook"))
|
||||
return
|
||||
}
|
||||
|
||||
// Patch the inventory annotation with the host-check playbook name
|
||||
if updatedInventory.Annotations == nil {
|
||||
updatedInventory.Annotations = make(map[string]string)
|
||||
}
|
||||
updatedInventory.Annotations[kkcorev1.HostCheckPlaybookAnnotation] = hostCheckPlaybook.Name
|
||||
|
||||
patchObj := &kkcorev1.Inventory{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: updatedInventory.Name,
|
||||
Namespace: updatedInventory.Namespace,
|
||||
Annotations: updatedInventory.Annotations,
|
||||
},
|
||||
}
|
||||
baseObj := &kkcorev1.Inventory{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: updatedInventory.Name,
|
||||
Namespace: updatedInventory.Namespace,
|
||||
},
|
||||
}
|
||||
if err := h.client.Patch(request.Request.Context(), patchObj, ctrlclient.MergeFrom(baseObj)); err != nil {
|
||||
api.HandleError(response, request, errors.Wrapf(err, "failed to patch inventory annotation for %s/%s", updatedInventory.Namespace, updatedInventory.Name))
|
||||
return
|
||||
}
|
||||
|
||||
_ = response.WriteEntity(updatedInventory)
|
||||
}
|
||||
|
||||
// List returns all inventory resources with optional filtering and sorting.
|
||||
|
|
@ -85,25 +213,24 @@ func (h *InventoryHandler) List(request *restful.Request, response *restful.Resp
|
|||
var fieldselector fields.Selector
|
||||
// Parse field selector from query parameters if present.
|
||||
if v, ok := queryParam.Filters[query.ParameterFieldSelector]; ok {
|
||||
fs, err := fields.ParseSelector(string(v))
|
||||
fs, err := fields.ParseSelector(v)
|
||||
if err != nil {
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
}
|
||||
fieldselector = fs
|
||||
}
|
||||
namespace := request.PathParameter("namespace")
|
||||
|
||||
inventoryList := &kkcorev1.InventoryList{}
|
||||
// List inventory resources from the Kubernetes API.
|
||||
err := h.client.List(request.Request.Context(), inventoryList, &ctrlclient.ListOptions{Namespace: namespace, LabelSelector: queryParam.Selector(), FieldSelector: fieldselector})
|
||||
err := h.client.List(request.Request.Context(), inventoryList, &ctrlclient.ListOptions{Namespace: request.PathParameter("namespace"), LabelSelector: queryParam.Selector(), FieldSelector: fieldselector})
|
||||
if err != nil {
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Sort and filter the inventory list using DefaultList.
|
||||
results := query.DefaultList(inventoryList.Items, queryParam, func(left, right kkcorev1.Inventory, sortBy query.Field) bool {
|
||||
results := query.DefaultList(inventoryList.Items, queryParam, func(left, right kkcorev1.Inventory, sortBy string) bool {
|
||||
leftMeta, err := meta.Accessor(left)
|
||||
if err != nil {
|
||||
return false
|
||||
|
|
@ -154,7 +281,6 @@ func (h *InventoryHandler) ListHosts(request *restful.Request, response *restful
|
|||
queryParam := query.ParseQueryParameter(request)
|
||||
namespace := request.PathParameter("namespace")
|
||||
name := request.PathParameter("inventory")
|
||||
|
||||
// Retrieve the inventory object from the cluster.
|
||||
inventory := &kkcorev1.Inventory{}
|
||||
err := h.client.Get(request.Request.Context(), ctrlclient.ObjectKey{Namespace: namespace, Name: name}, inventory)
|
||||
|
|
@ -239,6 +365,8 @@ func (h *InventoryHandler) ListHosts(request *restful.Request, response *restful
|
|||
fillByPlaybook := func(playbook kkcorev1.Playbook, item *api.InventoryHostTable) {
|
||||
// Set status and architecture based on playbook phase and result.
|
||||
switch playbook.Status.Phase {
|
||||
case kkcorev1.PlaybookPhasePending, kkcorev1.PlaybookPhaseRunning:
|
||||
item.Status = api.ResultPending
|
||||
case kkcorev1.PlaybookPhaseFailed:
|
||||
item.Status = api.ResultFailed
|
||||
case kkcorev1.PlaybookPhaseSucceeded:
|
||||
|
|
@ -253,7 +381,7 @@ func (h *InventoryHandler) ListHosts(request *restful.Request, response *restful
|
|||
}
|
||||
|
||||
// less is a comparison function for sorting InventoryHostTable items by a given field.
|
||||
less := func(left, right api.InventoryHostTable, sortBy query.Field) bool {
|
||||
less := func(left, right api.InventoryHostTable, sortBy string) bool {
|
||||
// Compare fields for sorting.
|
||||
leftVal := query.GetFieldByJSONTag(reflect.ValueOf(left), sortBy)
|
||||
rightVal := query.GetFieldByJSONTag(reflect.ValueOf(right), sortBy)
|
||||
|
|
@ -273,7 +401,7 @@ func (h *InventoryHandler) ListHosts(request *restful.Request, response *restful
|
|||
val := query.GetFieldByJSONTag(reflect.ValueOf(o), f.Field)
|
||||
switch val.Kind() {
|
||||
case reflect.String:
|
||||
return strings.Contains(val.String(), string(f.Value))
|
||||
return strings.Contains(val.String(), f.Value)
|
||||
default:
|
||||
return true
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ import (
|
|||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
|
@ -13,14 +12,15 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
|
||||
kkcorev1alpha1 "github.com/kubesphere/kubekey/api/core/v1alpha1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/klog/v2"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
|
|
@ -46,7 +46,7 @@ func (h *PlaybookHandler) Post(request *restful.Request, response *restful.Respo
|
|||
playbook := &kkcorev1.Playbook{}
|
||||
// Read the playbook entity from the request body
|
||||
if err := request.ReadEntity(playbook); err != nil {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -73,7 +73,7 @@ func (h *PlaybookHandler) Post(request *restful.Request, response *restful.Respo
|
|||
if err := h.client.List(request.Request.Context(), playbookList, ctrlclient.MatchingLabels{
|
||||
labelKey: labelValue,
|
||||
}); err != nil {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
}
|
||||
// If any playbook with the same schema label exists, this is a conflict
|
||||
|
|
@ -85,22 +85,20 @@ func (h *PlaybookHandler) Post(request *restful.Request, response *restful.Respo
|
|||
|
||||
// Set the workdir in the playbook's spec config
|
||||
if err := unstructured.SetNestedField(playbook.Spec.Config.Value(), h.workdir, _const.Workdir); err != nil {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
}
|
||||
playbook.Status.Phase = kkcorev1.PlaybookPhasePending
|
||||
// Create the playbook resource in Kubernetes
|
||||
if err := h.client.Create(context.TODO(), playbook); err != nil {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
}
|
||||
// Start playbook execution in a separate goroutine
|
||||
go func() {
|
||||
if err := playbookManager.executor(playbook, h.client); err != nil {
|
||||
klog.ErrorS(err, "failed to executor playbook", "playbook", ctrlclient.ObjectKeyFromObject(playbook))
|
||||
}
|
||||
}()
|
||||
|
||||
if err := playbookManager.executor(playbook, h.client, query.DefaultString(request.QueryParameter("promise"), "true")); err != nil {
|
||||
api.HandleError(response, request, errors.Wrap(err, "failed to execute playbook"))
|
||||
return
|
||||
}
|
||||
// For web UI: it does not run in Kubernetes, so execute playbook immediately.
|
||||
_ = response.WriteEntity(playbook)
|
||||
}
|
||||
|
|
@ -112,25 +110,22 @@ func (h *PlaybookHandler) List(request *restful.Request, response *restful.Respo
|
|||
var fieldselector fields.Selector
|
||||
// Parse field selector from query parameters if present.
|
||||
if v, ok := queryParam.Filters[query.ParameterFieldSelector]; ok {
|
||||
fs, err := fields.ParseSelector(string(v))
|
||||
fs, err := fields.ParseSelector(v)
|
||||
if err != nil {
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
}
|
||||
fieldselector = fs
|
||||
}
|
||||
namespace := request.PathParameter("namespace")
|
||||
|
||||
playbookList := &kkcorev1.PlaybookList{}
|
||||
// List playbooks from the Kubernetes API with the specified options.
|
||||
err := h.client.List(request.Request.Context(), playbookList, &ctrlclient.ListOptions{Namespace: namespace, LabelSelector: queryParam.Selector(), FieldSelector: fieldselector})
|
||||
err := h.client.List(request.Request.Context(), playbookList, &ctrlclient.ListOptions{Namespace: request.PathParameter("namespace"), LabelSelector: queryParam.Selector(), FieldSelector: fieldselector})
|
||||
if err != nil {
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Sort and filter the playbook list using DefaultList.
|
||||
results := query.DefaultList(playbookList.Items, queryParam, func(left, right kkcorev1.Playbook, sortBy query.Field) bool {
|
||||
results := query.DefaultList(playbookList.Items, queryParam, func(left, right kkcorev1.Playbook, sortBy string) bool {
|
||||
leftMeta, err := meta.Accessor(left)
|
||||
if err != nil {
|
||||
return false
|
||||
|
|
@ -267,7 +262,11 @@ func (h *PlaybookHandler) Delete(request *restful.Request, response *restful.Res
|
|||
// Retrieve the playbook resource to delete.
|
||||
err := h.client.Get(request.Request.Context(), ctrlclient.ObjectKey{Namespace: namespace, Name: name}, playbook)
|
||||
if err != nil {
|
||||
api.HandleError(response, request, err)
|
||||
if apierrors.IsNotFound(err) {
|
||||
_ = response.WriteEntity(api.SUCCESS)
|
||||
} else {
|
||||
api.HandleError(response, request, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
// Stop the playbook execution if it is running.
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ import (
|
|||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
|
@ -32,6 +33,7 @@ import (
|
|||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/utils"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/web/api"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/web/query"
|
||||
)
|
||||
|
|
@ -67,18 +69,22 @@ func (h ResourceHandler) ConfigInfo(request *restful.Request, response *restful.
|
|||
// PostConfig updates the config file and triggers precheck playbooks if needed.
|
||||
func (h ResourceHandler) PostConfig(request *restful.Request, response *restful.Response) {
|
||||
var (
|
||||
oldConfig map[string]any
|
||||
newConfig map[string]any
|
||||
oldConfig map[string]map[string]any
|
||||
newConfig map[string]map[string]any
|
||||
)
|
||||
bodyBytes, err := io.ReadAll(request.Request.Body)
|
||||
if err != nil {
|
||||
_ = response.WriteError(http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
// Read new config from request body.
|
||||
if err := request.ReadEntity(&newConfig); err != nil {
|
||||
if err := json.Unmarshal(bodyBytes, &newConfig); err != nil {
|
||||
_ = response.WriteError(http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
|
||||
configPath := filepath.Join(h.rootPath, api.SchemaConfigFile)
|
||||
// Open config file for reading and writing.
|
||||
configFile, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
configFile, err := os.OpenFile(filepath.Join(h.rootPath, api.SchemaConfigFile), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
_ = response.WriteError(http.StatusInternalServerError, err)
|
||||
return
|
||||
|
|
@ -91,10 +97,8 @@ func (h ResourceHandler) PostConfig(request *restful.Request, response *restful.
|
|||
return
|
||||
}
|
||||
|
||||
queryParam := query.ParseQueryParameter(request)
|
||||
namespace := queryParam.Filters["cluster"]
|
||||
inventory := queryParam.Filters["inventory"]
|
||||
|
||||
namespace := query.DefaultString(request.QueryParameter("cluster"), "default")
|
||||
inventory := query.DefaultString(request.QueryParameter("inventory"), "default")
|
||||
playbooks := make(map[string]*kkcorev1.Playbook)
|
||||
wg := wait.Group{}
|
||||
|
||||
|
|
@ -115,20 +119,19 @@ func (h ResourceHandler) PostConfig(request *restful.Request, response *restful.
|
|||
}
|
||||
// If a precheck playbook is defined, create and execute it.
|
||||
if pbpath := schemaFile.PlaybookPath["precheck."+api.SchemaLabelSubfix]; pbpath != "" {
|
||||
configRaw, err := json.Marshal(newVal)
|
||||
if err != nil {
|
||||
_ = response.WriteError(http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
playbook := &kkcorev1.Playbook{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "precheck-" + strings.TrimSuffix(fileName, filepath.Ext(fileName)) + "-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: kkcorev1.PlaybookSpec{
|
||||
Config: kkcorev1.Config{
|
||||
Spec: runtime.RawExtension{Raw: configRaw},
|
||||
Spec: runtime.RawExtension{Object: &unstructured.Unstructured{Object: newVal}},
|
||||
},
|
||||
InventoryRef: &corev1.ObjectReference{
|
||||
Kind: "Inventory",
|
||||
Namespace: string(namespace),
|
||||
Name: string(inventory),
|
||||
Namespace: namespace,
|
||||
Name: inventory,
|
||||
},
|
||||
Playbook: pbpath,
|
||||
},
|
||||
|
|
@ -138,17 +141,17 @@ func (h ResourceHandler) PostConfig(request *restful.Request, response *restful.
|
|||
}
|
||||
// Set the workdir in the playbook's spec config
|
||||
if err := unstructured.SetNestedField(playbook.Spec.Config.Value(), h.workdir, _const.Workdir); err != nil {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
}
|
||||
if err := h.client.Create(context.TODO(), playbook); err != nil {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleError(response, request, errors.Wrap(err, "failed to create precheck playbook"))
|
||||
return
|
||||
}
|
||||
playbooks[fileName] = playbook
|
||||
wg.Start(func() {
|
||||
// Execute the playbook asynchronously.
|
||||
if err := playbookManager.executor(playbook, h.client); err != nil {
|
||||
if err := playbookManager.executor(playbook, h.client, "false"); err != nil {
|
||||
klog.ErrorS(err, "failed to executor precheck playbook", "schema", fileName)
|
||||
}
|
||||
})
|
||||
|
|
@ -165,14 +168,14 @@ func (h ResourceHandler) PostConfig(request *restful.Request, response *restful.
|
|||
}
|
||||
|
||||
// Write new config to file.
|
||||
if _, err := io.Copy(configFile, request.Request.Body); err != nil {
|
||||
if _, err := configFile.Write(bodyBytes); err != nil {
|
||||
_ = response.WriteError(http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Respond with precheck results if any failures, otherwise success.
|
||||
if len(preCheckResult) > 0 {
|
||||
_ = response.WriteEntity(api.Result{Message: api.ResultFailed, Result: preCheckResult})
|
||||
_ = response.WriteHeaderAndEntity(http.StatusUnprocessableEntity, api.Result{Message: api.ResultFailed, Result: preCheckResult})
|
||||
} else {
|
||||
_ = response.WriteEntity(api.SUCCESS)
|
||||
}
|
||||
|
|
@ -181,32 +184,24 @@ func (h ResourceHandler) PostConfig(request *restful.Request, response *restful.
|
|||
// ListIP lists all IPs in the given CIDR, checks their online and SSH status, and returns the result.
|
||||
func (h ResourceHandler) ListIP(request *restful.Request, response *restful.Response) {
|
||||
queryParam := query.ParseQueryParameter(request)
|
||||
cidr, ok := queryParam.Filters["cidr"]
|
||||
if !ok || string(cidr) == "" {
|
||||
api.HandleBadRequest(response, request, errors.New("cidr parameter is required"))
|
||||
return
|
||||
}
|
||||
sshPort, ok := queryParam.Filters["sshPort"]
|
||||
if !ok || string(sshPort) == "" {
|
||||
sshPort = "22"
|
||||
}
|
||||
ips := _const.ParseIP(string(cidr))
|
||||
cidr := request.QueryParameter("cidr")
|
||||
sshPort := query.DefaultString(request.QueryParameter("sshPort"), "22")
|
||||
|
||||
ips := utils.ParseIP(cidr)
|
||||
ipTable := make([]api.IPTable, 0, len(ips))
|
||||
maxConcurrency := 20
|
||||
mu := sync.Mutex{}
|
||||
jobChannel := make(chan string, 20)
|
||||
wg := sync.WaitGroup{}
|
||||
wg := wait.Group{}
|
||||
// Start worker goroutines for concurrent IP checking.
|
||||
for range maxConcurrency {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
wg.Start(func() {
|
||||
for ip := range jobChannel {
|
||||
if _const.IsLocalhostIP(ip) {
|
||||
if utils.IsLocalhostIP(ip) {
|
||||
mu.Lock()
|
||||
ipTable = append(ipTable, api.IPTable{
|
||||
IP: ip,
|
||||
SSHPort: string(sshPort),
|
||||
SSHPort: sshPort,
|
||||
Localhost: true,
|
||||
SSHReachable: true,
|
||||
SSHAuthorized: true,
|
||||
|
|
@ -220,18 +215,18 @@ func (h ResourceHandler) ListIP(request *restful.Request, response *restful.Resp
|
|||
if !isIPOnline(ip) {
|
||||
continue
|
||||
}
|
||||
reachable, authorized := isSSHAuthorized(ip, string(sshPort))
|
||||
reachable, authorized := isSSHAuthorized(ip, sshPort)
|
||||
|
||||
mu.Lock()
|
||||
ipTable = append(ipTable, api.IPTable{
|
||||
IP: ip,
|
||||
SSHPort: string(sshPort),
|
||||
SSHPort: sshPort,
|
||||
SSHReachable: reachable,
|
||||
SSHAuthorized: authorized,
|
||||
})
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
})
|
||||
}
|
||||
|
||||
// Send IPs to job channel for processing.
|
||||
|
|
@@ -243,7 +238,7 @@ func (h ResourceHandler) ListIP(request *restful.Request, response *restful.Resp
 wg.Wait()
 
 // less is a comparison function for sorting IPTable items by a given field.
-less := func(left, right api.IPTable, sortBy query.Field) bool {
+less := func(left, right api.IPTable, sortBy string) bool {
 leftVal := query.GetFieldByJSONTag(reflect.ValueOf(left), sortBy)
 rightVal := query.GetFieldByJSONTag(reflect.ValueOf(right), sortBy)
 switch leftVal.Kind() {

@@ -271,7 +266,7 @@ func (h ResourceHandler) ListIP(request *restful.Request, response *restful.Resp
 val := query.GetFieldByJSONTag(reflect.ValueOf(o), f.Field)
 switch val.Kind() {
 case reflect.String:
-return strings.Contains(val.String(), string(f.Value))
+return strings.Contains(val.String(), f.Value)
 default:
 return true
 }
@@ -295,7 +290,7 @@ func (h ResourceHandler) ListSchema(request *restful.Request, response *restful.
 // Read all entries in the rootPath directory.
 entries, err := os.ReadDir(h.rootPath)
 if err != nil {
-api.HandleBadRequest(response, request, err)
+api.HandleError(response, request, err)
 return
 }
 schemaTable := make([]api.SchemaTable, 0)

@@ -309,19 +304,19 @@ func (h ResourceHandler) ListSchema(request *restful.Request, response *restful.
 // Read the JSON file.
 data, err := os.ReadFile(filepath.Join(h.rootPath, entry.Name()))
 if err != nil {
-api.HandleBadRequest(response, request, errors.Wrapf(err, "failed to read file for schema %q", entry.Name()))
+api.HandleError(response, request, errors.Wrapf(err, "failed to read file for schema %q", entry.Name()))
 return
 }
 var schemaFile api.SchemaFile
 // Unmarshal the JSON data into a SchemaTable struct.
 if err := json.Unmarshal(data, &schemaFile); err != nil {
-api.HandleBadRequest(response, request, errors.Wrapf(err, "failed to unmarshal file for schema %q", entry.Name()))
+api.HandleError(response, request, errors.Wrapf(err, "failed to unmarshal file for schema %q", entry.Name()))
 return
 }
 // Get all playbooks in the given namespace.
 playbookList := &kkcorev1.PlaybookList{}
 if err := h.client.List(request.Request.Context(), playbookList, ctrlclient.InNamespace(request.PathParameter("cluster"))); err != nil {
-api.HandleBadRequest(response, request, err)
+api.HandleError(response, request, err)
 return
 }
 schema := api.SchemaFile2Table(schemaFile, entry.Name())

@@ -332,14 +327,14 @@ func (h ResourceHandler) ListSchema(request *restful.Request, response *restful.
 if _, ok := schema.Playbook[label]; ok && schemaName == schema.Name {
 // If a playbook for this label already exists, return an error.
 if schema.Playbook[label].Name != "" {
-api.HandleBadRequest(response, request, errors.Errorf("schema %q has multiple playbooks of label %q", entry.Name(), label))
+api.HandleError(response, request, errors.Errorf("schema %q has multiple playbooks of label %q", entry.Name(), label))
 return
 }
 var result any
 // If the playbook has a result, unmarshal it.
 if len(playbook.Status.Result.Raw) != 0 {
 if err := json.Unmarshal(playbook.Status.Result.Raw, &result); err != nil {
-api.HandleBadRequest(response, request, errors.Errorf("failed to unmarshal result from playbook of schema %q", schema.Name))
+api.HandleError(response, request, errors.Errorf("failed to unmarshal result from playbook of schema %q", schema.Name))
 return
 }
 }

@@ -358,7 +353,7 @@ func (h ResourceHandler) ListSchema(request *restful.Request, response *restful.
 schemaTable = append(schemaTable, schema)
 }
 // less is a comparison function for sorting SchemaTable items by a given field.
-less := func(left, right api.SchemaTable, sortBy query.Field) bool {
+less := func(left, right api.SchemaTable, sortBy string) bool {
 leftVal := query.GetFieldByJSONTag(reflect.ValueOf(left), sortBy)
 rightVal := query.GetFieldByJSONTag(reflect.ValueOf(right), sortBy)
 switch leftVal.Kind() {

@@ -376,9 +371,9 @@ func (h ResourceHandler) ListSchema(request *restful.Request, response *restful.
 val := query.GetFieldByJSONTag(reflect.ValueOf(o), f.Field)
 switch val.Kind() {
 case reflect.String:
-return strings.Contains(val.String(), string(f.Value))
+return strings.Contains(val.String(), f.Value)
 case reflect.Int:
-v, err := strconv.Atoi(string(f.Value))
+v, err := strconv.Atoi(f.Value)
 if err != nil {
 return false
 }
@@ -18,12 +18,6 @@ package query
 
 import "reflect"
 
-// Field represents a query field name used for filtering and sorting
-type Field string
-
-// Value represents a query field value used for filtering
-type Value string
-
 const (
 // FieldName represents the name field of a resource
 FieldName = "name"

@@ -56,7 +50,7 @@ const (
 // GetFieldByJSONTag returns the value of the struct field whose JSON tag matches the given field name (filed).
 // If not found by JSON tag, it tries to find the field by its struct field name.
 // The function expects obj to be a struct or a pointer to a struct.
-func GetFieldByJSONTag(obj reflect.Value, filed Field) reflect.Value {
+func GetFieldByJSONTag(obj reflect.Value, filed string) reflect.Value {
 // If obj is a pointer, get the element it points to
 if obj.Kind() == reflect.Ptr {
 obj = obj.Elem()
@@ -72,17 +66,17 @@ func GetFieldByJSONTag(obj reflect.Value, filed Field) reflect.Value {
 jsonTag := structField.Tag.Get("json")
 // The tag may have options, e.g. "name,omitempty"
 // Check for exact match or prefix match before comma
-if jsonTag == string(filed) ||
-(jsonTag != "" && jsonTag == string(filed)+",omitempty") ||
-(jsonTag != "" && len(jsonTag) >= len(string(filed)) &&
-jsonTag[:len(string(filed))] == string(filed) &&
-(len(jsonTag) == len(string(filed)) || jsonTag[len(string(filed))] == ',')) {
+if jsonTag == filed ||
+(jsonTag != "" && jsonTag == filed+",omitempty") ||
+(jsonTag != "" && len(jsonTag) >= len(filed) &&
+jsonTag[:len(filed)] == filed &&
+(len(jsonTag) == len(filed) || jsonTag[len(filed)] == ',')) {
 // Return the field value if the JSON tag matches
 return obj.Field(i)
 }
 }
 // If not found by json tag, try by field name (case-sensitive)
-if f := obj.FieldByName(string(filed)); f.IsValid() {
+if f := obj.FieldByName(filed); f.IsValid() {
 return f
 }
 // Return zero Value if not found
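
GetFieldByJSONTag drives both the sorting and filtering closures above: it resolves a field on an arbitrary struct by its json tag, falling back to the exported Go field name. A simplified sketch of the same idea (not the repository's exact implementation; tag options after the first comma are simply ignored here):

package example

import (
    "reflect"
    "strings"
)

// fieldByJSONTag returns the value of the struct field whose json tag name
// matches name, falling back to the exported field name; it returns the zero
// reflect.Value when nothing matches.
func fieldByJSONTag(obj reflect.Value, name string) reflect.Value {
    if obj.Kind() == reflect.Ptr {
        obj = obj.Elem()
    }
    if obj.Kind() != reflect.Struct {
        return reflect.Value{}
    }
    for i := range obj.NumField() {
        tag := obj.Type().Field(i).Tag.Get("json")
        // "name,omitempty" -> "name"
        if tagName, _, _ := strings.Cut(tag, ","); tagName == name {
            return obj.Field(i)
        }
    }
    return obj.FieldByName(name)
}
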
@@ -29,7 +29,7 @@ import (
 
 // CompareFunc is a generic function type that compares two objects of type T
 // Returns true if left is greater than right
-type CompareFunc[T any] func(T, T, Field) bool
+type CompareFunc[T any] func(T, T, string) bool
 
 // FilterFunc is a generic function type that filters objects of type T
 // Returns true if the object matches the filter criteria
@@ -93,7 +93,7 @@ func DefaultList[T any](objects []T, q *Query, compareFunc CompareFunc[T], filte
 // DefaultObjectMetaCompare compares two metav1.Object instances
 // Returns true if left is greater than right based on the specified sort field
 // Supports sorting by name or creation timestamp
-func DefaultObjectMetaCompare(left, right metav1.Object, sortBy Field) bool {
+func DefaultObjectMetaCompare(left, right metav1.Object, sortBy string) bool {
 switch sortBy {
 // ?sortBy=name
 case FieldName:
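
With the sort key now a plain string, a CompareFunc for any list type is just a small closure. A hedged sketch of a comparator in the spirit of DefaultObjectMetaCompare (name or creation timestamp), not its exact code:

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// compareObjectMeta reports whether left should sort after right for the given
// key. Only "name" is special-cased; everything else falls back to creation
// time, which is an assumption of this sketch.
func compareObjectMeta(left, right metav1.Object, sortBy string) bool {
    switch sortBy {
    case "name":
        return left.GetName() > right.GetName()
    default:
        return left.GetCreationTimestamp().Time.After(right.GetCreationTimestamp().Time)
    }
}
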
@@ -121,7 +121,7 @@ func DefaultObjectMetaCompare(left, right metav1.Object, sortBy Field) bool {
 func DefaultObjectMetaFilter(item metav1.Object, filter Filter) bool {
 switch filter.Field {
 case FieldNames:
-for _, name := range strings.Split(string(filter.Value), ",") {
+for _, name := range strings.Split(filter.Value, ",") {
 if item.GetName() == name {
 return true
 }

@@ -129,17 +129,17 @@ func DefaultObjectMetaFilter(item metav1.Object, filter Filter) bool {
 return false
 // /namespaces?page=1&limit=10&name=default
 case FieldName:
-return strings.Contains(item.GetName(), string(filter.Value))
+return strings.Contains(item.GetName(), filter.Value)
 // /namespaces?page=1&limit=10&uid=a8a8d6cf-f6a5-4fea-9c1b-e57610115706
 case FieldUID:
-return string(item.GetUID()) == string(filter.Value)
+return string(item.GetUID()) == filter.Value
 // /deployments?page=1&limit=10&namespace=kubesphere-system
 case FieldNamespace:
-return item.GetNamespace() == string(filter.Value)
+return item.GetNamespace() == filter.Value
 // /namespaces?page=1&limit=10&ownerReference=a8a8d6cf-f6a5-4fea-9c1b-e57610115706
 case FieldOwnerReference:
 for _, ownerReference := range item.GetOwnerReferences() {
-if string(ownerReference.UID) == string(filter.Value) {
+if string(ownerReference.UID) == filter.Value {
 return true
 }
 }

@@ -147,17 +147,17 @@ func DefaultObjectMetaFilter(item metav1.Object, filter Filter) bool {
 // /namespaces?page=1&limit=10&ownerKind=Workspace
 case FieldOwnerKind:
 for _, ownerReference := range item.GetOwnerReferences() {
-if ownerReference.Kind == string(filter.Value) {
+if ownerReference.Kind == filter.Value {
 return true
 }
 }
 return false
 // /namespaces?page=1&limit=10&annotation=openpitrix_runtime
 case FieldAnnotation:
-return labelMatch(item.GetAnnotations(), string(filter.Value))
+return labelMatch(item.GetAnnotations(), filter.Value)
 // /namespaces?page=1&limit=10&label=kubesphere.io/workspace:system-workspace
 case FieldLabel:
-return labelMatch(item.GetLabels(), string(filter.Value))
+return labelMatch(item.GetLabels(), filter.Value)
 // not supported filter
 default:
 return true
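
Since Filter now carries plain strings, ad-hoc filters can be built straight from query values. A cut-down sketch of the name case only, mirroring how the default filter treats ?name=... as a substring match (hypothetical helper, not the package's API):

package example

import (
    "strings"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// filter mirrors the query.Filter shape after this change: two plain strings.
type filter struct {
    Field string
    Value string
}

// matchesName implements only the "name" case as a substring match; unknown
// fields are treated as matching, as the default filter does.
func matchesName(item metav1.Object, f filter) bool {
    if f.Field != "name" {
        return true
    }
    return strings.Contains(item.GetName(), f.Value)
}
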
@@ -40,13 +40,13 @@ type Query struct {
 Pagination *Pagination // Pagination settings for the query results
 
 // SortBy specifies which field to sort results by, defaults to FieldCreationTimeStamp
-SortBy Field
+SortBy string
 
 // Ascending determines sort direction, defaults to descending (false)
 Ascending bool
 
 // Filters contains field-value pairs for filtering results
-Filters map[Field]Value
+Filters map[string]string
 
 // LabelSelector contains the label selector string for filtering by labels
 LabelSelector string

@@ -118,14 +118,14 @@ func New() *Query {
 Pagination: NoPagination,
 SortBy: "",
 Ascending: false,
-Filters: map[Field]Value{},
+Filters: map[string]string{},
 }
 }
 
 // Filter represents a single field-value filter pair
 type Filter struct {
-Field Field `json:"field"` // Field to filter on
-Value Value `json:"value"` // Value to filter by
+Field string `json:"field"` // Field to filter on
+Value string `json:"value"` // Value to filter by
 }
 
 // ParseQueryParameter parses query parameters from a RESTful request into a Query struct
@@ -145,9 +145,9 @@ func ParseQueryParameter(request *restful.Request) *Query {
 query.Pagination = newPagination(limit, (page-1)*limit)
 
 // Parse sorting parameters
-query.SortBy = Field(defaultString(request.QueryParameter(ParameterOrderBy), FieldCreationTimeStamp))
+query.SortBy = DefaultString(request.QueryParameter(ParameterOrderBy), FieldCreationTimeStamp)
 
-ascending, err := strconv.ParseBool(defaultString(request.QueryParameter(ParameterAscending), "false"))
+ascending, err := strconv.ParseBool(DefaultString(request.QueryParameter(ParameterAscending), "false"))
 if err != nil {
 query.Ascending = false
 } else {

@@ -164,15 +164,15 @@ func ParseQueryParameter(request *restful.Request) *Query {
 if len(values) > 0 {
 value = values[0]
 }
-query.Filters[Field(key)] = Value(value)
+query.Filters[key] = value
 }
 }
 
 return query
 }
 
-// defaultString returns the default value if the input string is empty
-func defaultString(value, defaultValue string) string {
+// DefaultString returns the default value if the input string is empty
+func DefaultString(value, defaultValue string) string {
 if value == "" {
 return defaultValue
 }
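
Exporting defaultString as DefaultString lets handlers (for example ListIP above) fall back to a default for optional query parameters without reaching into the parsed Query. A small sketch of the helper and a typical call site, with the parameter name chosen for illustration:

package example

import restful "github.com/emicklei/go-restful/v3"

// defaultString returns fallback when value is empty, mirroring the exported
// query.DefaultString helper.
func defaultString(value, fallback string) string {
    if value == "" {
        return fallback
    }
    return value
}

// sshPortFor reads an optional ?sshPort=... query parameter, defaulting to "22".
func sshPortFor(req *restful.Request) string {
    return defaultString(req.QueryParameter("sshPort"), "22")
}
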
@@ -38,10 +38,10 @@ func TestParseQueryParameter(t *testing.T) {
 Pagination: newPagination(10, 0),
 SortBy: FieldCreationTimeStamp,
 Ascending: true,
-Filters: map[Field]Value{
-FieldLabel: Value("app.kubernetes.io/name=book"),
-FieldName: Value("foo"),
-FieldStatus: Value("Running"),
+Filters: map[string]string{
+FieldLabel: "app.kubernetes.io/name=book",
+FieldName: "foo",
+FieldStatus: "Running",
 },
 },
 },

@@ -52,9 +52,9 @@ func TestParseQueryParameter(t *testing.T) {
 Pagination: NoPagination,
 SortBy: FieldCreationTimeStamp,
 Ascending: false,
-Filters: map[Field]Value{
-Field("xxxx"): Value("xxxx"),
-Field("dsfsw"): Value("xxxx"),
+Filters: map[string]string{
+"xxxx": "xxxx",
+"dsfsw": "xxxx",
 },
 },
 },
@@ -35,23 +35,24 @@ func NewCoreService(workdir string, client ctrlclient.Client, restconfig *rest.C
 // Inventory management routes
 ws.Route(ws.POST("/inventories").To(inventoryHandler.Post).
 Metadata(restfulspec.KeyOpenAPITags, []string{_const.KubeKeyTag}).
-Doc("create a inventory.").
+Doc("create a inventory.").Operation("createInventory").
 Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON).
 Reads(kkcorev1.Inventory{}).
 Returns(http.StatusOK, _const.StatusOK, kkcorev1.Inventory{}))
 
 ws.Route(ws.PATCH("/namespaces/{namespace}/inventories/{inventory}").To(inventoryHandler.Patch).
 Metadata(restfulspec.KeyOpenAPITags, []string{_const.KubeKeyTag}).
-Doc("patch a inventory.").
+Doc("patch a inventory.").Operation("patchInventory").
 Consumes(string(types.JSONPatchType), string(types.MergePatchType), string(types.ApplyPatchType)).Produces(restful.MIME_JSON).
 Reads(kkcorev1.Inventory{}).
 Param(ws.PathParameter("namespace", "the namespace of the inventory")).
 Param(ws.PathParameter("inventory", "the name of the inventory")).
+Param(ws.QueryParameter("promise", "promise to execute playbook").Required(false).DefaultValue("true")).
 Returns(http.StatusOK, _const.StatusOK, kkcorev1.Inventory{}))
 
 ws.Route(ws.GET("/inventories").To(inventoryHandler.List).
 Metadata(restfulspec.KeyOpenAPITags, []string{_const.KubeKeyTag}).
-Doc("list all inventories.").
+Doc("list all inventories.").Operation("listInventory").
 Produces(restful.MIME_JSON).
 Param(ws.QueryParameter(query.ParameterPage, "page").Required(false).DataFormat("page=%d")).
 Param(ws.QueryParameter(query.ParameterLimit, "limit").Required(false)).

@@ -62,7 +63,7 @@ func NewCoreService(workdir string, client ctrlclient.Client, restconfig *rest.C
 ws.Route(ws.GET("/namespaces/{namespace}/inventories").To(inventoryHandler.List).
 Metadata(restfulspec.KeyOpenAPITags, []string{_const.KubeKeyTag}).
 Doc("list all inventories in a namespace.").
-Produces(restful.MIME_JSON).
+Produces(restful.MIME_JSON).Operation("listInventoryInNamespace").
 Param(ws.PathParameter("namespace", "the namespace of the inventory")).
 Param(ws.QueryParameter(query.ParameterPage, "page").Required(false).DataFormat("page=%d")).
 Param(ws.QueryParameter(query.ParameterLimit, "limit").Required(false)).

@@ -72,7 +73,7 @@ func NewCoreService(workdir string, client ctrlclient.Client, restconfig *rest.C
 
 ws.Route(ws.GET("/namespaces/{namespace}/inventories/{inventory}").To(inventoryHandler.Info).
 Metadata(restfulspec.KeyOpenAPITags, []string{_const.KubeKeyTag}).
-Doc("get a inventory in a namespace.").
+Doc("get a inventory in a namespace.").Operation("getInventory").
 Produces(restful.MIME_JSON).
 Param(ws.PathParameter("namespace", "the namespace of the inventory")).
 Param(ws.PathParameter("inventory", "the name of the inventory")).

@@ -80,7 +81,7 @@ func NewCoreService(workdir string, client ctrlclient.Client, restconfig *rest.C
 
 ws.Route(ws.GET("/namespaces/{namespace}/inventories/{inventory}/hosts").To(inventoryHandler.ListHosts).
 Metadata(restfulspec.KeyOpenAPITags, []string{_const.KubeKeyTag}).
-Doc("list all hosts in a inventory.").
+Doc("list all hosts in a inventory.").Operation("listInventoryHosts").
 Produces(restful.MIME_JSON).
 Param(ws.PathParameter("namespace", "the namespace of the inventory")).
 Param(ws.PathParameter("inventory", "the name of the inventory")).
@@ -94,14 +95,15 @@ func NewCoreService(workdir string, client ctrlclient.Client, restconfig *rest.C
 // Playbook management routes
 ws.Route(ws.POST("/playbooks").To(playbookHandler.Post).
 Metadata(restfulspec.KeyOpenAPITags, []string{_const.KubeKeyTag}).
-Doc("create a playbook.").
+Doc("create a playbook.").Operation("createPlaybook").
+Param(ws.QueryParameter("promise", "promise to execute playbook").Required(false).DefaultValue("true")).
 Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON).
 Reads(kkcorev1.Playbook{}).
 Returns(http.StatusOK, _const.StatusOK, kkcorev1.Playbook{}))
 
 ws.Route(ws.GET("/playbooks").To(playbookHandler.List).
 Metadata(restfulspec.KeyOpenAPITags, []string{_const.KubeKeyTag}).
-Doc("list all playbooks.").
+Doc("list all playbooks.").Operation("listPlaybook").
 Produces(restful.MIME_JSON).
 Param(ws.QueryParameter(query.ParameterPage, "page").Required(false).DataFormat("page=%d")).
 Param(ws.QueryParameter(query.ParameterLimit, "limit").Required(false)).

@@ -111,7 +113,7 @@ func NewCoreService(workdir string, client ctrlclient.Client, restconfig *rest.C
 
 ws.Route(ws.GET("/namespaces/{namespace}/playbooks").To(playbookHandler.List).
 Metadata(restfulspec.KeyOpenAPITags, []string{_const.KubeKeyTag}).
-Doc("list all playbooks in a namespace.").
+Doc("list all playbooks in a namespace.").Operation("listPlaybookInNamespace").
 Produces(restful.MIME_JSON).
 Param(ws.PathParameter("namespace", "the namespace of the playbook")).
 Param(ws.QueryParameter(query.ParameterPage, "page").Required(false).DataFormat("page=%d")).

@@ -122,7 +124,7 @@ func NewCoreService(workdir string, client ctrlclient.Client, restconfig *rest.C
 
 ws.Route(ws.GET("/namespaces/{namespace}/playbooks/{playbook}").To(playbookHandler.Info).
 Metadata(restfulspec.KeyOpenAPITags, []string{_const.KubeKeyTag}).
-Doc("get or watch a playbook in a namespace.").
+Doc("get or watch a playbook in a namespace.").Operation("getPlaybook").
 Produces(restful.MIME_JSON).
 Param(ws.PathParameter("namespace", "the namespace of the playbook")).
 Param(ws.PathParameter("playbook", "the name of the playbook")).

@@ -131,7 +133,7 @@ func NewCoreService(workdir string, client ctrlclient.Client, restconfig *rest.C
 
 ws.Route(ws.GET("/namespaces/{namespace}/playbooks/{playbook}/log").To(playbookHandler.Log).
 Metadata(restfulspec.KeyOpenAPITags, []string{_const.KubeKeyTag}).
-Doc("get a playbook execute log.").
+Doc("get a playbook execute log.").Operation("getPlaybookLog").
 Produces("text/plain").
 Param(ws.PathParameter("namespace", "the namespace of the playbook")).
 Param(ws.PathParameter("playbook", "the name of the playbook")).

@@ -139,7 +141,7 @@ func NewCoreService(workdir string, client ctrlclient.Client, restconfig *rest.C
 
 ws.Route(ws.DELETE("/namespaces/{namespace}/playbooks/{playbook}").To(playbookHandler.Delete).
 Metadata(restfulspec.KeyOpenAPITags, []string{_const.KubeKeyTag}).
-Doc("delete a playbook.").
+Doc("delete a playbook.").Operation("deletePlaybook").
 Produces(restful.MIME_JSON).
 Param(ws.PathParameter("namespace", "the namespace of the playbook")).
 Param(ws.PathParameter("playbook", "the name of the playbook")).
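
Each route now carries an explicit OpenAPI operation id via the RouteBuilder's Operation call, and the create/patch routes gain an optional promise query parameter for asynchronous execution. A stripped-down sketch of registering one such route with go-restful, using a placeholder handler and omitting KubeKey's tags and typed payloads:

package example

import (
    "net/http"

    restful "github.com/emicklei/go-restful/v3"
)

// registerCreatePlaybook wires a single POST route the same way the service does:
// a Doc string, an explicit operation id, and an optional "promise" query
// parameter that controls whether execution is awaited.
func registerCreatePlaybook(ws *restful.WebService, handler restful.RouteFunction) {
    ws.Route(ws.POST("/playbooks").To(handler).
        Doc("create a playbook.").Operation("createPlaybook").
        Param(ws.QueryParameter("promise", "promise to execute playbook").Required(false).DefaultValue("true")).
        Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON).
        Returns(http.StatusOK, "OK", nil))
}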