fix: change pipeline to playbook (#2512)

Signed-off-by: joyceliu <joyceliu@yunify.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
liujian 2025-03-24 09:51:03 +08:00 committed by GitHub
parent ccca0edd81
commit e40c57fb9f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
106 changed files with 1461 additions and 1611 deletions

View File

@ -139,19 +139,21 @@ linters-settings:
# List of allowed packages.
allow:
- $gostd
- github.com/spf13
- github.com/pkg/sftp
- github.com/google/gops
- github.com/go-git/go-git
- github.com/cockroachdb/errors
- github.com/containerd/containerd/images
- github.com/fsnotify/fsnotify
- github.com/schollz/progressbar
- github.com/stretchr/testify
- github.com/go-git/go-git
- github.com/google/gops
- github.com/kubesphere/kubekey
- github.com/Masterminds/sprig
- github.com/opencontainers/image-spec
- oras.land/oras-go
- github.com/pkg/sftp
- github.com/schollz/progressbar
- github.com/spf13
- github.com/stretchr/testify
- k8s.io
- oras.land/oras-go
- sigs.k8s.io
- github.com/kubesphere/kubekey
forbidigo:
# Forbid the following identifiers (list of regexp).
# Default: ["^(fmt\\.Print(|f|ln)|print|println)$"]
@ -657,9 +659,6 @@ linters-settings:
- - 'core.WriteError[1].Message'
- '/^([^A-Z]|$)/'
- must not start with a capital letter
- - 'fmt.Errorf[0]'
- '/(^|[^\.!?])$/'
- must not end in punctuation
- - panic
- '/^[^\n]*$/'
- must not contain line breaks

View File

@ -172,6 +172,7 @@ generate-manifests-capkk: $(CONTROLLER_GEN) $(KUSTOMIZE) clean-crds-capkk ## Gen
.PHONY: generate-modules
generate-modules: ## Run go mod tidy to ensure modules are up to date
@cd api && go mod tidy
@go mod tidy
.PHONY: generate-goimports
generate-goimports: ## Format all import, `goimports` is required.

View File

@ -10,17 +10,17 @@
```shell
helm upgrade --install --create-namespace -n kubekey-system kubekey kubekey-1.0.0.tgz
```
然后通过创建Inventory, Config, 和Pipeline资源来执行命令
然后通过创建Inventory, Config, 和Playbook资源来执行命令
**Inventory**: 任务执行的host清单. 用于定义与host相关, 与任务模板无关的变量. 详见[参数定义](docs/zh/201-variable.md)
**Config**: 给任务模板设置全局变量. 用于定义与host无关, 与任务模板相关的变量. 详见[参数定义](docs/zh/201-variable.md)
**Pipeline**: 指定执行的playbook文件
**Playbook**: 指定执行的playbook文件
## 二进制执行
可直接用二进制在命令行中执行命令
```shell
kk run -i inventory.yaml -c config.yaml playbook.yaml
```
运行命令后, 会在工作目录的runtime下生成对应的Inventory, Config和Pipeline资源
运行命令后, 会在工作目录的runtime下生成对应的Inventory, Config和Playbook资源
# 文档
**[项目模版编写规范](docs/zh/001-project.md)**

View File

@ -30,10 +30,10 @@ const (
// KKMachineBelongGroupLabel defines which group the kkmachine belongs to.
KKMachineBelongGroupLabel = "kkmachine.infrastructure.cluster.x-k8s.io/group"
// AddNodePipelineAnnotation add node to cluster.
AddNodePipelineAnnotation = "pipeline.kubekey.kubesphere.io/add-node"
// DeleteNodePipelineAnnotation remove node from cluster.
DeleteNodePipelineAnnotation = "pipeline.kubekey.kubesphere.io/delete-node"
// AddNodePlaybookAnnotation add node to cluster.
AddNodePlaybookAnnotation = "playbook.kubekey.kubesphere.io/add-node"
// DeleteNodePlaybookAnnotation remove node from cluster.
DeleteNodePlaybookAnnotation = "playbook.kubekey.kubesphere.io/delete-node"
)
type KKMachineFailedReason string

View File

@ -17,10 +17,10 @@ limitations under the License.
package v1
import (
"fmt"
"reflect"
"strings"
"github.com/cockroachdb/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/json"
@ -39,7 +39,7 @@ func (c *Config) SetValue(key string, value any) error {
configMap := make(map[string]any)
if c.Spec.Raw != nil {
if err := json.Unmarshal(c.Spec.Raw, &configMap); err != nil {
return err
return errors.WithStack(err)
}
}
// set value
@ -69,7 +69,7 @@ func (c *Config) SetValue(key string, value any) error {
}
data, err := json.Marshal(f(configMap, strings.Split(key, "."), value))
if err != nil {
return err
return errors.Wrapf(err, "failed to marshal %q value to json", key)
}
c.Spec.Raw = data
@ -81,7 +81,7 @@ func (c *Config) SetValue(key string, value any) error {
func (c *Config) GetValue(key string) (any, error) {
configMap := make(map[string]any)
if err := json.Unmarshal(c.Spec.Raw, &configMap); err != nil {
return nil, err
return nil, errors.WithStack(err)
}
// get all value
if key == "" {
@ -93,7 +93,7 @@ func (c *Config) GetValue(key string) (any, error) {
r, ok := result.(map[string]any)
if !ok {
// cannot find value
return nil, fmt.Errorf("cannot find key: %s", key)
return nil, errors.Errorf("cannot find key: %s", key)
}
result = r[k]
}

View File

@ -1,24 +1,23 @@
package v1
import (
"fmt"
"github.com/cockroachdb/errors"
"k8s.io/apimachinery/pkg/runtime"
)
const PipelineFieldPlaybook = "spec.playbook"
const PlaybookFieldPlaybook = "spec.playbook"
// AddConversionFuncs adds the conversion functions to the given scheme.
// NOTE: ownerReferences:pipeline is valid in proxy client.
// NOTE: ownerReferences:playbook is valid in proxy client.
func AddConversionFuncs(scheme *runtime.Scheme) error {
return scheme.AddFieldLabelConversionFunc(
SchemeGroupVersion.WithKind("Pipeline"),
SchemeGroupVersion.WithKind("Playbook"),
func(label, value string) (string, string, error) {
if label == PipelineFieldPlaybook {
if label == PlaybookFieldPlaybook {
return label, value, nil
}
return "", "", fmt.Errorf("field label %q not supported for Pipeline", label)
return "", "", errors.Errorf("field label %q not supported for Playbook", label)
},
)
}

View File

@ -22,11 +22,11 @@ import (
)
const (
// InventoryCAPKKFinalizer is used to waiting ref pipeline compelete when inventory is deleted.
// InventoryCAPKKFinalizer is used to wait for the referenced playbook to complete when the inventory is deleted.
InventoryCAPKKFinalizer = "inventory.kubekey.kubesphere.io/capkk"
// HostCheckPipelineAnnotation store which pipeline is used to check hosts.
HostCheckPipelineAnnotation = "pipeline.kubekey.kubesphere.io/host-check"
// HostCheckPlaybookAnnotation store which playbook is used to check hosts.
HostCheckPlaybookAnnotation = "playbook.kubekey.kubesphere.io/host-check"
)
// InventoryPhase of inventory. It is always used in capkk to judge whether the hosts have been checked.
@ -35,11 +35,11 @@ type InventoryPhase string
const (
// InventoryPhasePending inventory has created but has never been checked once
InventoryPhasePending InventoryPhase = "Pending"
// InventoryPhaseRunning inventory host_check pipeline is running.
// InventoryPhaseRunning inventory host_check playbook is running.
InventoryPhaseRunning InventoryPhase = "Running"
// InventoryPhaseReady inventory host_check pipeline run successfully.
// InventoryPhaseSucceeded inventory host_check playbook ran successfully.
InventoryPhaseSucceeded InventoryPhase = "Succeeded"
// InventoryPhaseReady inventory host_check pipeline run check failed.
// InventoryPhaseFailed inventory host_check playbook run check failed.
InventoryPhaseFailed InventoryPhase = "Failed"
)

View File

@ -25,40 +25,40 @@ const (
// BuiltinsProjectAnnotation use builtins project of KubeKey
BuiltinsProjectAnnotation = "kubekey.kubesphere.io/builtins-project"
// PipelineCompletedFinalizer will be removed after the Pipeline is completed.
PipelineCompletedFinalizer = "kubekey.kubesphere.io/pipeline-completed"
// PlaybookCompletedFinalizer will be removed after the Playbook is completed.
PlaybookCompletedFinalizer = "kubekey.kubesphere.io/playbook-completed"
)
// PipelinePhase of Pipeline
type PipelinePhase string
// PlaybookPhase of Playbook
type PlaybookPhase string
const (
// PipelinePhasePending of Pipeline. Pipeline has created but not deal
PipelinePhasePending PipelinePhase = "Pending"
// PipelinePhaseRunning of Pipeline. deal Pipeline.
PipelinePhaseRunning PipelinePhase = "Running"
// PipelinePhaseFailed of Pipeline. once Task run failed.
PipelinePhaseFailed PipelinePhase = "Failed"
// PipelinePhaseSucceed of Pipeline. all Tasks run success.
PipelinePhaseSucceeded PipelinePhase = "Succeeded"
// PlaybookPhasePending of Playbook. The Playbook has been created but not yet processed.
PlaybookPhasePending PlaybookPhase = "Pending"
// PlaybookPhaseRunning of Playbook. The Playbook is being processed.
PlaybookPhaseRunning PlaybookPhase = "Running"
// PlaybookPhaseFailed of Playbook. Set once a Task run fails.
PlaybookPhaseFailed PlaybookPhase = "Failed"
// PlaybookPhaseSucceeded of Playbook. All Tasks ran successfully.
PlaybookPhaseSucceeded PlaybookPhase = "Succeeded"
)
type PipelineFailedReason string
type PlaybookFailedReason string
const (
// PipelineFailedReasonUnknown is the default failed reason.
PipelineFailedReasonUnknown PipelineFailedReason = "unknown"
// PipelineFailedReasonPodFailed pod exec failed.
PipelineFailedReasonPodFailed PipelineFailedReason = "pod executor failed"
// PipelineFailedReasonTaskFailed task exec failed.
PipelineFailedReasonTaskFailed PipelineFailedReason = "task executor failed"
// PlaybookFailedReasonUnknown is the default failed reason.
PlaybookFailedReasonUnknown PlaybookFailedReason = "unknown"
// PlaybookFailedReasonPodFailed pod exec failed.
PlaybookFailedReasonPodFailed PlaybookFailedReason = "pod executor failed"
// PlaybookFailedReasonTaskFailed task exec failed.
PlaybookFailedReasonTaskFailed PlaybookFailedReason = "task executor failed"
)
// PipelineSpec of pipeline.
type PipelineSpec struct {
// PlaybookSpec of playbook.
type PlaybookSpec struct {
// Project is storage for executable packages
// +optional
Project PipelineProject `json:"project,omitempty"`
Project PlaybookProject `json:"project,omitempty"`
// Playbook which to execute.
Playbook string `json:"playbook"`
// InventoryRef is the node configuration for playbook
@ -73,7 +73,7 @@ type PipelineSpec struct {
// SkipTags is the tags of playbook which skip execute
// +optional
SkipTags []string `json:"skipTags,omitempty"`
// If Debug mode is true, It will retain runtime data after a successful execution of Pipeline,
// If Debug mode is true, It will retain runtime data after a successful execution of Playbook,
// which includes task execution status and parameters.
// +optional
Debug bool `json:"debug,omitempty"`
@ -89,8 +89,8 @@ type PipelineSpec struct {
ServiceAccountName string `json:"serviceAccountName,omitempty"`
}
// PipelineProject respect which playbook store.
type PipelineProject struct {
// PlaybookProject specifies where the playbook is stored.
type PlaybookProject struct {
// Addr is the storage for executable packages (in Ansible file format).
// When starting with http or https, it will be obtained from a Git repository.
// When starting with file path, it will be obtained from the local path.
@ -113,24 +113,24 @@ type PipelineProject struct {
Token string `json:"token,omitempty"`
}
// PipelineStatus of Pipeline
type PipelineStatus struct {
// PlaybookStatus of Playbook
type PlaybookStatus struct {
// TaskResult total related tasks execute result.
TaskResult PipelineTaskResult `json:"taskResult,omitempty"`
// Phase of pipeline.
Phase PipelinePhase `json:"phase,omitempty"`
TaskResult PlaybookTaskResult `json:"taskResult,omitempty"`
// Phase of playbook.
Phase PlaybookPhase `json:"phase,omitempty"`
// FailureReason will be set in the event that there is a terminal problem
// +optional
FailureReason PipelineFailedReason `json:"failureReason,omitempty"`
FailureReason PlaybookFailedReason `json:"failureReason,omitempty"`
// FailureMessage will be set in the event that there is a terminal problem
// +optional
FailureMessage string `json:"failureMessage,omitempty"`
// FailedDetail will record the failed tasks.
FailedDetail []PipelineFailedDetail `json:"failedDetail,omitempty"`
FailedDetail []PlaybookFailedDetail `json:"failedDetail,omitempty"`
}
// PipelineTaskResult of Pipeline
type PipelineTaskResult struct {
// PlaybookTaskResult of Playbook
type PlaybookTaskResult struct {
// Total number of tasks.
Total int `json:"total,omitempty"`
// Success number of tasks.
@ -141,16 +141,16 @@ type PipelineTaskResult struct {
Ignored int `json:"ignored,omitempty"`
}
// PipelineFailedDetail store failed message when pipeline run failed.
type PipelineFailedDetail struct {
// PlaybookFailedDetail stores the failure messages when a playbook run fails.
type PlaybookFailedDetail struct {
// Task name of failed task.
Task string `json:"task,omitempty"`
// failed Hosts Result of failed task.
Hosts []PipelineFailedDetailHost `json:"hosts,omitempty"`
Hosts []PlaybookFailedDetailHost `json:"hosts,omitempty"`
}
// PipelineFailedDetailHost detail failed message for each host.
type PipelineFailedDetailHost struct {
// PlaybookFailedDetailHost detail failed message for each host.
type PlaybookFailedDetailHost struct {
// Host name of failed task.
Host string `json:"host,omitempty"`
// Stdout of failed task.
@ -169,33 +169,24 @@ type PipelineFailedDetailHost struct {
// +kubebuilder:printcolumn:name="Total",type="integer",JSONPath=".status.taskResult.total"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// Pipeline resource executor a playbook.
type Pipeline struct {
// Playbook resource executes a playbook.
type Playbook struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec PipelineSpec `json:"spec,omitempty"`
Status PipelineStatus `json:"status,omitempty"`
Spec PlaybookSpec `json:"spec,omitempty"`
Status PlaybookStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PipelineList of Pipeline
type PipelineList struct {
// PlaybookList of Playbook
type PlaybookList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Pipeline `json:"items"`
Items []Playbook `json:"items"`
}
func init() {
SchemeBuilder.Register(&Pipeline{}, &PipelineList{})
SchemeBuilder.Register(&Playbook{}, &PlaybookList{})
}
// //+kubebuilder:webhook:path=/mutate-kubekey-kubesphere-io-v1beta1-pipeline,mutating=true,failurePolicy=fail,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=kkmachines,verbs=create;update,versions=v1beta1,name=default.kkmachine.infrastructure.cluster.x-k8s.io,admissionReviewVersions=v1
// var _ webhook.Defaulter = &Pipeline{}
// // Default implements webhook.Defaulter so a webhook will be registered for the type
// func (k *Pipeline) Default() {
// }

View File

@ -194,7 +194,7 @@ func (in *InventoryStatus) DeepCopy() *InventoryStatus {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Pipeline) DeepCopyInto(out *Pipeline) {
func (in *Playbook) DeepCopyInto(out *Playbook) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
@ -202,18 +202,18 @@ func (in *Pipeline) DeepCopyInto(out *Pipeline) {
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline.
func (in *Pipeline) DeepCopy() *Pipeline {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Playbook.
func (in *Playbook) DeepCopy() *Playbook {
if in == nil {
return nil
}
out := new(Pipeline)
out := new(Playbook)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Pipeline) DeepCopyObject() runtime.Object {
func (in *Playbook) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@ -221,66 +221,66 @@ func (in *Pipeline) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineFailedDetail) DeepCopyInto(out *PipelineFailedDetail) {
func (in *PlaybookFailedDetail) DeepCopyInto(out *PlaybookFailedDetail) {
*out = *in
if in.Hosts != nil {
in, out := &in.Hosts, &out.Hosts
*out = make([]PipelineFailedDetailHost, len(*in))
*out = make([]PlaybookFailedDetailHost, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineFailedDetail.
func (in *PipelineFailedDetail) DeepCopy() *PipelineFailedDetail {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaybookFailedDetail.
func (in *PlaybookFailedDetail) DeepCopy() *PlaybookFailedDetail {
if in == nil {
return nil
}
out := new(PipelineFailedDetail)
out := new(PlaybookFailedDetail)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineFailedDetailHost) DeepCopyInto(out *PipelineFailedDetailHost) {
func (in *PlaybookFailedDetailHost) DeepCopyInto(out *PlaybookFailedDetailHost) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineFailedDetailHost.
func (in *PipelineFailedDetailHost) DeepCopy() *PipelineFailedDetailHost {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaybookFailedDetailHost.
func (in *PlaybookFailedDetailHost) DeepCopy() *PlaybookFailedDetailHost {
if in == nil {
return nil
}
out := new(PipelineFailedDetailHost)
out := new(PlaybookFailedDetailHost)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineList) DeepCopyInto(out *PipelineList) {
func (in *PlaybookList) DeepCopyInto(out *PlaybookList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Pipeline, len(*in))
*out = make([]Playbook, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineList.
func (in *PipelineList) DeepCopy() *PipelineList {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaybookList.
func (in *PlaybookList) DeepCopy() *PlaybookList {
if in == nil {
return nil
}
out := new(PipelineList)
out := new(PlaybookList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PipelineList) DeepCopyObject() runtime.Object {
func (in *PlaybookList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@ -288,22 +288,22 @@ func (in *PipelineList) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineProject) DeepCopyInto(out *PipelineProject) {
func (in *PlaybookProject) DeepCopyInto(out *PlaybookProject) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineProject.
func (in *PipelineProject) DeepCopy() *PipelineProject {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaybookProject.
func (in *PlaybookProject) DeepCopy() *PlaybookProject {
if in == nil {
return nil
}
out := new(PipelineProject)
out := new(PlaybookProject)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
func (in *PlaybookSpec) DeepCopyInto(out *PlaybookSpec) {
*out = *in
out.Project = in.Project
if in.InventoryRef != nil {
@ -338,50 +338,50 @@ func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) {
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec.
func (in *PipelineSpec) DeepCopy() *PipelineSpec {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaybookSpec.
func (in *PlaybookSpec) DeepCopy() *PlaybookSpec {
if in == nil {
return nil
}
out := new(PipelineSpec)
out := new(PlaybookSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineStatus) DeepCopyInto(out *PipelineStatus) {
func (in *PlaybookStatus) DeepCopyInto(out *PlaybookStatus) {
*out = *in
out.TaskResult = in.TaskResult
if in.FailedDetail != nil {
in, out := &in.FailedDetail, &out.FailedDetail
*out = make([]PipelineFailedDetail, len(*in))
*out = make([]PlaybookFailedDetail, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStatus.
func (in *PipelineStatus) DeepCopy() *PipelineStatus {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaybookStatus.
func (in *PlaybookStatus) DeepCopy() *PlaybookStatus {
if in == nil {
return nil
}
out := new(PipelineStatus)
out := new(PlaybookStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PipelineTaskResult) DeepCopyInto(out *PipelineTaskResult) {
func (in *PlaybookTaskResult) DeepCopyInto(out *PlaybookTaskResult) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskResult.
func (in *PipelineTaskResult) DeepCopy() *PipelineTaskResult {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaybookTaskResult.
func (in *PlaybookTaskResult) DeepCopy() *PlaybookTaskResult {
if in == nil {
return nil
}
out := new(PipelineTaskResult)
out := new(PlaybookTaskResult)
in.DeepCopyInto(out)
return out
}

View File

@ -17,17 +17,16 @@ limitations under the License.
package v1alpha1
import (
"fmt"
"github.com/cockroachdb/errors"
"k8s.io/apimachinery/pkg/runtime"
)
// TaskOwnerField is the field name of the owner reference in the task.
// It is defined in the proxy transport. Not applicable in kube-apiserver.
const TaskOwnerField = "ownerReferences:pipeline"
const TaskOwnerField = "ownerReferences:playbook"
// AddConversionFuncs adds the conversion functions to the given scheme.
// NOTE: ownerReferences:pipeline is valid in proxy client.
// NOTE: ownerReferences:playbook is valid in proxy client.
func AddConversionFuncs(scheme *runtime.Scheme) error {
return scheme.AddFieldLabelConversionFunc(
SchemeGroupVersion.WithKind("Task"),
@ -36,7 +35,7 @@ func AddConversionFuncs(scheme *runtime.Scheme) error {
case "metadata.name", "metadata.namespace", TaskOwnerField:
return label, value, nil
default:
return "", "", fmt.Errorf("field label %q not supported for Task", label)
return "", "", errors.Errorf("field label %q not supported for Task", label)
}
},
)

View File

@ -81,7 +81,7 @@ type TaskHostResult struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:scope=Namespaced
// Task of pipeline
// Task of playbook
type Task struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

View File

@ -3,19 +3,22 @@ module github.com/kubesphere/kubekey/api
go 1.23.3
require (
github.com/cockroachdb/errors v1.11.3
github.com/stretchr/testify v1.9.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.31.3
k8s.io/apimachinery v0.31.3
k8s.io/klog/v2 v2.130.1
sigs.k8s.io/cluster-api v1.9.2
sigs.k8s.io/controller-runtime v0.19.3
)
require (
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
@ -26,18 +29,23 @@ require (
github.com/google/gofuzz v1.2.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
golang.org/x/net v0.32.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/text v0.21.0 // indirect
google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiextensions-apiserver v0.31.3 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect

View File

@ -1,3 +1,9 @@
github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I=
github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -7,6 +13,10 @@ github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtz
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
@ -56,11 +66,15 @@ github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y=
github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=

View File

@ -21,6 +21,7 @@ import (
"reflect"
"strings"
"github.com/cockroachdb/errors"
"gopkg.in/yaml.v3"
)
@ -75,7 +76,7 @@ type Task struct {
func (b *Block) UnmarshalYAML(node *yaml.Node) error {
// fill baseInfo
if err := node.Decode(&b.BlockBase); err != nil {
return fmt.Errorf("failed to decode block, error: %w", err)
return errors.Wrap(err, "failed to decode block")
}
for i := 0; i < len(node.Content); i += 2 {
@ -93,7 +94,7 @@ func (b *Block) UnmarshalYAML(node *yaml.Node) error {
}
if err := node.Decode(&b.Task); err != nil {
return fmt.Errorf("failed to decode task: %w", err)
return errors.Wrap(err, "failed to decode task")
}
b.UnknownField = collectUnknownFields(node, append(getFieldNames(reflect.TypeOf(BlockBase{})), getFieldNames(reflect.TypeOf(Task{}))...))

View File

@ -17,8 +17,7 @@ limitations under the License.
package v1
import (
"errors"
"github.com/cockroachdb/errors"
"gopkg.in/yaml.v3"
)
@ -43,7 +42,7 @@ func (w *When) UnmarshalYAML(node *yaml.Node) error {
}
case yaml.SequenceNode:
if err := node.Decode(&w.Data); err != nil {
return err
return errors.WithStack(err)
}
for i, v := range w.Data {
if !IsTmplSyntax(v) {

View File

@ -17,8 +17,7 @@ limitations under the License.
package v1
import (
"errors"
"github.com/cockroachdb/errors"
"gopkg.in/yaml.v3"
)

View File

@ -17,8 +17,9 @@ limitations under the License.
package v1
import (
"errors"
"strings"
"github.com/cockroachdb/errors"
)
// NOTE:

View File

@ -1,8 +1,8 @@
# the pod's WORKDIR which set by image. store the runtime files.
work_dir: /kubekey
# binary_dir may mount by pipeline. usage it shouldn't be changed.
# binary_dir may be mounted by the playbook; usually it shouldn't be changed.
binary_dir: /capkk/kubekey
# cloud_config_dir may mount by pipeline. usage it shouldn't be changed.
# cloud_config_dir may be mounted by the playbook; usually it shouldn't be changed.
cloud_config_dir: /capkk/cloud
# tmp_dir for kubekey in remote node. it will store file like binary package, iso file etc.
tmp_dir: /tmp/kubekey

View File

@ -24,7 +24,7 @@ import (
)
//go:embed playbooks roles
var BuiltinPipeline embed.FS
var BuiltinPlaybook embed.FS
//go:embed defaults
var Defaults embed.FS

View File

@ -25,6 +25,7 @@ import (
"runtime/pprof"
"strings"
"github.com/cockroachdb/errors"
"github.com/google/gops/agent"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
@ -58,12 +59,12 @@ func InitProfiling(ctx context.Context) error {
case "cpu":
f, err = os.Create(profileOutput)
if err != nil {
return err
return errors.WithStack(err)
}
err = pprof.StartCPUProfile(f)
if err != nil {
return err
return errors.WithStack(err)
}
// Block and mutex profiles need a call to Set{Block,Mutex}ProfileRate to
// output anything. We choose to sample all events.
@ -74,7 +75,7 @@ func InitProfiling(ctx context.Context) error {
default:
// Check the profile name is valid.
if profile := pprof.Lookup(profileName); profile == nil {
return fmt.Errorf("unknown profile '%s'", profileName)
return errors.Errorf("unknown profile '%s'", profileName)
}
}
@ -112,12 +113,12 @@ func FlushProfiling() error {
f, err := os.Create(profileOutput)
if err != nil {
return err
return errors.WithStack(err)
}
defer f.Close()
if err := profile.WriteTo(f, 0); err != nil {
return err
return errors.WithStack(err)
}
}
@ -141,7 +142,7 @@ func InitGOPS() error {
// Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
// Bind to a random port on address 127.0.0.1
if err := agent.Listen(agent.Options{}); err != nil {
return err
return errors.WithStack(err)
}
}

View File

@ -21,13 +21,14 @@ import (
"sort"
"strings"
"github.com/cockroachdb/errors"
cliflag "k8s.io/component-base/cli/flag"
ctrl "sigs.k8s.io/controller-runtime"
)
// ControllerManagerServerOptions for NewControllerManagerServerOptions
type ControllerManagerServerOptions struct {
// Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.
// Debug mode, after a successful execution of Playbook, will retain runtime data, which includes task execution status and parameters.
Debug bool
MaxConcurrentReconciles int
@ -100,7 +101,7 @@ func NewControllerManagerServerOptions() *ControllerManagerServerOptions {
func (o *ControllerManagerServerOptions) Flags() cliflag.NamedFlagSets {
fss := cliflag.NamedFlagSets{}
gfs := fss.FlagSet("generic")
gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, "+"will retain runtime data, which includes task execution status and parameters.")
gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Playbook, "+"will retain runtime data, which includes task execution status and parameters.")
cfs := fss.FlagSet("controller-manager")
cfs.IntVar(&o.MaxConcurrentReconciles, "max-concurrent-reconciles", o.MaxConcurrentReconciles, "The number of maximum concurrent reconciles for controller.")
cfs.BoolVar(&o.LeaderElection, "leader-election", o.LeaderElection, "Whether to enable leader election for controller-manager.")
@ -128,7 +129,7 @@ var controllers []Controller
func Register(reconciler Controller) error {
for _, c := range controllers {
if c.Name() == reconciler.Name() {
return fmt.Errorf("%s has register", reconciler.Name())
return errors.Errorf("%s has register", reconciler.Name())
}
}
controllers = append(controllers, reconciler)

View File

@ -19,6 +19,7 @@ package app
import (
"context"
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
@ -36,7 +37,7 @@ func NewControllerManagerCommand() *cobra.Command {
Short: "kubekey controller manager",
PersistentPreRunE: func(*cobra.Command, []string) error {
if err := options.InitGOPS(); err != nil {
return err
return errors.WithStack(err)
}
return options.InitProfiling(ctx)

View File

@ -20,6 +20,7 @@ limitations under the License.
package builtin
import (
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options/builtin"
@ -45,12 +46,12 @@ func newArtifactExportCommand() *cobra.Command {
Use: "export",
Short: "Export a KubeKey offline installation package",
RunE: func(cmd *cobra.Command, _ []string) error {
pipeline, err := o.Complete(cmd, []string{"playbooks/artifact_export.yaml"})
playbook, err := o.Complete(cmd, []string{"playbooks/artifact_export.yaml"})
if err != nil {
return err
return errors.WithStack(err)
}
return o.CommonOptions.Run(cmd.Context(), pipeline)
return o.CommonOptions.Run(cmd.Context(), playbook)
},
}
flags := cmd.Flags()
@ -68,12 +69,12 @@ func newArtifactImagesCommand() *cobra.Command {
Use: "images",
Short: "push images to a registry from an artifact",
RunE: func(cmd *cobra.Command, _ []string) error {
pipeline, err := o.Complete(cmd, []string{"playbooks/artifact_images.yaml"})
playbook, err := o.Complete(cmd, []string{"playbooks/artifact_images.yaml"})
if err != nil {
return err
return errors.WithStack(err)
}
return o.CommonOptions.Run(cmd.Context(), pipeline)
return o.CommonOptions.Run(cmd.Context(), playbook)
},
}
flags := cmd.Flags()

View File

@ -20,6 +20,7 @@ limitations under the License.
package builtin
import (
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options/builtin"
@ -45,12 +46,12 @@ func newCertsRenewCommand() *cobra.Command {
Use: "renew",
Short: "renew a cluster certs",
RunE: func(cmd *cobra.Command, _ []string) error {
pipeline, err := o.Complete(cmd, []string{"playbooks/certs_renew.yaml"})
playbook, err := o.Complete(cmd, []string{"playbooks/certs_renew.yaml"})
if err != nil {
return err
return errors.WithStack(err)
}
return o.CommonOptions.Run(cmd.Context(), pipeline)
return o.CommonOptions.Run(cmd.Context(), playbook)
},
}
flags := cmd.Flags()

View File

@ -20,6 +20,7 @@ limitations under the License.
package builtin
import (
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options/builtin"
@ -44,12 +45,12 @@ func newCreateClusterCommand() *cobra.Command {
Use: "cluster",
Short: "Create a Kubernetes or KubeSphere cluster",
RunE: func(cmd *cobra.Command, _ []string) error {
pipeline, err := o.Complete(cmd, []string{"playbooks/create_cluster.yaml"})
playbook, err := o.Complete(cmd, []string{"playbooks/create_cluster.yaml"})
if err != nil {
return err
return errors.WithStack(err)
}
return o.CommonOptions.Run(cmd.Context(), pipeline)
return o.CommonOptions.Run(cmd.Context(), playbook)
},
}
flags := cmd.Flags()

View File

@ -20,6 +20,7 @@ limitations under the License.
package builtin
import (
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options/builtin"
@ -46,12 +47,12 @@ func newInitOSCommand() *cobra.Command {
Use: "os",
Short: "Init operating system",
RunE: func(cmd *cobra.Command, _ []string) error {
pipeline, err := o.Complete(cmd, []string{"playbooks/init_os.yaml"})
playbook, err := o.Complete(cmd, []string{"playbooks/init_os.yaml"})
if err != nil {
return err
return errors.WithStack(err)
}
return o.CommonOptions.Run(cmd.Context(), pipeline)
return o.CommonOptions.Run(cmd.Context(), playbook)
},
}
flags := cmd.Flags()
@ -69,12 +70,12 @@ func newInitRegistryCommand() *cobra.Command {
Use: "registry",
Short: "Init a local image registry",
RunE: func(cmd *cobra.Command, _ []string) error {
pipeline, err := o.Complete(cmd, []string{"playbooks/init_registry.yaml"})
playbook, err := o.Complete(cmd, []string{"playbooks/init_registry.yaml"})
if err != nil {
return err
return errors.WithStack(err)
}
return o.CommonOptions.Run(cmd.Context(), pipeline)
return o.CommonOptions.Run(cmd.Context(), playbook)
},
}
flags := cmd.Flags()

View File

@ -20,6 +20,7 @@ limitations under the License.
package builtin
import (
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options/builtin"
@ -40,12 +41,12 @@ func NewPreCheckCommand() *cobra.Command {
Short: "Check if the nodes is eligible for cluster deployment.",
Long: "the tags can specify check items. support: etcd, os, network, cri, nfs.",
RunE: func(cmd *cobra.Command, args []string) error {
pipeline, err := o.Complete(cmd, append(args, "playbooks/precheck.yaml"))
playbook, err := o.Complete(cmd, append(args, "playbooks/precheck.yaml"))
if err != nil {
return err
return errors.WithStack(err)
}
return o.CommonOptions.Run(cmd.Context(), pipeline)
return o.CommonOptions.Run(cmd.Context(), playbook)
},
}
flags := cmd.Flags()

View File

@ -20,8 +20,7 @@ limitations under the License.
package builtin
import (
"fmt"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -52,9 +51,9 @@ func (o *ArtifactExportOptions) Flags() cliflag.NamedFlagSets {
return fss
}
// Complete options. create Pipeline, Config and Inventory
func (o *ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, error) {
pipeline := &kkcorev1.Pipeline{
// Complete options. create Playbook, Config and Inventory
func (o *ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Playbook, error) {
playbook := &kkcorev1.Playbook{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "artifact-export-",
Namespace: o.Namespace,
@ -66,23 +65,23 @@ func (o *ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kk
// complete playbook. now only support one playbook
if len(args) != 1 {
return nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
return nil, errors.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
}
o.Playbook = args[0]
pipeline.Spec = kkcorev1.PipelineSpec{
playbook.Spec = kkcorev1.PlaybookSpec{
Playbook: o.Playbook,
Debug: o.Debug,
SkipTags: []string{"certs"},
}
if err := completeInventory(o.CommonOptions.InventoryFile, o.CommonOptions.Inventory); err != nil {
return nil, fmt.Errorf("cannot get local inventory. error is %w. Please set it by \"--inventory\"", err)
return nil, errors.Wrap(err, "failed to get local inventory. Please set it by \"--inventory\"")
}
if err := o.CommonOptions.Complete(pipeline); err != nil {
return nil, err
if err := o.CommonOptions.Complete(playbook); err != nil {
return nil, errors.WithStack(err)
}
return pipeline, nil
return playbook, nil
}
// ======================================================================================
@ -107,9 +106,9 @@ func (o *ArtifactImagesOptions) Flags() cliflag.NamedFlagSets {
return fss
}
// Complete options. create Pipeline, Config and Inventory
func (o *ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, error) {
pipeline := &kkcorev1.Pipeline{
// Complete options. create Playbook, Config and Inventory
func (o *ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Playbook, error) {
playbook := &kkcorev1.Playbook{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "artifact-images-",
Namespace: o.Namespace,
@ -121,19 +120,19 @@ func (o *ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kk
// complete playbook. now only support one playbook
if len(args) != 1 {
return nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
return nil, errors.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
}
o.Playbook = args[0]
pipeline.Spec = kkcorev1.PipelineSpec{
playbook.Spec = kkcorev1.PlaybookSpec{
Playbook: o.Playbook,
Debug: o.Debug,
Tags: []string{"only_image"},
}
if err := o.CommonOptions.Complete(pipeline); err != nil {
return nil, err
if err := o.CommonOptions.Complete(playbook); err != nil {
return nil, errors.WithStack(err)
}
return pipeline, nil
return playbook, nil
}

View File

@ -23,6 +23,7 @@ import (
"fmt"
"os"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
"k8s.io/apimachinery/pkg/util/yaml"
@ -33,30 +34,31 @@ func completeInventory(inventoryFile string, inventory *kkcorev1.Inventory) erro
if inventoryFile != "" {
data, err := os.ReadFile(inventoryFile)
if err != nil {
return fmt.Errorf("cannot get inventory for inventoryFile: %s. error is %w.", inventoryFile, err)
return errors.Wrapf(err, "failed to get inventory for inventoryFile: %q", inventoryFile)
}
return yaml.Unmarshal(data, inventory)
return errors.Wrapf(yaml.Unmarshal(data, inventory), "failed to unmarshal inventoryFile %s", inventoryFile)
}
data, err := core.Defaults.ReadFile("defaults/inventory/localhost.yaml")
if err != nil {
return fmt.Errorf("cannot get local inventory. error is %w. Please set it by \"--inventory\"", err)
return errors.Wrap(err, "failed to get local inventory. Please set it by \"--inventory\"")
}
return yaml.Unmarshal(data, inventory)
return errors.Wrapf(yaml.Unmarshal(data, inventory), "failed to unmarshal local inventoryFile %q", inventoryFile)
}
func completeConfig(kubeVersion string, configFile string, config *kkcorev1.Config) error {
if configFile != "" {
data, err := os.ReadFile(configFile)
if err != nil {
return fmt.Errorf("cannot get config for configFile: %s. error is %w.", kubeVersion, err)
return errors.Wrapf(err, "failed to get configFile %q", configFile)
}
return yaml.Unmarshal(data, config)
return errors.Wrapf(yaml.Unmarshal(data, config), "failed to unmarshal configFile %q", configFile)
}
data, err := core.Defaults.ReadFile(fmt.Sprintf("defaults/config/%s.yaml", kubeVersion))
if err != nil {
return fmt.Errorf("cannot get config for kube_version: %s. error is %w. Please set it by \"--config\"", kubeVersion, err)
return errors.Wrapf(err, "failed to get local configFile for kube_version: %q. Please set it by \"--config\"", kubeVersion)
}
return yaml.Unmarshal(data, config)
return errors.Wrapf(yaml.Unmarshal(data, config), "failed to unmarshal local configFile for kube_version: %q.", kubeVersion)
}

View File

@ -20,8 +20,7 @@ limitations under the License.
package builtin
import (
"fmt"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -48,9 +47,9 @@ func (o *CertsRenewOptions) Flags() cliflag.NamedFlagSets {
return fss
}
// Complete options. create Pipeline, Config and Inventory
func (o *CertsRenewOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, error) {
pipeline := &kkcorev1.Pipeline{
// Complete options. create Playbook, Config and Inventory
func (o *CertsRenewOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Playbook, error) {
playbook := &kkcorev1.Playbook{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "certs-renew-",
Namespace: o.Namespace,
@ -61,17 +60,17 @@ func (o *CertsRenewOptions) Complete(cmd *cobra.Command, args []string) (*kkcore
}
// complete playbook. now only support one playbook
if len(args) != 1 {
return nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
return nil, errors.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
}
o.Playbook = args[0]
pipeline.Spec = kkcorev1.PipelineSpec{
playbook.Spec = kkcorev1.PlaybookSpec{
Playbook: o.Playbook,
Debug: o.Debug,
Tags: []string{"certs"},
}
if err := o.CommonOptions.Complete(pipeline); err != nil {
return nil, err
if err := o.CommonOptions.Complete(playbook); err != nil {
return nil, errors.WithStack(err)
}
return pipeline, nil
return playbook, nil
}

View File

@ -22,6 +22,7 @@ package builtin
import (
"fmt"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -64,9 +65,9 @@ func (o *CreateClusterOptions) Flags() cliflag.NamedFlagSets {
return fss
}
// Complete options. create Pipeline, Config and Inventory
func (o *CreateClusterOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, error) {
pipeline := &kkcorev1.Pipeline{
// Complete options. create Playbook, Config and Inventory
func (o *CreateClusterOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Playbook, error) {
playbook := &kkcorev1.Playbook{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "create-cluster-",
Namespace: o.Namespace,
@ -78,38 +79,38 @@ func (o *CreateClusterOptions) Complete(cmd *cobra.Command, args []string) (*kkc
// complete playbook. now only support one playbook
if len(args) != 1 {
return nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
return nil, errors.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
}
o.Playbook = args[0]
pipeline.Spec = kkcorev1.PipelineSpec{
playbook.Spec = kkcorev1.PlaybookSpec{
Playbook: o.Playbook,
Debug: o.Debug,
}
// override kube_version in config
if err := completeConfig(o.Kubernetes, o.CommonOptions.ConfigFile, o.CommonOptions.Config); err != nil {
return nil, fmt.Errorf("cannot get config for kube_version: %s. error is %w. Please set it by \"--config\"", o.Kubernetes, err)
return nil, errors.WithStack(err)
}
if err := completeInventory(o.CommonOptions.InventoryFile, o.CommonOptions.Inventory); err != nil {
return nil, fmt.Errorf("cannot get local inventory. error is %w. Please set it by \"--inventory\"", err)
return nil, errors.WithStack(err)
}
if err := o.CommonOptions.Complete(pipeline); err != nil {
return nil, err
if err := o.CommonOptions.Complete(playbook); err != nil {
return nil, errors.WithStack(err)
}
return pipeline, o.completeConfig()
return playbook, o.completeConfig()
}
func (o *CreateClusterOptions) completeConfig() error {
if o.ContainerManager != "" {
// override container_manager in config
if err := o.CommonOptions.Config.SetValue("cri.container_manager", o.ContainerManager); err != nil {
return err
return errors.WithStack(err)
}
}
if err := o.CommonOptions.Config.SetValue("kube_version", o.Kubernetes); err != nil {
return err
return errors.WithStack(err)
}
return nil

View File

@ -20,9 +20,9 @@ limitations under the License.
package builtin
import (
"fmt"
"path/filepath"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -54,9 +54,9 @@ func (o *InitOSOptions) Flags() cliflag.NamedFlagSets {
return fss
}
// Complete options. create Pipeline, Config and Inventory
func (o *InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, error) {
pipeline := &kkcorev1.Pipeline{
// Complete options. create Playbook, Config and Inventory
func (o *InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Playbook, error) {
playbook := &kkcorev1.Playbook{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "init-os-",
Namespace: o.Namespace,
@ -68,36 +68,36 @@ func (o *InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.P
// complete playbook. now only support one playbook
if len(args) != 1 {
return nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
return nil, errors.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
}
o.Playbook = args[0]
pipeline.Spec = kkcorev1.PipelineSpec{
playbook.Spec = kkcorev1.PlaybookSpec{
Playbook: o.Playbook,
Debug: o.Debug,
}
if err := o.CommonOptions.Complete(pipeline); err != nil {
return nil, err
if err := o.CommonOptions.Complete(playbook); err != nil {
return nil, errors.Wrap(err, "failed to complete playbook")
}
return pipeline, o.complateConfig()
return playbook, o.complateConfig()
}
func (o *InitOSOptions) complateConfig() error {
if workdir, err := o.CommonOptions.Config.GetValue("workdir"); err != nil {
if workdir, err := o.CommonOptions.Config.GetValue(_const.Workdir); err != nil {
// workdir should set by CommonOptions
return err
return errors.Wrapf(err, "failed to get value %q in config", _const.Workdir)
} else {
wd, ok := workdir.(string)
if !ok {
return fmt.Errorf("workdir should be string value")
return errors.New("workdir should be string value")
}
// set binary dir if not set
if _, err := o.CommonOptions.Config.GetValue(_const.BinaryDir); err != nil {
// not found set default
if err := o.CommonOptions.Config.SetValue(_const.BinaryDir, filepath.Join(wd, "kubekey")); err != nil {
return err
return errors.Wrapf(err, "failed to set value %q in config", _const.Workdir)
}
}
}
@ -127,9 +127,9 @@ func (o *InitRegistryOptions) Flags() cliflag.NamedFlagSets {
return fss
}
// Complete options. create Pipeline, Config and Inventory
func (o *InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, error) {
pipeline := &kkcorev1.Pipeline{
// Complete options. create Playbook, Config and Inventory
func (o *InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Playbook, error) {
playbook := &kkcorev1.Playbook{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "init-registry-",
Namespace: o.Namespace,
@ -141,17 +141,17 @@ func (o *InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkco
// complete playbook. now only support one playbook
if len(args) != 1 {
return nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
return nil, errors.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
}
o.Playbook = args[0]
pipeline.Spec = kkcorev1.PipelineSpec{
playbook.Spec = kkcorev1.PlaybookSpec{
Playbook: o.Playbook,
Debug: o.Debug,
}
if err := o.CommonOptions.Complete(pipeline); err != nil {
return nil, err
if err := o.CommonOptions.Complete(playbook); err != nil {
return nil, errors.Wrap(err, "failed to complete playbook")
}
return pipeline, nil
return playbook, nil
}

View File

@ -20,8 +20,7 @@ limitations under the License.
package builtin
import (
"fmt"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -46,9 +45,9 @@ func (o *PreCheckOptions) Flags() cliflag.NamedFlagSets {
return o.CommonOptions.Flags()
}
// Complete options. create Pipeline, Config and Inventory
func (o *PreCheckOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, error) {
pipeline := &kkcorev1.Pipeline{
// Complete options. create Playbook, Config and Inventory
func (o *PreCheckOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Playbook, error) {
playbook := &kkcorev1.Playbook{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "precheck-",
Namespace: o.Namespace,
@ -61,7 +60,7 @@ func (o *PreCheckOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1
// complete playbook. now only support one playbook
var tags []string
if len(args) < 1 {
return nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
return nil, errors.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
} else if len(args) == 1 {
o.Playbook = args[0]
} else {
@ -69,17 +68,17 @@ func (o *PreCheckOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1
o.Playbook = args[len(args)-1]
}
pipeline.Spec = kkcorev1.PipelineSpec{
playbook.Spec = kkcorev1.PlaybookSpec{
Playbook: o.Playbook,
Debug: o.Debug,
Tags: tags,
}
if err := completeInventory(o.CommonOptions.InventoryFile, o.CommonOptions.Inventory); err != nil {
return nil, fmt.Errorf("cannot get local inventory. error is %w. Please set it by \"--inventory\"", err)
return nil, errors.WithStack(err)
}
if err := o.CommonOptions.Complete(pipeline); err != nil {
return nil, err
if err := o.CommonOptions.Complete(playbook); err != nil {
return nil, errors.WithStack(err)
}
return pipeline, nil
return playbook, nil
}

View File

@ -19,12 +19,11 @@ package options
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -58,7 +57,7 @@ type CommonOptions struct {
Workdir string
// Artifact is the path to the offline package for kubekey.
Artifact string
// Debug indicates whether to retain runtime data after a successful execution of the pipeline.
// Debug indicates whether to retain runtime data after a successful execution of the playbook.
// This includes task execution status and parameters.
Debug bool
// Namespace specifies the namespace for all resources.
@ -107,42 +106,37 @@ func NewCommonOptions() CommonOptions {
}
// Run executes the main command logic for the application.
// It sets up the necessary configurations, creates the inventory and pipeline
// It sets up the necessary configurations, creates the inventory and playbook
// resources, and then runs the command manager.
func (o *CommonOptions) Run(ctx context.Context, pipeline *kkcorev1.Pipeline) error {
func (o *CommonOptions) Run(ctx context.Context, playbook *kkcorev1.Playbook) error {
// create workdir directory,if not exists
if _, err := os.Stat(o.Workdir); os.IsNotExist(err) {
if err := os.MkdirAll(o.Workdir, os.ModePerm); err != nil {
return err
return errors.WithStack(err)
}
}
restconfig := &rest.Config{}
if err := proxy.RestConfig(filepath.Join(o.Workdir, _const.RuntimeDir), restconfig); err != nil {
return fmt.Errorf("could not get rest config: %w", err)
return errors.WithStack(err)
}
client, err := ctrlclient.New(restconfig, ctrlclient.Options{
Scheme: _const.Scheme,
})
if err != nil {
return fmt.Errorf("could not get runtime-client: %w", err)
return errors.Wrap(err, "failed to runtime-client")
}
// create inventory
if err := client.Create(ctx, o.Inventory); err != nil {
klog.ErrorS(err, "Create inventory error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return err
return errors.Wrap(err, "failed to create inventory")
}
// create pipeline
// pipeline.Status.Phase = kkcorev1.PipelinePhaseRunning
if err := client.Create(ctx, pipeline); err != nil {
klog.ErrorS(err, "Create pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return err
// create playbook
if err := client.Create(ctx, playbook); err != nil {
return errors.Wrap(err, "failed to create playbook")
}
return manager.NewCommandManager(manager.CommandManagerOptions{
Workdir: o.Workdir,
Pipeline: pipeline,
Playbook: playbook,
Config: o.Config,
Inventory: o.Inventory,
Client: client,
@ -160,33 +154,33 @@ func (o *CommonOptions) Flags() cliflag.NamedFlagSets {
gfs.StringVarP(&o.ConfigFile, "config", "c", o.ConfigFile, "the config file path. support *.yaml ")
gfs.StringArrayVar(&o.Set, "set", o.Set, "set value in config. format --set key=val or --set k1=v1,k2=v2")
gfs.StringVarP(&o.InventoryFile, "inventory", "i", o.InventoryFile, "the host list file path. support *.yaml")
gfs.BoolVarP(&o.Debug, "debug", "d", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.")
gfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "the namespace which pipeline will be executed, all reference resources(pipeline, config, inventory, task) should in the same namespace")
gfs.BoolVarP(&o.Debug, "debug", "d", o.Debug, "Debug mode, after a successful execution of Playbook, will retain runtime data, which includes task execution status and parameters.")
gfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "the namespace which playbook will be executed, all reference resources(playbook, config, inventory, task) should in the same namespace")
return fss
}
// Complete finalizes the CommonOptions by setting up the working directory,
// generating the configuration, and completing the inventory reference for the pipeline.
func (o *CommonOptions) Complete(pipeline *kkcorev1.Pipeline) error {
// generating the configuration, and completing the inventory reference for the playbook.
func (o *CommonOptions) Complete(playbook *kkcorev1.Playbook) error {
// Ensure the working directory is an absolute path.
if !filepath.IsAbs(o.Workdir) {
wd, err := os.Getwd()
if err != nil {
return fmt.Errorf("get current dir error: %w", err)
return errors.Wrap(err, "get current dir error")
}
o.Workdir = filepath.Join(wd, o.Workdir)
}
// Generate and complete the configuration.
if err := o.completeConfig(o.Config); err != nil {
return fmt.Errorf("generate config error: %w", err)
return errors.WithStack(err)
}
pipeline.Spec.Config = ptr.Deref(o.Config, kkcorev1.Config{})
playbook.Spec.Config = ptr.Deref(o.Config, kkcorev1.Config{})
// Complete the inventory reference.
o.completeInventory(o.Inventory)
pipeline.Spec.InventoryRef = &corev1.ObjectReference{
playbook.Spec.InventoryRef = &corev1.ObjectReference{
Kind: o.Inventory.Kind,
Namespace: o.Inventory.Namespace,
Name: o.Inventory.Name,
@ -203,13 +197,13 @@ func (o *CommonOptions) completeConfig(config *kkcorev1.Config) error {
// set value by command args
if o.Workdir != "" {
if err := config.SetValue(_const.Workdir, o.Workdir); err != nil {
return fmt.Errorf("failed to set %q in config. error: %w", _const.Workdir, err)
return errors.WithStack(err)
}
}
if o.Artifact != "" {
// override artifact_file in config
if err := config.SetValue("artifact_file", o.Artifact); err != nil {
return fmt.Errorf("failed to set \"artifact_file\" in config. error: %w", err)
return errors.WithStack(err)
}
}
for _, s := range o.Set {
@ -219,7 +213,7 @@ func (o *CommonOptions) completeConfig(config *kkcorev1.Config) error {
return errors.New("--set value should be k=v")
}
if err := setValue(config, setVal[:i], setVal[i+1:]); err != nil {
return fmt.Errorf("--set value to config error: %w", err)
return errors.Wrapf(err, "failed to set value to config by \"--set\" %q", setVal[:i])
}
}
}
@ -245,7 +239,7 @@ func setValue(config *kkcorev1.Config, key, val string) error {
var value map[string]any
err := json.Unmarshal([]byte(val), &value)
if err != nil {
return err
return errors.Wrapf(err, "failed to unmarshal json for %q", key)
}
return config.SetValue(key, value)
@ -253,7 +247,7 @@ func setValue(config *kkcorev1.Config, key, val string) error {
var value []any
err := json.Unmarshal([]byte(val), &value)
if err != nil {
return err
return errors.Wrapf(err, "failed to unmarshal json for %q", key)
}
return config.SetValue(key, value)

View File

@ -25,6 +25,7 @@ import (
"runtime/pprof"
"strings"
"github.com/cockroachdb/errors"
"github.com/google/gops/agent"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
@ -58,12 +59,12 @@ func InitProfiling(ctx context.Context) error {
case "cpu":
f, err = os.Create(profileOutput)
if err != nil {
return err
return errors.Wrap(err, "failed to create cpu profile")
}
err = pprof.StartCPUProfile(f)
if err != nil {
return err
return errors.Wrap(err, "failed to start cpu profile")
}
// Block and mutex profiles need a call to Set{Block,Mutex}ProfileRate to
// output anything. We choose to sample all events.
@ -74,7 +75,7 @@ func InitProfiling(ctx context.Context) error {
default:
// Check the profile name is valid.
if profile := pprof.Lookup(profileName); profile == nil {
return fmt.Errorf("unknown profile '%s'", profileName)
return errors.Errorf("unknown profile '%s'", profileName)
}
}
@ -114,12 +115,12 @@ func FlushProfiling() error {
f, err := os.Create(profileOutput)
if err != nil {
return err
return errors.Wrap(err, "failed to create profile")
}
defer f.Close()
if err := profile.WriteTo(f, 0); err != nil {
return err
return errors.Wrap(err, "failed to write profile")
}
}
@ -144,7 +145,7 @@ func InitGOPS() error {
// Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
// Bind to a random port on address 127.0.0.1
if err := agent.Listen(agent.Options{}); err != nil {
return err
return errors.Wrap(err, "failed to listen gops")
}
}

View File

@ -1,29 +0,0 @@
package options
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cliflag "k8s.io/component-base/cli/flag"
)
// PipelineOptions for NewPipelineOptions
type PipelineOptions struct {
Name string
Namespace string
}
// NewPipelineOptions for newPipelineCommand
func NewPipelineOptions() *PipelineOptions {
return &PipelineOptions{
Namespace: metav1.NamespaceDefault,
}
}
// Flags add to newPipelineCommand
func (o *PipelineOptions) Flags() cliflag.NamedFlagSets {
fss := cliflag.NamedFlagSets{}
pfs := fss.FlagSet("pipeline flags")
pfs.StringVar(&o.Name, "name", o.Name, "name of pipeline")
pfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "namespace of pipeline")
return fss
}

View File

@ -0,0 +1,29 @@
package options
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cliflag "k8s.io/component-base/cli/flag"
)
// PlaybookOptions for NewPlaybookOptions
type PlaybookOptions struct {
Name string
Namespace string
}
// NewPlaybookOptions for newPlaybookCommand
func NewPlaybookOptions() *PlaybookOptions {
return &PlaybookOptions{
Namespace: metav1.NamespaceDefault,
}
}
// Flags add to newPlaybookCommand
func (o *PlaybookOptions) Flags() cliflag.NamedFlagSets {
fss := cliflag.NamedFlagSets{}
pfs := fss.FlagSet("playbook flags")
pfs.StringVar(&o.Name, "name", o.Name, "name of playbook")
pfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "namespace of playbook")
return fss
}

View File

@ -17,8 +17,7 @@ limitations under the License.
package options
import (
"fmt"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -78,9 +77,9 @@ func (o *KubeKeyRunOptions) Flags() cliflag.NamedFlagSets {
return fss
}
// Complete options. create Pipeline, Config and Inventory
func (o *KubeKeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, error) {
pipeline := &kkcorev1.Pipeline{
// Complete options. create Playbook, Config and Inventory
func (o *KubeKeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Playbook, error) {
playbook := &kkcorev1.Playbook{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "run-",
Namespace: o.Namespace,
@ -89,12 +88,12 @@ func (o *KubeKeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kkcore
}
// complete playbook. now only support one playbook
if len(args) != 1 {
return nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
return nil, errors.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
}
o.Playbook = args[0]
pipeline.Spec = kkcorev1.PipelineSpec{
Project: kkcorev1.PipelineProject{
playbook.Spec = kkcorev1.PlaybookSpec{
Project: kkcorev1.PlaybookProject{
Addr: o.ProjectAddr,
Name: o.ProjectName,
Branch: o.ProjectBranch,
@ -107,9 +106,9 @@ func (o *KubeKeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kkcore
SkipTags: o.SkipTags,
Debug: o.Debug,
}
if err := o.CommonOptions.Complete(pipeline); err != nil {
return nil, err
if err := o.CommonOptions.Complete(playbook); err != nil {
return nil, errors.WithStack(err)
}
return pipeline, nil
return playbook, nil
}

View File

@ -1,9 +1,9 @@
package app
import (
"fmt"
"path/filepath"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
"github.com/spf13/cobra"
"k8s.io/klog/v2"
@ -16,57 +16,57 @@ import (
"github.com/kubesphere/kubekey/v4/pkg/proxy"
)
func newPipelineCommand() *cobra.Command {
o := options.NewPipelineOptions()
func newPlaybookCommand() *cobra.Command {
o := options.NewPlaybookOptions()
cmd := &cobra.Command{
Use: "pipeline",
Short: "Executor a pipeline in kubernetes",
Use: "playbook",
Short: "Executor a playbook in kubernetes",
RunE: func(cmd *cobra.Command, _ []string) error {
restconfig, err := ctrl.GetConfig()
if err != nil {
return fmt.Errorf("cannot get restconfig in kubernetes. error is %w", err)
return errors.Wrap(err, "failed to get restconfig")
}
kubeclient, err := ctrlclient.New(restconfig, ctrlclient.Options{
Scheme: _const.Scheme,
})
if err != nil {
return fmt.Errorf("could not create client: %w", err)
return errors.Wrap(err, "failed to create client")
}
// get pipeline
pipeline := &kkcorev1.Pipeline{}
// get playbook
playbook := &kkcorev1.Playbook{}
if err := kubeclient.Get(cmd.Context(), ctrlclient.ObjectKey{
Name: o.Name,
Namespace: o.Namespace,
}, pipeline); err != nil {
return err
}, playbook); err != nil {
return errors.Wrap(err, "failed to get playbook")
}
if pipeline.Status.Phase != kkcorev1.PipelinePhaseRunning {
klog.InfoS("pipeline is not running, skip", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
if playbook.Status.Phase != kkcorev1.PlaybookPhaseRunning {
klog.InfoS("playbook is not running, skip", "playbook", ctrlclient.ObjectKeyFromObject(playbook))
return nil
}
// get inventory
inventory := new(kkcorev1.Inventory)
if err := kubeclient.Get(cmd.Context(), ctrlclient.ObjectKey{
Name: pipeline.Spec.InventoryRef.Name,
Namespace: pipeline.Spec.InventoryRef.Namespace,
Name: playbook.Spec.InventoryRef.Name,
Namespace: playbook.Spec.InventoryRef.Namespace,
}, inventory); err != nil {
return err
return errors.Wrap(err, "failed to get inventory")
}
if err := proxy.RestConfig(filepath.Join(_const.GetWorkdirFromConfig(pipeline.Spec.Config), _const.RuntimeDir), restconfig); err != nil {
return fmt.Errorf("could not get rest config: %w", err)
if err := proxy.RestConfig(filepath.Join(_const.GetWorkdirFromConfig(playbook.Spec.Config), _const.RuntimeDir), restconfig); err != nil {
return errors.Wrap(err, "failed to get rest config")
}
// use proxy client to store task.
proxyclient, err := ctrlclient.New(restconfig, ctrlclient.Options{
Scheme: _const.Scheme,
})
if err != nil {
return fmt.Errorf("could not create client: %w", err)
return errors.Wrap(err, "failed to create client")
}
return manager.NewCommandManager(manager.CommandManagerOptions{
Pipeline: pipeline,
Playbook: playbook,
Inventory: inventory,
Client: proxyclient,
}).Run(cmd.Context())

View File

@ -18,13 +18,12 @@ package app
import (
"context"
"fmt"
"path/filepath"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
"github.com/spf13/cobra"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
@ -43,7 +42,7 @@ func NewRootCommand() *cobra.Command {
Long: "kubekey is a daemon that execute command in a node",
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
if err := options.InitGOPS(); err != nil {
return err
return errors.WithStack(err)
}
return options.InitProfiling(cmd.Context())
@ -60,7 +59,7 @@ func NewRootCommand() *cobra.Command {
options.AddGOPSFlags(flags)
// add children command
cmd.AddCommand(newRunCommand())
cmd.AddCommand(newPipelineCommand())
cmd.AddCommand(newPlaybookCommand())
cmd.AddCommand(newVersionCommand())
// internal command
cmd.AddCommand(internalCommand...)
@ -69,46 +68,41 @@ func NewRootCommand() *cobra.Command {
}
// CommandRunE executes the main command logic for the application.
// It sets up the necessary configurations, creates the inventory and pipeline
// It sets up the necessary configurations, creates the inventory and playbook
// resources, and then runs the command manager.
//
// Parameters:
// - ctx: The context for controlling the execution flow.
// - workdir: The working directory path.
// - pipeline: The pipeline resource to be created and managed.
// - playbook: The playbook resource to be created and managed.
// - config: The configuration resource.
// - inventory: The inventory resource to be created.
//
// Returns:
// - error: An error if any step in the process fails, otherwise nil.
func CommandRunE(ctx context.Context, workdir string, pipeline *kkcorev1.Pipeline, config *kkcorev1.Config, inventory *kkcorev1.Inventory) error {
func CommandRunE(ctx context.Context, workdir string, playbook *kkcorev1.Playbook, config *kkcorev1.Config, inventory *kkcorev1.Inventory) error {
restconfig := &rest.Config{}
if err := proxy.RestConfig(filepath.Join(workdir, _const.RuntimeDir), restconfig); err != nil {
return fmt.Errorf("could not get rest config: %w", err)
return errors.Wrap(err, "failed to get restconfig")
}
client, err := ctrlclient.New(restconfig, ctrlclient.Options{
Scheme: _const.Scheme,
})
if err != nil {
return fmt.Errorf("could not get runtime-client: %w", err)
return errors.Wrap(err, "failed to get runtime-client")
}
// create inventory
if err := client.Create(ctx, inventory); err != nil {
klog.ErrorS(err, "Create inventory error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return err
return errors.Wrap(err, "failed to create inventory")
}
// create pipeline
// pipeline.Status.Phase = kkcorev1.PipelinePhaseRunning
if err := client.Create(ctx, pipeline); err != nil {
klog.ErrorS(err, "Create pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return err
// create playbook
if err := client.Create(ctx, playbook); err != nil {
return errors.Wrap(err, "failed to create playbook")
}
return manager.NewCommandManager(manager.CommandManagerOptions{
Workdir: workdir,
Pipeline: pipeline,
Playbook: playbook,
Config: config,
Inventory: inventory,
Client: client,

View File

@ -17,6 +17,7 @@ limitations under the License.
package app
import (
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
@ -29,12 +30,12 @@ func newRunCommand() *cobra.Command {
Use: "run [playbook]",
Short: "run a playbook by playbook file. the file source can be git or local",
RunE: func(cmd *cobra.Command, args []string) error {
pipeline, err := o.Complete(cmd, args)
playbook, err := o.Complete(cmd, args)
if err != nil {
return err
return errors.WithStack(err)
}
return o.CommonOptions.Run(cmd.Context(), pipeline)
return o.CommonOptions.Run(cmd.Context(), playbook)
},
}
for _, f := range o.Flags().FlagSets {

View File

@ -4,14 +4,14 @@ kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2-0.20250203100058-d1f73b5ea552
name: pipelines.kubekey.kubesphere.io
name: playbooks.kubekey.kubesphere.io
spec:
group: kubekey.kubesphere.io
names:
kind: Pipeline
listKind: PipelineList
plural: pipelines
singular: pipeline
kind: Playbook
listKind: PlaybookList
plural: playbooks
singular: playbook
scope: Namespaced
versions:
- additionalPrinterColumns:
@ -30,7 +30,7 @@ spec:
name: v1
schema:
openAPIV3Schema:
description: Pipeline resource executor a playbook.
description: Playbook resource executor a playbook.
properties:
apiVersion:
description: |-
@ -50,7 +50,7 @@ spec:
metadata:
type: object
spec:
description: PipelineSpec of pipeline.
description: PlaybookSpec of playbook.
properties:
config:
description: Config is the global variable configuration for playbook
@ -76,7 +76,7 @@ spec:
type: object
debug:
description: |-
If Debug mode is true, It will retain runtime data after a successful execution of Pipeline,
If Debug mode is true, It will retain runtime data after a successful execution of Playbook,
which includes task execution status and parameters.
type: boolean
inventoryRef:
@ -1996,18 +1996,18 @@ spec:
- playbook
type: object
status:
description: PipelineStatus of Pipeline
description: PlaybookStatus of Playbook
properties:
failedDetail:
description: FailedDetail will record the failed tasks.
items:
description: PipelineFailedDetail store failed message when pipeline
description: PlaybookFailedDetail store failed message when playbook
run failed.
properties:
hosts:
description: failed Hosts Result of failed task.
items:
description: PipelineFailedDetailHost detail failed message
description: PlaybookFailedDetailHost detail failed message
for each host.
properties:
host:
@ -2035,7 +2035,7 @@ spec:
a terminal problem
type: string
phase:
description: Phase of pipeline.
description: Phase of playbook.
type: string
taskResult:
description: TaskResult total related tasks execute result.

View File

@ -9,7 +9,7 @@ resources:
- crds/infrastructure.cluster.x-k8s.io_kkmachines.yaml
- crds/infrastructure.cluster.x-k8s.io_kkmachinetemplates.yaml
- crds/kubekey.kubesphere.io_inventories.yaml
- crds/kubekey.kubesphere.io_pipelines.yaml
- crds/kubekey.kubesphere.io_playbooks.yaml
- webhook/manifests.yaml
- webhook/issuer.yaml
- rbac/role.yaml

View File

@ -10,9 +10,9 @@ webhooks:
service:
name: kk-webhook-service
namespace: capkk-system
path: /mutate-kubekey-kubesphere-io-v1-pipeline
path: /mutate-kubekey-kubesphere-io-v1-playbook
failurePolicy: Fail
name: default.pipeline.kubekey.kubesphere.io
name: default.playbook.kubekey.kubesphere.io
rules:
- apiGroups:
- kubekey.kubesphere.io
@ -22,7 +22,7 @@ webhooks:
- CREATE
- UPDATE
resources:
- pipelines
- playbooks
sideEffects: None
- admissionReviewVersions:
- v1

View File

@ -4,14 +4,14 @@ kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2-0.20250203100058-d1f73b5ea552
name: pipelines.kubekey.kubesphere.io
name: playbooks.kubekey.kubesphere.io
spec:
group: kubekey.kubesphere.io
names:
kind: Pipeline
listKind: PipelineList
plural: pipelines
singular: pipeline
kind: Playbook
listKind: PlaybookList
plural: playbooks
singular: playbook
scope: Namespaced
versions:
- additionalPrinterColumns:
@ -30,7 +30,7 @@ spec:
name: v1
schema:
openAPIV3Schema:
description: Pipeline resource executor a playbook.
description: Playbook resource executor a playbook.
properties:
apiVersion:
description: |-
@ -50,7 +50,7 @@ spec:
metadata:
type: object
spec:
description: PipelineSpec of pipeline.
description: PlaybookSpec of playbook.
properties:
config:
description: Config is the global variable configuration for playbook
@ -76,7 +76,7 @@ spec:
type: object
debug:
description: |-
If Debug mode is true, It will retain runtime data after a successful execution of Pipeline,
If Debug mode is true, It will retain runtime data after a successful execution of Playbook,
which includes task execution status and parameters.
type: boolean
inventoryRef:
@ -1996,18 +1996,18 @@ spec:
- playbook
type: object
status:
description: PipelineStatus of Pipeline
description: PlaybookStatus of Playbook
properties:
failedDetail:
description: FailedDetail will record the failed tasks.
items:
description: PipelineFailedDetail store failed message when pipeline
description: PlaybookFailedDetail store failed message when playbook
run failed.
properties:
hosts:
description: failed Hosts Result of failed task.
items:
description: PipelineFailedDetailHost detail failed message
description: PlaybookFailedDetailHost detail failed message
for each host.
properties:
host:
@ -2035,7 +2035,7 @@ spec:
a terminal problem
type: string
phase:
description: Phase of pipeline.
description: Phase of playbook.
type: string
taskResult:
description: TaskResult total related tasks execute result.

View File

@ -52,7 +52,7 @@ spec:
- --leader-election=true
{{-if not .Values.controller-manager.webhook.enabled }}
- --controllers=*
- --controllers=-pipeline-webhook
- --controllers=-playbook-webhook
{{- end }}
{{- end }}
env:

View File

@ -68,12 +68,12 @@ webhooks:
service:
name: kk-webhook-service
namespace: {{ .Release.Namespace }}
path: /mutate-kubekey-kubesphere-io-v1beta1-pipeline
path: /mutate-kubekey-kubesphere-io-v1beta1-playbook
{{- if not $isCertManager }}
caBundle: {{ b64enc $ca.Cert }}
{{- end }}
failurePolicy: Fail
name: default.pipeline.kubekey.kubesphere.io
name: default.playbook.kubekey.kubesphere.io
rules:
- apiGroups:
- kubekey.kubesphere.io
@ -83,6 +83,6 @@ webhooks:
- CREATE
- UPDATE
resources:
- pipelines
- playbooks
sideEffects: None
{{- end }}

29
go.mod
View File

@ -4,6 +4,8 @@ go 1.23.3
require (
github.com/Masterminds/sprig/v3 v3.3.0
github.com/cockroachdb/errors v1.11.3
github.com/containerd/containerd v1.7.27
github.com/fsnotify/fsnotify v1.7.0
github.com/go-git/go-git/v5 v5.11.0
github.com/google/gops v0.3.28
@ -35,7 +37,7 @@ require (
dario.cat/mergo v1.0.1 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
@ -44,6 +46,11 @@ require (
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
@ -53,19 +60,20 @@ require (
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.5.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gobuffalo/flect v1.0.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/cel-go v0.20.1 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
@ -78,7 +86,10 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
@ -96,8 +107,10 @@ require (
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sergi/go-diff v1.1.0 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/skeema/knownhosts v1.2.1 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
@ -118,20 +131,18 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.32.0 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/oauth2 v0.24.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/grpc v1.65.1 // indirect
google.golang.org/protobuf v1.35.1 // indirect
google.golang.org/protobuf v1.35.2 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect

62
go.sum
View File

@ -9,8 +9,8 @@ github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lpr
github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
@ -33,6 +33,20 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I=
github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII=
github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0=
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0=
github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
github.com/coredns/corefile-migration v1.0.24 h1:NL/zRKijhJZLYlNnMr891DRv5jXgfd3Noons1M6oTpc=
@ -71,8 +85,12 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
@ -88,13 +106,14 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
@ -112,8 +131,8 @@ github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@ -155,6 +174,8 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@ -189,8 +210,11 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
@ -208,13 +232,14 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/schollz/progressbar/v3 v3.14.5 h1:97RrSxbBASxQuZN9yemnyGrFZ/swnG6IrEe2R0BseX8=
github.com/schollz/progressbar/v3 v3.14.5/go.mod h1:Nrzpuw3Nl0srLY0VlTvC4V6RL50pcEymjy6qyJAaLa0=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
@ -239,6 +264,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
@ -255,8 +281,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
go.etcd.io/etcd/api/v3 v3.5.17 h1:cQB8eb8bxwuxOilBpMJAEo8fAONyrdXTHUNcMd8yT1w=
go.etcd.io/etcd/api/v3 v3.5.17/go.mod h1:d1hvkRuXkts6PmaYk2Vrgqbv7H4ADfAKhyJqHNLJCB4=
go.etcd.io/etcd/client/pkg/v3 v3.5.17 h1:XxnDXAWq2pnxqx76ljWwiQ9jylbpC4rvkAeRVOUKKVw=
@ -311,8 +337,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@ -324,8 +348,8 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -388,16 +412,16 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg=
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.65.1 h1:toSN4j5/Xju+HVovfaY5g1YZVuJeHzQZhP8eJ0L0f1I=
google.golang.org/grpc v1.65.1/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=

View File

@ -18,13 +18,12 @@ package connector
import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"net"
"os"
"github.com/cockroachdb/errors"
"k8s.io/klog/v2"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
@ -62,7 +61,7 @@ type Connector interface {
func NewConnector(host string, v variable.Variable) (Connector, error) {
vars, err := v.Get(variable.GetAllVariable(host))
if err != nil {
return nil, fmt.Errorf("failed to get %q host variable for", host)
return nil, errors.Wrapf(err, "failed to get host %q variable", host)
}
connectorVars := make(map[string]any)
if c1, ok := vars.(map[string]any)[_const.VariableConnector]; ok {
@ -80,7 +79,7 @@ func NewConnector(host string, v variable.Variable) (Connector, error) {
case connectedKubernetes:
workdir, err := v.Get(variable.GetWorkDir())
if err != nil {
return nil, fmt.Errorf("failed to get workdir from variable. error is %w", err)
return nil, errors.Wrap(err, "failed to get workdir from variable")
}
wd, ok := workdir.(string)
if !ok {

View File

@ -23,6 +23,7 @@ import (
"os"
"path/filepath"
"github.com/cockroachdb/errors"
"k8s.io/klog/v2"
"k8s.io/utils/exec"
@ -37,7 +38,7 @@ var _ Connector = &kubernetesConnector{}
func newKubernetesConnector(host string, workdir string, connectorVars map[string]any) (*kubernetesConnector, error) {
kubeconfig, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorKubeconfig)
if err != nil && host != _const.VariableLocalHost {
return nil, err
return nil, errors.WithStack(err)
}
return &kubernetesConnector{
@ -68,27 +69,29 @@ func (c *kubernetesConnector) Init(_ context.Context) error {
}
// set home dir for each kubernetes
c.homedir = filepath.Join(c.workdir, _const.KubernetesDir, c.clusterName)
if _, err := os.Stat(c.homedir); err != nil && os.IsNotExist(err) {
if _, err := os.Stat(c.homedir); err != nil {
if !os.IsNotExist(err) {
return errors.Wrapf(err, "failed to stat local dir %q for cluster %q", c.homedir, c.clusterName)
}
// if dir is not exist, create it.
if err := os.MkdirAll(c.homedir, os.ModePerm); err != nil {
klog.V(4).ErrorS(err, "Failed to create local dir", "cluster", c.clusterName)
// if dir is not exist, create it.
return err
return errors.Wrapf(err, "failed to create local dir %q for cluster %q", c.homedir, c.clusterName)
}
}
// create kubeconfig path in home dir
kubeconfigPath := filepath.Join(c.homedir, kubeconfigRelPath)
if _, err := os.Stat(kubeconfigPath); err != nil && os.IsNotExist(err) {
if _, err := os.Stat(kubeconfigPath); err != nil {
if !os.IsNotExist(err) {
return errors.Wrapf(err, "failed to stat local path %q for cluster %q", kubeconfigPath, c.clusterName)
}
if err := os.MkdirAll(filepath.Dir(kubeconfigPath), os.ModePerm); err != nil {
klog.V(4).ErrorS(err, "Failed to create local dir", "cluster", c.clusterName)
return err
return errors.Wrapf(err, "failed to create local path %q for cluster %q", kubeconfigPath, c.clusterName)
}
}
// write kubeconfig to home dir
if err := os.WriteFile(kubeconfigPath, []byte(c.kubeconfig), os.ModePerm); err != nil {
klog.V(4).ErrorS(err, "Failed to create kubeconfig file", "cluster", c.clusterName)
return err
return errors.Wrapf(err, "failed to create kubeconfig file for cluster %q", c.clusterName)
}
// find command interpreter in env. default /bin/bash
sl, ok := os.LookupEnv(_const.ENV_SHELL)
@ -109,15 +112,19 @@ func (c *kubernetesConnector) Close(_ context.Context) error {
// and it may be necessary to keep them in separate directories locally.
func (c *kubernetesConnector) PutFile(_ context.Context, src []byte, dst string, mode fs.FileMode) error {
dst = filepath.Join(c.homedir, dst)
if _, err := os.Stat(filepath.Dir(dst)); err != nil && os.IsNotExist(err) {
if _, err := os.Stat(filepath.Dir(dst)); err != nil {
if !os.IsNotExist(err) {
return errors.Wrapf(err, "failed to stat local dir %q for cluster %q", dst, c.clusterName)
}
if err := os.MkdirAll(filepath.Dir(dst), mode); err != nil {
klog.V(4).ErrorS(err, "Failed to create local dir", "dst_file", dst)
return err
return errors.Wrapf(err, "failed to create local dir %q for cluster %q", dst, c.clusterName)
}
}
if err := os.WriteFile(dst, src, mode); err != nil {
return errors.Wrapf(err, "failed to write file %q for cluster %q", dst, c.clusterName)
}
return os.WriteFile(dst, src, mode)
return nil
}
// FetchFile copy src file to dst writer. src is the local filename, dst is the local writer.

View File

@ -19,13 +19,13 @@ package connector
import (
"bytes"
"context"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"runtime"
"github.com/cockroachdb/errors"
"k8s.io/klog/v2"
"k8s.io/utils/exec"
@ -38,7 +38,7 @@ var _ GatherFacts = &localConnector{}
func newLocalConnector(connectorVars map[string]any) *localConnector {
password, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorPassword)
if err != nil {
if err != nil { // password is not necessary when execute with root user.
klog.V(4).InfoS("get connector sudo password failed, execute command without sudo", "error", err)
}
@ -70,31 +70,29 @@ func (c *localConnector) Close(context.Context) error {
// PutFile copies the src file to the dst file. src is the local filename, dst is the local filename.
func (c *localConnector) PutFile(_ context.Context, src []byte, dst string, mode fs.FileMode) error {
if _, err := os.Stat(filepath.Dir(dst)); err != nil && os.IsNotExist(err) {
if _, err := os.Stat(filepath.Dir(dst)); err != nil {
if !os.IsNotExist(err) {
return errors.Wrapf(err, "failed to stat local dir %q", dst)
}
if err := os.MkdirAll(filepath.Dir(dst), mode); err != nil {
klog.V(4).ErrorS(err, "Failed to create local dir", "dst_file", dst)
return err
return errors.Wrapf(err, "failed to create local dir %q", dst)
}
}
if err := os.WriteFile(dst, src, mode); err != nil {
return errors.Wrapf(err, "failed to write file %q", dst)
}
return os.WriteFile(dst, src, mode)
return nil
}
// FetchFile copies the src file to the dst writer. src is the local filename, dst is the local writer.
func (c *localConnector) FetchFile(_ context.Context, src string, dst io.Writer) error {
var err error
file, err := os.Open(src)
if err != nil {
klog.V(4).ErrorS(err, "Failed to read local file failed", "src_file", src)
return err
return errors.Wrapf(err, "failed to open local file %q", src)
}
if _, err := io.Copy(dst, file); err != nil {
klog.V(4).ErrorS(err, "Failed to copy local file", "src_file", src)
return err
return errors.Wrapf(err, "failed to copy local file %q", src)
}
return nil
@ -126,22 +124,22 @@ func (c *localConnector) HostInfo(ctx context.Context) (map[string]any, error) {
osVars := make(map[string]any)
var osRelease bytes.Buffer
if err := c.FetchFile(ctx, "/etc/os-release", &osRelease); err != nil {
return nil, fmt.Errorf("failed to fetch os-release: %w", err)
return nil, errors.Wrap(err, "failed to fetch os-release")
}
osVars[_const.VariableOSRelease] = convertBytesToMap(osRelease.Bytes(), "=")
kernel, err := c.ExecuteCommand(ctx, "uname -r")
if err != nil {
return nil, fmt.Errorf("get kernel version error: %w", err)
return nil, errors.Wrap(err, "failed to get kernel version")
}
osVars[_const.VariableOSKernelVersion] = string(bytes.TrimSpace(kernel))
hn, err := c.ExecuteCommand(ctx, "hostname")
if err != nil {
return nil, fmt.Errorf("get hostname error: %w", err)
return nil, errors.Wrap(err, "failed to get hostname")
}
osVars[_const.VariableOSHostName] = string(bytes.TrimSpace(hn))
arch, err := c.ExecuteCommand(ctx, "arch")
if err != nil {
return nil, fmt.Errorf("get arch error: %w", err)
return nil, errors.Wrap(err, "failed to get arch")
}
osVars[_const.VariableOSArchitecture] = string(bytes.TrimSpace(arch))
@ -149,12 +147,12 @@ func (c *localConnector) HostInfo(ctx context.Context) (map[string]any, error) {
procVars := make(map[string]any)
var cpu bytes.Buffer
if err := c.FetchFile(ctx, "/proc/cpuinfo", &cpu); err != nil {
return nil, fmt.Errorf("get cpuinfo error: %w", err)
return nil, errors.Wrap(err, "failed to get cpuinfo")
}
procVars[_const.VariableProcessCPU] = convertBytesToSlice(cpu.Bytes(), ":")
var mem bytes.Buffer
if err := c.FetchFile(ctx, "/proc/meminfo", &mem); err != nil {
return nil, fmt.Errorf("get meminfo error: %w", err)
return nil, errors.Wrap(err, "failed to get meminfo")
}
procVars[_const.VariableProcessMemory] = convertBytesToMap(mem.Bytes(), ":")
@ -164,7 +162,7 @@ func (c *localConnector) HostInfo(ctx context.Context) (map[string]any, error) {
}, nil
default:
klog.V(4).ErrorS(nil, "Unsupported platform", "platform", runtime.GOOS)
return make(map[string]any), nil
}
return make(map[string]any), nil
}

View File

@ -19,7 +19,6 @@ package connector
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/fs"
@ -29,6 +28,7 @@ import (
"strings"
"time"
"github.com/cockroachdb/errors"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
"k8s.io/klog/v2"
@ -122,11 +122,11 @@ func (c *sshConnector) Init(context.Context) error {
if _, err := os.Stat(c.PrivateKey); err == nil {
key, err := os.ReadFile(c.PrivateKey)
if err != nil {
return fmt.Errorf("read private key error: %w", err)
return errors.Wrapf(err, "failed to read private key %q", c.PrivateKey)
}
privateKey, err := ssh.ParsePrivateKey(key)
if err != nil {
return fmt.Errorf("parse private key error: %w", err)
return errors.Wrapf(err, "failed to parse private key %q", c.PrivateKey)
}
auth = append(auth, ssh.PublicKeys(privateKey))
}
@ -138,22 +138,20 @@ func (c *sshConnector) Init(context.Context) error {
Timeout: 30 * time.Second,
})
if err != nil {
klog.V(4).ErrorS(err, "Dial ssh server failed", "host", c.Host, "port", c.Port)
return err
return errors.Wrapf(err, "failed to dial %q:%d ssh server", c.Host, c.Port)
}
c.client = sshClient
// get shell from env
session, err := sshClient.NewSession()
if err != nil {
return fmt.Errorf("create session error: %w", err)
return errors.Wrap(err, "failed to create session")
}
defer session.Close()
output, err := session.CombinedOutput("echo $SHELL")
if err != nil {
return fmt.Errorf("env command error: %w", err)
return errors.Wrap(err, "failed to env command")
}
if strings.TrimSpace(string(output)) != "" {
@ -173,35 +171,32 @@ func (c *sshConnector) PutFile(_ context.Context, src []byte, dst string, mode f
// create sftp client
sftpClient, err := sftp.NewClient(c.client)
if err != nil {
klog.V(4).ErrorS(err, "Failed to create sftp client")
return err
return errors.Wrap(err, "failed to create sftp client")
}
defer sftpClient.Close()
// create remote file
if _, err := sftpClient.Stat(filepath.Dir(dst)); err != nil && os.IsNotExist(err) {
if _, err := sftpClient.Stat(filepath.Dir(dst)); err != nil {
if !os.IsNotExist(err) {
return errors.Wrapf(err, "failed to stat dir %q", dst)
}
if err := sftpClient.MkdirAll(filepath.Dir(dst)); err != nil {
klog.V(4).ErrorS(err, "Failed to create remote dir", "remote_file", dst)
return err
return errors.Wrapf(err, "failed to create remote dir %q", dst)
}
}
rf, err := sftpClient.Create(dst)
if err != nil {
klog.V(4).ErrorS(err, "Failed to create remote file", "remote_file", dst)
return err
return errors.Wrapf(err, "failed to create remote file %q", dst)
}
defer rf.Close()
if _, err = rf.Write(src); err != nil {
klog.V(4).ErrorS(err, "Failed to write content to remote file", "remote_file", dst)
return err
return errors.Wrapf(err, "failed to write content to remote file %q", dst)
}
if err := rf.Chmod(mode); err != nil {
return errors.Wrapf(err, "failed to chmod remote file %q", dst)
}
return rf.Chmod(mode)
return nil
}
// FetchFile from remote node. src is the remote filename, dst is the local writer.
@ -209,24 +204,18 @@ func (c *sshConnector) FetchFile(_ context.Context, src string, dst io.Writer) e
// create sftp client
sftpClient, err := sftp.NewClient(c.client)
if err != nil {
klog.V(4).ErrorS(err, "Failed to create sftp client", "remote_file", src)
return err
return errors.Wrap(err, "failed to create sftp client")
}
defer sftpClient.Close()
rf, err := sftpClient.Open(src)
if err != nil {
klog.V(4).ErrorS(err, "Failed to open file", "remote_file", src)
return err
return errors.Wrapf(err, "failed to open remote file %q", src)
}
defer rf.Close()
if _, err := io.Copy(dst, rf); err != nil {
klog.V(4).ErrorS(err, "Failed to copy file", "remote_file", src)
return err
return errors.Wrapf(err, "failed to copy file %q", src)
}
return nil
@ -239,36 +228,34 @@ func (c *sshConnector) ExecuteCommand(_ context.Context, cmd string) ([]byte, er
// create ssh session
session, err := c.client.NewSession()
if err != nil {
klog.V(4).ErrorS(err, "Failed to create ssh session")
return nil, err
return nil, errors.Wrap(err, "failed to create ssh session")
}
defer session.Close()
// get pipe from session
stdin, err := session.StdinPipe()
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to get stdin pipe")
}
stdout, err := session.StdoutPipe()
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to get stdout pipe")
}
stderr, err := session.StderrPipe()
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to get stderr pipe")
}
// Start the remote command
if err := session.Start(cmd); err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to start session")
}
if c.Password != "" {
if _, err := stdin.Write([]byte(c.Password + "\n")); err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to write password")
}
}
if err := stdin.Close(); err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to close stdin pipe")
}
// Create buffers to store stdout and stderr output
@ -310,22 +297,22 @@ func (c *sshConnector) HostInfo(ctx context.Context) (map[string]any, error) {
osVars := make(map[string]any)
var osRelease bytes.Buffer
if err := c.FetchFile(ctx, "/etc/os-release", &osRelease); err != nil {
return nil, fmt.Errorf("failed to fetch os-release: %w", err)
return nil, errors.Wrap(err, "failed to fetch os-release")
}
osVars[_const.VariableOSRelease] = convertBytesToMap(osRelease.Bytes(), "=")
kernel, err := c.ExecuteCommand(ctx, "uname -r")
if err != nil {
return nil, fmt.Errorf("get kernel version error: %w", err)
return nil, errors.Wrap(err, "failed to get kernel version")
}
osVars[_const.VariableOSKernelVersion] = string(bytes.TrimSpace(kernel))
hn, err := c.ExecuteCommand(ctx, "hostname")
if err != nil {
return nil, fmt.Errorf("get hostname error: %w", err)
return nil, errors.Wrap(err, "failed to get hostname")
}
osVars[_const.VariableOSHostName] = string(bytes.TrimSpace(hn))
arch, err := c.ExecuteCommand(ctx, "arch")
if err != nil {
return nil, fmt.Errorf("get arch error: %w", err)
return nil, errors.Wrap(err, "failed to get arch")
}
osVars[_const.VariableOSArchitecture] = string(bytes.TrimSpace(arch))
@ -333,12 +320,12 @@ func (c *sshConnector) HostInfo(ctx context.Context) (map[string]any, error) {
procVars := make(map[string]any)
var cpu bytes.Buffer
if err := c.FetchFile(ctx, "/proc/cpuinfo", &cpu); err != nil {
return nil, fmt.Errorf("get cpuinfo error: %w", err)
return nil, errors.Wrap(err, "failed to get cpuinfo")
}
procVars[_const.VariableProcessCPU] = convertBytesToSlice(cpu.Bytes(), ":")
var mem bytes.Buffer
if err := c.FetchFile(ctx, "/proc/meminfo", &mem); err != nil {
return nil, fmt.Errorf("get meminfo error: %w", err)
return nil, errors.Wrap(err, "failed to get meminfo error")
}
procVars[_const.VariableProcessMemory] = convertBytesToMap(mem.Bytes(), ":")

View File

@ -85,32 +85,32 @@ const ( // === From runtime ===
const ( // === From env ===
// ENV_SHELL which shell operator use in local connector.
ENV_SHELL = "SHELL"
// ENV_EXECUTOR_VERBOSE which verbose use in pipeline pod.
// ENV_EXECUTOR_VERBOSE which verbose use in playbook pod.
ENV_EXECUTOR_VERBOSE = "EXECUTOR_VERBOSE"
// ENV_EXECUTOR_IMAGE which image use in pipeline pod.
// ENV_EXECUTOR_IMAGE which image use in playbook pod.
ENV_EXECUTOR_IMAGE = "EXECUTOR_IMAGE"
// ENV_EXECUTOR_IMAGE_PULLPOLICY which imagePolicy use in pipeline pod.
// ENV_EXECUTOR_IMAGE_PULLPOLICY which imagePolicy use in playbook pod.
ENV_EXECUTOR_IMAGE_PULLPOLICY = "EXECUTOR_IMAGE_PULLPOLICY"
// ENV_EXECUTOR_CLUSTERROLE which clusterrole use in pipeline pod.
// ENV_EXECUTOR_CLUSTERROLE which clusterrole use in playbook pod.
ENV_EXECUTOR_CLUSTERROLE = "EXECUTOR_CLUSTERROLE"
// ENV_CAPKK_GROUP_CONTROLPLANE the control_plane groups for capkk pipeline
// ENV_CAPKK_GROUP_CONTROLPLANE the control_plane groups for capkk playbook
ENV_CAPKK_GROUP_CONTROLPLANE = "CAPKK_GROUP_CONTROLPLANE"
// ENV_CAPKK_GROUP_WORKER the worker groups for capkk pipeline
// ENV_CAPKK_GROUP_WORKER the worker groups for capkk playbook
ENV_CAPKK_GROUP_WORKER = "CAPKK_GROUP_WORKER"
// ENV_CAPKK_VOLUME_BINARY is the binary dir for capkk pipeline. used in offline installer.
// ENV_CAPKK_VOLUME_BINARY is the binary dir for capkk playbook. used in offline installer.
// the value should be a pvc name.
ENV_CAPKK_VOLUME_BINARY = "CAPKK_VOLUME_BINARY"
// ENV_CAPKK_VOLUME_PROJECT is the project dir for capkk pipeline. the default project has contained in IMAGE.
// ENV_CAPKK_VOLUME_PROJECT is the project dir for capkk playbook. the default project has contained in IMAGE.
// the value should be a pvc name.
ENV_CAPKK_VOLUME_PROJECT = "CAPKK_VOLUME_PROJECT"
// ENV_CAPKK_VOLUME_WORKDIR is the workdir for capkk pipeline.
// ENV_CAPKK_VOLUME_WORKDIR is the workdir for capkk playbook.
ENV_CAPKK_VOLUME_WORKDIR = "CAPKK_VOLUME_WORKDIR"
)
const ( // === From CAPKK base on GetCAPKKProject() ===
// CAPKKWorkdir is the work dir for capkk pipeline.
// CAPKKWorkdir is the work dir for capkk playbook.
CAPKKWorkdir = "/kubekey/"
// CAPKKProjectdir is the project dir for capkk pipeline.
// CAPKKProjectdir is the project dir for capkk playbook.
CAPKKProjectdir = "/capkk/project/"
// CAPKKBinarydir is the path of binary.
CAPKKBinarydir = "/capkk/kubekey/"

View File

@ -42,10 +42,10 @@ work_dir/
|
|-- runtime/
|-- group/version/
| | |-- pipelines/
| | |-- playbooks/
| | | |-- namespace/
| | | | |-- pipeline.yaml
| | | | |-- /pipelineName/variable/
| | | | |-- playbook.yaml
| | | | |-- /playbookName/variable/
| | | | | |-- location.json
| | | | | |-- hostname.json
| | |-- tasks/
@ -105,15 +105,15 @@ const BinaryImagesDir = "images"
// RuntimeDir used to store runtime data for the current task execution. By default, its path is set to {{ .work_dir/runtime }}.
const RuntimeDir = "runtime"
// RuntimePipelineDir stores pipeline resources created during pipeline execution.
const RuntimePipelineDir = "pipelines"
// RuntimePlaybookDir stores playbook resources created during playbook execution.
const RuntimePlaybookDir = "playbooks"
// pipeline.yaml contains the data for a pipeline resource.
// playbook.yaml contains the data for a playbook resource.
// RuntimePipelineVariableDir is a fixed directory name under runtime, used to store task execution parameters.
const RuntimePipelineVariableDir = "variable"
// RuntimePlaybookVariableDir is a fixed directory name under runtime, used to store task execution parameters.
const RuntimePlaybookVariableDir = "variable"
// RuntimePipelineTaskDir is a fixed directory name under runtime, used to store the task execution status.
// RuntimePlaybookTaskDir is a fixed directory name under runtime, used to store the task execution status.
// task.yaml contains the data for a task resource.
@ -125,5 +125,5 @@ const RuntimePipelineVariableDir = "variable"
// inventory.yaml contains the data for an inventory resource.
// KubernetesDir represents the remote host directory for each Kubernetes connection created during pipeline execution.
// KubernetesDir represents the remote host directory for each Kubernetes connection created during playbook execution.
const KubernetesDir = "kubernetes"

View File

@ -18,15 +18,14 @@ package core
import (
"context"
"errors"
"os"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
@ -43,30 +42,30 @@ import (
)
const (
// pipelinePodLabel set in pod. value is which pipeline belongs to.
podPipelineLabel = "kubekey.kubesphere.io/pipeline"
// playbookPodLabel set in pod. value is which playbook belongs to.
podPlaybookLabel = "kubekey.kubesphere.io/playbook"
defaultExecutorImage = "docker.io/kubesphere/executor:latest"
executorContainer = "executor"
)
// PipelineReconciler reconcile pipeline
type PipelineReconciler struct {
// PlaybookReconciler reconcile playbook
type PlaybookReconciler struct {
ctrlclient.Client
record.EventRecorder
MaxConcurrentReconciles int
}
var _ options.Controller = &PipelineReconciler{}
var _ reconcile.Reconciler = &PipelineReconciler{}
var _ options.Controller = &PlaybookReconciler{}
var _ reconcile.Reconciler = &PlaybookReconciler{}
// Name implements controllers.controller.
func (r *PipelineReconciler) Name() string {
return "pipeline-reconciler"
func (r *PlaybookReconciler) Name() string {
return "playbook-reconciler"
}
// SetupWithManager implements controllers.controller.
func (r *PipelineReconciler) SetupWithManager(mgr manager.Manager, o options.ControllerManagerServerOptions) error {
func (r *PlaybookReconciler) SetupWithManager(mgr manager.Manager, o options.ControllerManagerServerOptions) error {
r.Client = mgr.GetClient()
r.EventRecorder = mgr.GetEventRecorderFor(r.Name())
@ -74,12 +73,12 @@ func (r *PipelineReconciler) SetupWithManager(mgr manager.Manager, o options.Con
WithOptions(ctrlcontroller.Options{
MaxConcurrentReconciles: o.MaxConcurrentReconciles,
}).
For(&kkcorev1.Pipeline{}).
// Watches pod to sync pipeline.
For(&kkcorev1.Playbook{}).
// Watches pod to sync playbook.
Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj ctrlclient.Object) []reconcile.Request {
pipeline := &kkcorev1.Pipeline{}
if err := util.GetOwnerFromObject(ctx, r.Client, obj, pipeline); err == nil {
return []ctrl.Request{{NamespacedName: ctrlclient.ObjectKeyFromObject(pipeline)}}
playbook := &kkcorev1.Playbook{}
if err := util.GetOwnerFromObject(ctx, r.Client, obj, playbook); err == nil {
return []ctrl.Request{{NamespacedName: ctrlclient.ObjectKeyFromObject(playbook)}}
}
return nil
@ -91,126 +90,125 @@ func (r *PipelineReconciler) SetupWithManager(mgr manager.Manager, o options.Con
// +kubebuilder:rbac:groups="",resources=pods;events,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="coordination.k8s.io",resources=leases,verbs=get;list;watch;create;update;patch;delete
func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, retErr error) {
// get pipeline
pipeline := &kkcorev1.Pipeline{}
err := r.Client.Get(ctx, req.NamespacedName, pipeline)
if err != nil {
func (r PlaybookReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, retErr error) {
// get playbook
playbook := &kkcorev1.Playbook{}
if err := r.Client.Get(ctx, req.NamespacedName, playbook); err != nil {
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
return ctrl.Result{}, errors.Wrapf(err, "failed to get playbook %q", req.String())
}
helper, err := patch.NewHelper(pipeline, r.Client)
helper, err := patch.NewHelper(playbook, r.Client)
if err != nil {
return ctrl.Result{}, err
return ctrl.Result{}, errors.WithStack(err)
}
defer func() {
if retErr != nil {
if pipeline.Status.FailureReason == "" {
pipeline.Status.FailureReason = kkcorev1.PipelineFailedReasonUnknown
if playbook.Status.FailureReason == "" {
playbook.Status.FailureReason = kkcorev1.PlaybookFailedReasonUnknown
}
pipeline.Status.FailureMessage = retErr.Error()
playbook.Status.FailureMessage = retErr.Error()
}
if err := r.reconcileStatus(ctx, pipeline); err != nil {
retErr = errors.Join(retErr, err)
if err := r.reconcileStatus(ctx, playbook); err != nil {
retErr = errors.Join(retErr, errors.WithStack(err))
}
if err := helper.Patch(ctx, pipeline); err != nil {
retErr = errors.Join(retErr, err)
if err := helper.Patch(ctx, playbook); err != nil {
retErr = errors.Join(retErr, errors.WithStack(err))
}
}()
// Add finalizer first if not set to avoid the race condition between init and delete.
// Note: Finalizers in general can only be added when the deletionTimestamp is not set.
if pipeline.ObjectMeta.DeletionTimestamp.IsZero() && !controllerutil.ContainsFinalizer(pipeline, kkcorev1.PipelineCompletedFinalizer) {
controllerutil.AddFinalizer(pipeline, kkcorev1.PipelineCompletedFinalizer)
if playbook.ObjectMeta.DeletionTimestamp.IsZero() && !controllerutil.ContainsFinalizer(playbook, kkcorev1.PlaybookCompletedFinalizer) {
controllerutil.AddFinalizer(playbook, kkcorev1.PlaybookCompletedFinalizer)
return ctrl.Result{}, nil
}
// Handle deleted clusters
if !pipeline.DeletionTimestamp.IsZero() {
return reconcile.Result{}, r.reconcileDelete(ctx, pipeline)
if !playbook.DeletionTimestamp.IsZero() {
return reconcile.Result{}, errors.WithStack(r.reconcileDelete(ctx, playbook))
}
return ctrl.Result{}, r.reconcileNormal(ctx, pipeline)
return ctrl.Result{}, errors.WithStack(r.reconcileNormal(ctx, playbook))
}
func (r PipelineReconciler) reconcileStatus(ctx context.Context, pipeline *kkcorev1.Pipeline) error {
// get pod from pipeline
func (r PlaybookReconciler) reconcileStatus(ctx context.Context, playbook *kkcorev1.Playbook) error {
// get pod from playbook
podList := &corev1.PodList{}
if err := util.GetObjectListFromOwner(ctx, r.Client, pipeline, podList); err != nil {
return err
if err := util.GetObjectListFromOwner(ctx, r.Client, playbook, podList); err != nil {
return errors.Wrapf(err, "failed to get pod list from playbook %q", ctrlclient.ObjectKeyFromObject(playbook))
}
// should only one pod for pipeline
// should only one pod for playbook
if len(podList.Items) != 1 {
return nil
}
if pipeline.Status.Phase != kkcorev1.PipelinePhaseFailed && pipeline.Status.Phase != kkcorev1.PipelinePhaseSucceeded {
if playbook.Status.Phase != kkcorev1.PlaybookPhaseFailed && playbook.Status.Phase != kkcorev1.PlaybookPhaseSucceeded {
switch pod := podList.Items[0]; pod.Status.Phase {
case corev1.PodFailed:
pipeline.Status.Phase = kkcorev1.PipelinePhaseFailed
pipeline.Status.FailureReason = kkcorev1.PipelineFailedReasonPodFailed
pipeline.Status.FailureMessage = pod.Status.Message
playbook.Status.Phase = kkcorev1.PlaybookPhaseFailed
playbook.Status.FailureReason = kkcorev1.PlaybookFailedReasonPodFailed
playbook.Status.FailureMessage = pod.Status.Message
for _, cs := range pod.Status.ContainerStatuses {
if cs.Name == executorContainer && cs.State.Terminated != nil {
pipeline.Status.FailureMessage = cs.State.Terminated.Reason + ": " + cs.State.Terminated.Message
playbook.Status.FailureMessage = cs.State.Terminated.Reason + ": " + cs.State.Terminated.Message
}
}
case corev1.PodSucceeded:
pipeline.Status.Phase = kkcorev1.PipelinePhaseSucceeded
playbook.Status.Phase = kkcorev1.PlaybookPhaseSucceeded
default:
// the pipeline status will set by pod
// the playbook status will set by pod
}
}
return nil
}
func (r *PipelineReconciler) reconcileDelete(ctx context.Context, pipeline *kkcorev1.Pipeline) error {
func (r *PlaybookReconciler) reconcileDelete(ctx context.Context, playbook *kkcorev1.Playbook) error {
podList := &corev1.PodList{}
if err := util.GetObjectListFromOwner(ctx, r.Client, pipeline, podList); err != nil {
return err
if err := util.GetObjectListFromOwner(ctx, r.Client, playbook, podList); err != nil {
return errors.Wrapf(err, "failed to get pod list from playbook %q", ctrlclient.ObjectKeyFromObject(playbook))
}
if pipeline.Status.Phase == kkcorev1.PipelinePhaseFailed || pipeline.Status.Phase == kkcorev1.PipelinePhaseSucceeded {
// pipeline has completed. delete the owner pods.
if playbook.Status.Phase == kkcorev1.PlaybookPhaseFailed || playbook.Status.Phase == kkcorev1.PlaybookPhaseSucceeded {
// playbook has completed. delete the owner pods.
for _, obj := range podList.Items {
if err := r.Client.Delete(ctx, &obj); err != nil {
return err
return errors.WithStack(err)
}
}
}
if len(podList.Items) == 0 {
controllerutil.RemoveFinalizer(pipeline, kkcorev1.PipelineCompletedFinalizer)
controllerutil.RemoveFinalizer(playbook, kkcorev1.PlaybookCompletedFinalizer)
}
return nil
}
func (r *PipelineReconciler) reconcileNormal(ctx context.Context, pipeline *kkcorev1.Pipeline) error {
switch pipeline.Status.Phase {
func (r *PlaybookReconciler) reconcileNormal(ctx context.Context, playbook *kkcorev1.Playbook) error {
switch playbook.Status.Phase {
case "":
pipeline.Status.Phase = kkcorev1.PipelinePhasePending
case kkcorev1.PipelinePhasePending:
pipeline.Status.Phase = kkcorev1.PipelinePhaseRunning
case kkcorev1.PipelinePhaseRunning:
return r.dealRunningPipeline(ctx, pipeline)
playbook.Status.Phase = kkcorev1.PlaybookPhasePending
case kkcorev1.PlaybookPhasePending:
playbook.Status.Phase = kkcorev1.PlaybookPhaseRunning
case kkcorev1.PlaybookPhaseRunning:
return r.dealRunningPlaybook(ctx, playbook)
}
return nil
}
func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *kkcorev1.Pipeline) error {
func (r *PlaybookReconciler) dealRunningPlaybook(ctx context.Context, playbook *kkcorev1.Playbook) error {
// check if pod is exist
podList := &corev1.PodList{}
if err := r.Client.List(ctx, podList, ctrlclient.InNamespace(pipeline.Namespace), ctrlclient.MatchingLabels{
podPipelineLabel: pipeline.Name,
if err := r.Client.List(ctx, podList, ctrlclient.InNamespace(playbook.Namespace), ctrlclient.MatchingLabels{
podPlaybookLabel: playbook.Name,
}); err != nil && !apierrors.IsNotFound(err) {
return err
return errors.Wrapf(err, "failed to list pod with label %s=%s", podPlaybookLabel, playbook.Name)
} else if len(podList.Items) != 0 {
// could find exist pod
return nil
@ -218,29 +216,29 @@ func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *
// create pod
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pipeline.Name + "-",
Namespace: pipeline.Namespace,
GenerateName: playbook.Name + "-",
Namespace: playbook.Namespace,
Labels: map[string]string{
podPipelineLabel: pipeline.Name,
podPlaybookLabel: playbook.Name,
},
},
Spec: corev1.PodSpec{
ServiceAccountName: pipeline.Spec.ServiceAccountName,
ServiceAccountName: playbook.Spec.ServiceAccountName,
RestartPolicy: "Never",
Volumes: pipeline.Spec.Volumes,
Volumes: playbook.Spec.Volumes,
Containers: []corev1.Container{
{
Name: executorContainer,
Image: defaultExecutorImage,
Command: []string{"kk"},
Args: []string{"pipeline",
"--name", pipeline.Name,
"--namespace", pipeline.Namespace},
Args: []string{"playbook",
"--name", playbook.Name,
"--namespace", playbook.Namespace},
SecurityContext: &corev1.SecurityContext{
RunAsUser: ptr.To[int64](0),
RunAsGroup: ptr.To[int64](0),
},
VolumeMounts: pipeline.Spec.VolumeMounts,
VolumeMounts: playbook.Spec.VolumeMounts,
},
},
},
@ -257,11 +255,9 @@ func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *
if imagePullPolicy := os.Getenv(_const.ENV_EXECUTOR_IMAGE_PULLPOLICY); imagePullPolicy != "" {
pod.Spec.Containers[0].ImagePullPolicy = corev1.PullPolicy(imagePullPolicy)
}
if err := ctrl.SetControllerReference(pipeline, pod, r.Client.Scheme()); err != nil {
klog.ErrorS(err, "set controller reference error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return err
if err := ctrl.SetControllerReference(playbook, pod, r.Client.Scheme()); err != nil {
return errors.Wrapf(err, "failed to set ownerReference to playbook pod %q", pod.GenerateName)
}
return r.Client.Create(ctx, pod)
return errors.WithStack(r.Client.Create(ctx, pod))
}

View File

@ -2,9 +2,9 @@ package core
import (
"context"
"errors"
"os"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
@ -20,7 +20,7 @@ import (
)
const (
// defaultServiceAccountName is the default serviceaccount name for pipeline's executor pod.
// defaultServiceAccountName is the default serviceaccount name for playbook's executor pod.
defaultServiceAccountName = "kubekey-executor"
// defaultClusterRoleBindingName is the default clusterrolebinding name for defaultServiceAccountName.
defaultClusterRoleBindingName = "kubekey-executor"
@ -29,82 +29,81 @@ const (
// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=clusterroles;clusterrolebindings,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:webhook:mutating=true,name=default.pipeline.kubekey.kubesphere.io,serviceName=kk-webhook-service,serviceNamespace=capkk-system,path=/mutate-kubekey-kubesphere-io-v1-pipeline,failurePolicy=fail,sideEffects=None,groups=kubekey.kubesphere.io,resources=pipelines,verbs=create;update,versions=v1,admissionReviewVersions=v1
// +kubebuilder:webhook:mutating=true,name=default.playbook.kubekey.kubesphere.io,serviceName=kk-webhook-service,serviceNamespace=capkk-system,path=/mutate-kubekey-kubesphere-io-v1-playbook,failurePolicy=fail,sideEffects=None,groups=kubekey.kubesphere.io,resources=playbooks,verbs=create;update,versions=v1,admissionReviewVersions=v1
// PipelineWebhook handles mutating webhooks for Pipelines.
type PipelineWebhook struct {
// PlaybookWebhook handles mutating webhooks for Playbooks.
type PlaybookWebhook struct {
ctrlclient.Client
}
var _ admission.CustomDefaulter = &PipelineWebhook{}
var _ options.Controller = &PipelineWebhook{}
var _ admission.CustomDefaulter = &PlaybookWebhook{}
var _ options.Controller = &PlaybookWebhook{}
// Name implements controllers.Controller.
func (w *PipelineWebhook) Name() string {
return "pipeline-webhook"
func (w *PlaybookWebhook) Name() string {
return "playbook-webhook"
}
// SetupWithManager implements controllers.Controller.
func (w *PipelineWebhook) SetupWithManager(mgr ctrl.Manager, o options.ControllerManagerServerOptions) error {
func (w *PlaybookWebhook) SetupWithManager(mgr ctrl.Manager, o options.ControllerManagerServerOptions) error {
w.Client = mgr.GetClient()
return ctrl.NewWebhookManagedBy(mgr).
WithDefaulter(w).
For(&kkcorev1.Pipeline{}).
For(&kkcorev1.Playbook{}).
Complete()
}
// Default implements admission.CustomDefaulter.
func (w *PipelineWebhook) Default(ctx context.Context, obj runtime.Object) error {
pipeline, ok := obj.(*kkcorev1.Pipeline)
func (w *PlaybookWebhook) Default(ctx context.Context, obj runtime.Object) error {
playbook, ok := obj.(*kkcorev1.Playbook)
if !ok {
return errors.New("cannot convert to pipelines")
return errors.Errorf("failed to convert %q to playbooks", obj.GetObjectKind().GroupVersionKind().String())
}
if pipeline.Spec.ServiceAccountName == "" && os.Getenv(_const.ENV_EXECUTOR_CLUSTERROLE) != "" {
if playbook.Spec.ServiceAccountName == "" && os.Getenv(_const.ENV_EXECUTOR_CLUSTERROLE) != "" {
// should create default service account in current namespace
if err := w.syncServiceAccount(ctx, pipeline, os.Getenv(_const.ENV_EXECUTOR_CLUSTERROLE)); err != nil {
return err
if err := w.syncServiceAccount(ctx, playbook, os.Getenv(_const.ENV_EXECUTOR_CLUSTERROLE)); err != nil {
return errors.WithStack(err)
}
pipeline.Spec.ServiceAccountName = defaultServiceAccountName
playbook.Spec.ServiceAccountName = defaultServiceAccountName
}
if pipeline.Spec.ServiceAccountName == "" {
pipeline.Spec.ServiceAccountName = "default"
if playbook.Spec.ServiceAccountName == "" {
playbook.Spec.ServiceAccountName = "default"
}
return nil
}
func (w *PipelineWebhook) syncServiceAccount(ctx context.Context, pipeline *kkcorev1.Pipeline, clusterrole string) error {
func (w *PlaybookWebhook) syncServiceAccount(ctx context.Context, playbook *kkcorev1.Playbook, clusterrole string) error {
// check if clusterrole is exist
cr := &rbacv1.ClusterRole{}
if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Name: clusterrole}, cr); err != nil {
return err
return errors.WithStack(err)
}
// check if the default service account is exist
sa := &corev1.ServiceAccount{}
if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: pipeline.Namespace, Name: defaultServiceAccountName}, sa); err != nil {
if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: playbook.Namespace, Name: defaultServiceAccountName}, sa); err != nil {
if !apierrors.IsNotFound(err) {
return err
return errors.WithStack(err)
}
// create service account if not exist.
sa = &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Namespace: pipeline.Namespace,
Namespace: playbook.Namespace,
Name: defaultServiceAccountName,
},
}
if err := w.Client.Create(ctx, sa); err != nil {
return err
return errors.WithStack(err)
}
}
// check if the service account is bound to the default cluster role
crb := &rbacv1.ClusterRoleBinding{}
if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Name: defaultClusterRoleBindingName}, crb); err != nil {
if !apierrors.IsNotFound(err) {
return err
return errors.WithStack(err)
}
// create clusterrolebinding
// create clusterrolebinding if not exist
return w.Client.Create(ctx, &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: defaultClusterRoleBindingName,
@ -113,7 +112,7 @@ func (w *PipelineWebhook) syncServiceAccount(ctx context.Context, pipeline *kkco
{
Kind: "ServiceAccount",
Name: defaultServiceAccountName,
Namespace: pipeline.Namespace,
Namespace: playbook.Namespace,
},
},
RoleRef: rbacv1.RoleRef{
@ -124,7 +123,7 @@ func (w *PipelineWebhook) syncServiceAccount(ctx context.Context, pipeline *kkco
}
for _, sj := range crb.Subjects {
if sj.Kind == "ServiceAccount" && sj.Name == defaultServiceAccountName && sj.Namespace == pipeline.Namespace {
if sj.Kind == "ServiceAccount" && sj.Name == defaultServiceAccountName && sj.Namespace == playbook.Namespace {
return nil
}
}
@ -132,8 +131,8 @@ func (w *PipelineWebhook) syncServiceAccount(ctx context.Context, pipeline *kkco
ncrb.Subjects = append(crb.Subjects, rbacv1.Subject{
Kind: "ServiceAccount",
Name: defaultServiceAccountName,
Namespace: pipeline.Namespace,
Namespace: playbook.Namespace,
})
return w.Client.Patch(ctx, ncrb, ctrlclient.MergeFrom(crb))
return errors.WithStack(w.Client.Patch(ctx, ncrb, ctrlclient.MergeFrom(crb)))
}

View File

@ -10,6 +10,6 @@ import (
)
func init() {
utilruntime.Must(options.Register(&PipelineReconciler{}))
utilruntime.Must(options.Register(&PipelineWebhook{}))
utilruntime.Must(options.Register(&PlaybookReconciler{}))
utilruntime.Must(options.Register(&PlaybookWebhook{}))
}

View File

@ -4,6 +4,7 @@ import (
"context"
"os"
"github.com/cockroachdb/errors"
capkkinfrav1beta1 "github.com/kubesphere/kubekey/api/capkk/infrastructure/v1beta1"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
corev1 "k8s.io/api/core/v1"
@ -74,19 +75,19 @@ func newClusterScope(ctx context.Context, client ctrlclient.Client, clusterReq r
scope.Cluster = &clusterv1beta1.Cluster{}
if err := client.Get(ctx, scope.NamespacedName, scope.Cluster); err != nil {
// must have scope
return scope, err
return scope, errors.Wrapf(err, "failed to get cluster with scope %q", scope.String())
}
// KKCluster
if err := client.Get(ctx, ctrlclient.ObjectKey{
Namespace: scope.Cluster.Spec.InfrastructureRef.Namespace,
Name: scope.Cluster.Spec.InfrastructureRef.Name,
}, scope.KKCluster); err != nil {
return scope, err
return scope, errors.Wrapf(err, "failed to get kkcluster with scope %q", scope.String())
}
// ControlPlane
gv, err := schema.ParseGroupVersion(scope.Cluster.Spec.ControlPlaneRef.APIVersion)
if err != nil {
return scope, err
return scope, errors.Wrapf(err, "failed to get group version with scope %q", scope.String())
}
scope.ControlPlane.SetGroupVersionKind(schema.GroupVersionKind{
Group: gv.Group,
@ -97,7 +98,7 @@ func newClusterScope(ctx context.Context, client ctrlclient.Client, clusterReq r
Namespace: scope.Cluster.Spec.ControlPlaneRef.Namespace,
Name: scope.Cluster.Spec.ControlPlaneRef.Name,
}, scope.ControlPlane); err != nil && !apierrors.IsNotFound(err) {
return scope, err
return scope, errors.Wrapf(err, "failed to get control-plane with scope %q", scope.String())
}
// MachineDeployment
mdlist := &clusterv1beta1.MachineDeploymentList{}
@ -116,7 +117,7 @@ func newClusterScope(ctx context.Context, client ctrlclient.Client, clusterReq r
func (p *clusterScope) newPatchHelper(obj ...ctrlclient.Object) error {
helper, err := util.NewPatchHelper(p.client, obj...)
if err != nil {
return err
return errors.Wrapf(err, "failed to create patch helper with scope %q", p.String())
}
p.PatchHelper = helper
@ -127,15 +128,15 @@ func (p *clusterScope) isPaused() bool {
return clusterannotations.IsPaused(p.Cluster, p.KKCluster)
}
// checkIfPipelineCompleted determines if all pipelines associated with the given owner are completed.
// At any given time, there should be at most one pipeline running for each owner.
func (p *clusterScope) ifPipelineCompleted(ctx context.Context, owner ctrlclient.Object) (bool, error) {
pipelineList := &kkcorev1.PipelineList{}
if err := util.GetObjectListFromOwner(ctx, p.client, owner, pipelineList); err != nil {
return false, err
// ifPlaybookCompleted determines if all playbooks associated with the given owner are completed.
// At any given time, there should be at most one playbook running for each owner.
func (p *clusterScope) ifPlaybookCompleted(ctx context.Context, owner ctrlclient.Object) (bool, error) {
playbookList := &kkcorev1.PlaybookList{}
if err := util.GetObjectListFromOwner(ctx, p.client, owner, playbookList); err != nil {
return false, errors.Wrapf(err, "failed to get playbook list from owner %q", ctrlclient.ObjectKeyFromObject(owner))
}
for _, pipeline := range pipelineList.Items {
if pipeline.Status.Phase != kkcorev1.PipelinePhaseFailed && pipeline.Status.Phase != kkcorev1.PipelinePhaseSucceeded {
for _, playbook := range playbookList.Items {
if playbook.Status.Phase != kkcorev1.PlaybookPhaseFailed && playbook.Status.Phase != kkcorev1.PlaybookPhaseSucceeded {
return false, nil
}
}

View File

@ -2,9 +2,9 @@ package infrastructure
import (
"context"
"errors"
"fmt"
"github.com/cockroachdb/errors"
capkkinfrav1beta1 "github.com/kubesphere/kubekey/api/capkk/infrastructure/v1beta1"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -68,9 +68,9 @@ func (r *InventoryReconciler) SetupWithManager(mgr manager.Manager, o options.Co
For(&kkcorev1.Inventory{}).
// Watches kkmachine to sync group.
Watches(&capkkinfrav1beta1.KKMachine{}, handler.EnqueueRequestsFromMapFunc(r.objectToInventoryMapFunc)).
// Watch Pipeline to sync inventory status.
Watches(&kkcorev1.Pipeline{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj ctrlclient.Object) []ctrl.Request {
// only need host check pipeline.
// Watch Playbook to sync inventory status.
Watches(&kkcorev1.Playbook{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj ctrlclient.Object) []ctrl.Request {
// only need host check playbook.
inventory := &kkcorev1.Inventory{}
if err := util.GetOwnerFromObject(ctx, r.Client, obj, inventory); err == nil {
return []ctrl.Request{{NamespacedName: ctrlclient.ObjectKeyFromObject(inventory)}}
@ -113,11 +113,11 @@ func (r *InventoryReconciler) Reconcile(ctx context.Context, req reconcile.Reque
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
return ctrl.Result{}, errors.Wrapf(err, "failed to get inventory %q", req.String())
}
clusterName := inventory.Labels[clusterv1beta1.ClusterNameLabel]
if clusterName == "" {
klog.V(5).InfoS("inventory is not belong cluster", "inventory", req.String())
klog.V(5).InfoS("inventory is not belong cluster. skip", "inventory", req.String())
return ctrl.Result{}, nil
}
@ -126,14 +126,14 @@ func (r *InventoryReconciler) Reconcile(ctx context.Context, req reconcile.Reque
Name: clusterName,
}})
if err != nil {
return ctrl.Result{}, err
return ctrl.Result{}, errors.WithStack(err)
}
if err := scope.newPatchHelper(scope.Inventory); err != nil {
return ctrl.Result{}, err
return ctrl.Result{}, errors.WithStack(err)
}
defer func() {
if err := scope.PatchHelper.Patch(ctx, scope.Inventory); err != nil {
retErr = errors.Join(retErr, err)
retErr = errors.Join(retErr, errors.WithStack(err))
}
}()
@ -154,21 +154,21 @@ func (r *InventoryReconciler) Reconcile(ctx context.Context, req reconcile.Reque
// Handle deleted inventory
if !scope.Inventory.DeletionTimestamp.IsZero() {
return ctrl.Result{}, r.reconcileDelete(ctx, scope)
return ctrl.Result{}, errors.WithStack(r.reconcileDelete(ctx, scope))
}
return ctrl.Result{}, r.reconcileNormal(ctx, scope)
return ctrl.Result{}, errors.WithStack(r.reconcileNormal(ctx, scope))
}
func (r *InventoryReconciler) reconcileDelete(ctx context.Context, scope *clusterScope) error {
// waiting pipeline delete
pipelineList := &kkcorev1.PipelineList{}
if err := util.GetObjectListFromOwner(ctx, r.Client, scope.Inventory, pipelineList); err != nil {
return err
// waiting playbook delete
playbookList := &kkcorev1.PlaybookList{}
if err := util.GetObjectListFromOwner(ctx, r.Client, scope.Inventory, playbookList); err != nil {
return errors.Wrapf(err, "failed to get playbook list from inventory %q", ctrlclient.ObjectKeyFromObject(scope.Inventory))
}
for _, obj := range pipelineList.Items {
for _, obj := range playbookList.Items {
if err := r.Client.Delete(ctx, &obj); err != nil {
return err
return errors.Wrapf(err, "failed to delete playbook %q", ctrlclient.ObjectKeyFromObject(&obj))
}
}
// delete kkmachine for machine deployment
@ -176,11 +176,11 @@ func (r *InventoryReconciler) reconcileDelete(ctx context.Context, scope *cluste
if err := r.Client.List(ctx, mdList, ctrlclient.MatchingLabels{
clusterv1beta1.ClusterNameLabel: scope.Name,
}, ctrlclient.HasLabels{clusterv1beta1.MachineDeploymentNameLabel}); err != nil {
return err
return errors.Wrapf(err, "failed to list machineDeployment with label %s=%s", clusterv1beta1.ClusterNameLabel, scope.Name)
}
for _, obj := range mdList.Items {
if err := r.Client.Delete(ctx, &obj); err != nil {
return err
return errors.Wrapf(err, "failed to delete machineDeployment %q", ctrlclient.ObjectKeyFromObject(&obj))
}
}
if len(mdList.Items) != 0 {
@ -192,15 +192,15 @@ func (r *InventoryReconciler) reconcileDelete(ctx context.Context, scope *cluste
if err := r.Client.List(ctx, cpList, ctrlclient.MatchingLabels{
clusterv1beta1.ClusterNameLabel: scope.Name,
}, ctrlclient.HasLabels{clusterv1beta1.MachineControlPlaneNameLabel}); err != nil {
return err
return errors.Wrapf(err, "failed to list machineControlPlane with label %q", clusterv1beta1.ClusterNameLabel, scope.Name)
}
for _, obj := range cpList.Items {
if err := r.Client.Delete(ctx, &obj); err != nil {
return err
return errors.Wrapf(err, "failed to delete machineControlPlane %q", ctrlclient.ObjectKeyFromObject(&obj))
}
}
if len(pipelineList.Items) == 0 && len(mdList.Items) == 0 && len(cpList.Items) == 0 {
if len(playbookList.Items) == 0 && len(mdList.Items) == 0 && len(cpList.Items) == 0 {
// Delete finalizer.
controllerutil.RemoveFinalizer(scope.Inventory, kkcorev1.InventoryCAPKKFinalizer)
}
@ -218,14 +218,14 @@ func (r *InventoryReconciler) reconcileNormal(ctx context.Context, scope *cluste
// when it's empty: inventory is first created.
// when it's pending: inventory's hosts have changed.
scope.Inventory.Status.Ready = false
if err := r.createHostCheckPipeline(ctx, scope); err != nil {
return err
if err := r.createHostCheckPlaybook(ctx, scope); err != nil {
return errors.Wrapf(err, "failed to create host check playbook in inventory %q", ctrlclient.ObjectKeyFromObject(scope.Inventory))
}
scope.Inventory.Status.Phase = kkcorev1.InventoryPhaseRunning
case kkcorev1.InventoryPhaseRunning:
// sync inventory's status from pipeline
if err := r.reconcileInventoryPipeline(ctx, scope); err != nil {
return err
// sync inventory's status from playbook
if err := r.reconcileInventoryPlaybook(ctx, scope); err != nil {
return errors.Wrapf(err, "failed to reconcile running inventory %q", ctrlclient.ObjectKeyFromObject(scope.Inventory))
}
case kkcorev1.InventoryPhaseSucceeded:
// sync inventory's control_plane groups from ControlPlane
@ -234,7 +234,7 @@ func (r *InventoryReconciler) reconcileNormal(ctx context.Context, scope *cluste
if scope.KKCluster.Spec.Tolerate {
scope.Inventory.Status.Ready = true
}
if scope.Inventory.Annotations[kkcorev1.HostCheckPipelineAnnotation] == "" {
if scope.Inventory.Annotations[kkcorev1.HostCheckPlaybookAnnotation] == "" {
// change to pending
scope.Inventory.Status.Phase = kkcorev1.InventoryPhasePending
}
@ -245,11 +245,11 @@ func (r *InventoryReconciler) reconcileNormal(ctx context.Context, scope *cluste
scope.Inventory.Spec.Groups = make(map[string]kkcorev1.InventoryGroup)
}
if err := r.syncInventoryControlPlaneGroups(ctx, scope); err != nil {
return err
return errors.Wrapf(err, "failed to sync control-plane groups in inventory %q", ctrlclient.ObjectKeyFromObject(scope.Inventory))
}
// sync inventory's worker groups from machinedeployment
if err := r.syncInventoryWorkerGroups(ctx, scope); err != nil {
return err
return errors.Wrapf(err, "failed to sync worker groups in inventory %q", ctrlclient.ObjectKeyFromObject(scope.Inventory))
}
scope.Inventory.Spec.Groups[defaultClusterGroup] = kkcorev1.InventoryGroup{
Groups: []string{getControlPlaneGroupName(), getWorkerGroupName()},
@ -259,37 +259,37 @@ func (r *InventoryReconciler) reconcileNormal(ctx context.Context, scope *cluste
return nil
}
func (r *InventoryReconciler) reconcileInventoryPipeline(ctx context.Context, scope *clusterScope) error {
// get pipeline from inventory
if scope.Inventory.Annotations[kkcorev1.HostCheckPipelineAnnotation] == "" {
func (r *InventoryReconciler) reconcileInventoryPlaybook(ctx context.Context, scope *clusterScope) error {
// get playbook from inventory
if scope.Inventory.Annotations[kkcorev1.HostCheckPlaybookAnnotation] == "" {
return nil
}
pipeline := &kkcorev1.Pipeline{}
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Name: scope.Inventory.Annotations[kkcorev1.HostCheckPipelineAnnotation], Namespace: scope.Namespace}, pipeline); err != nil {
playbook := &kkcorev1.Playbook{}
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Name: scope.Inventory.Annotations[kkcorev1.HostCheckPlaybookAnnotation], Namespace: scope.Namespace}, playbook); err != nil {
if apierrors.IsNotFound(err) {
return r.createHostCheckPipeline(ctx, scope)
return r.createHostCheckPlaybook(ctx, scope)
}
return err
return errors.Wrapf(err, "failed to get playbook with inventory %q annotation %q", ctrlclient.ObjectKeyFromObject(scope.Inventory), kkcorev1.HostCheckPlaybookAnnotation)
}
switch pipeline.Status.Phase {
case kkcorev1.PipelinePhaseSucceeded:
switch playbook.Status.Phase {
case kkcorev1.PlaybookPhaseSucceeded:
scope.Inventory.Status.Phase = kkcorev1.InventoryPhaseSucceeded
case kkcorev1.PipelinePhaseFailed:
case kkcorev1.PlaybookPhaseFailed:
scope.Inventory.Status.Phase = kkcorev1.InventoryPhaseFailed
}
return nil
}
// createHostCheckPipeline if inventory hosts is reachable.
func (r *InventoryReconciler) createHostCheckPipeline(ctx context.Context, scope *clusterScope) error {
if ok, _ := scope.ifPipelineCompleted(ctx, scope.Inventory); !ok {
// createHostCheckPlaybook creates a playbook that checks whether the inventory hosts are reachable.
func (r *InventoryReconciler) createHostCheckPlaybook(ctx context.Context, scope *clusterScope) error {
if ok, _ := scope.ifPlaybookCompleted(ctx, scope.Inventory); !ok {
return nil
}
// todo when install offline. should mount workdir to pipeline.
// todo when install offline. should mount workdir to playbook.
volumes, volumeMounts := scope.getVolumeMounts(ctx)
pipeline := &kkcorev1.Pipeline{
playbook := &kkcorev1.Playbook{
ObjectMeta: metav1.ObjectMeta{
GenerateName: scope.Inventory.Name + "-",
Namespace: scope.Namespace,
@ -297,8 +297,8 @@ func (r *InventoryReconciler) createHostCheckPipeline(ctx context.Context, scope
clusterv1beta1.ClusterNameLabel: scope.Name,
},
},
Spec: kkcorev1.PipelineSpec{
Project: kkcorev1.PipelineProject{
Spec: kkcorev1.PlaybookSpec{
Project: kkcorev1.PlaybookProject{
Addr: _const.CAPKKProjectdir,
},
Playbook: _const.CAPKKPlaybookHostCheck,
@ -312,17 +312,17 @@ func (r *InventoryReconciler) createHostCheckPipeline(ctx context.Context, scope
Volumes: volumes,
},
}
if err := ctrl.SetControllerReference(scope.Inventory, pipeline, r.Client.Scheme()); err != nil {
return err
if err := ctrl.SetControllerReference(scope.Inventory, playbook, r.Client.Scheme()); err != nil {
return errors.Wrapf(err, "failed to set ownerReference of inventory %q to playbook", ctrlclient.ObjectKeyFromObject(scope.Inventory))
}
if err := r.Create(ctx, pipeline); err != nil {
return err
if err := r.Create(ctx, playbook); err != nil {
return errors.Wrapf(err, "failed to create playbook use inventory %q", ctrlclient.ObjectKeyFromObject(scope.Inventory))
}
if scope.Inventory.Annotations == nil {
scope.Inventory.Annotations = make(map[string]string)
}
scope.Inventory.Annotations[kkcorev1.HostCheckPipelineAnnotation] = pipeline.Name
scope.Inventory.Annotations[kkcorev1.HostCheckPlaybookAnnotation] = playbook.Name
return nil
}
@ -331,18 +331,18 @@ func (r *InventoryReconciler) createHostCheckPipeline(ctx context.Context, scope
func (r *InventoryReconciler) syncInventoryControlPlaneGroups(ctx context.Context, scope *clusterScope) error {
groupNum, _, err := unstructured.NestedInt64(scope.ControlPlane.Object, "spec", "replicas")
if err != nil {
return fmt.Errorf("failed to get replicas from controlPlane %q in cluster %q", ctrlclient.ObjectKeyFromObject(scope.ControlPlane), scope.String())
return errors.Wrapf(err, "failed to get replicas from controlPlane %q in cluster %q", ctrlclient.ObjectKeyFromObject(scope.ControlPlane), scope.String())
}
// Ensure the control plane group's replica count is singular (odd), because etcd is deployed in the controlPlane.
// todo: now we only support internal etcd groups.
if groupNum%2 != 1 {
return fmt.Errorf("controlPlane %q replicas must be singular in cluster %q", ctrlclient.ObjectKeyFromObject(scope.ControlPlane), scope.String())
return errors.Errorf("controlPlane %q replicas must be singular in cluster %q", ctrlclient.ObjectKeyFromObject(scope.ControlPlane), scope.String())
}
// get machineList from controlPlane
machineList := &clusterv1beta1.MachineList{}
if err := util.GetObjectListFromOwner(ctx, r.Client, scope.ControlPlane, machineList); err != nil {
return err
return errors.Wrapf(err, "failed to get machineList from controlPlane %q", ctrlclient.ObjectKeyFromObject(scope.ControlPlane))
}
if len(machineList.Items) != int(groupNum) {
klog.Info("waiting machine synced.")
@ -355,7 +355,7 @@ func (r *InventoryReconciler) syncInventoryControlPlaneGroups(ctx context.Contex
if err := r.Client.List(ctx, kkmachineList, ctrlclient.MatchingLabels{
clusterv1beta1.MachineControlPlaneNameLabel: scope.ControlPlane.GetName(),
}); err != nil {
return err
return errors.Wrapf(err, "failed to get kkMachineList with label %s=%s", clusterv1beta1.MachineControlPlaneNameLabel, scope.ControlPlane.GetName())
}
for _, kkmachine := range kkmachineList.Items {
if kkmachine.Spec.ProviderID != nil {
@ -384,7 +384,7 @@ func (r *InventoryReconciler) syncInventoryWorkerGroups(ctx context.Context, sco
if err := r.Client.List(ctx, machineList, ctrlclient.MatchingLabels{
clusterv1beta1.MachineDeploymentNameLabel: scope.MachineDeployment.Name,
}); err != nil {
return err
return errors.Wrapf(err, "failed to get machineList with label %s=%s", clusterv1beta1.MachineDeploymentNameLabel, scope.MachineDeployment.Name)
}
if len(machineList.Items) != int(groupNum) {
klog.Info("waiting machine synced.")
@ -397,7 +397,7 @@ func (r *InventoryReconciler) syncInventoryWorkerGroups(ctx context.Context, sco
if err := r.Client.List(ctx, kkmachineList, ctrlclient.MatchingLabels{
clusterv1beta1.MachineDeploymentNameLabel: scope.MachineDeployment.Name,
}); err != nil {
return err
return errors.Wrapf(err, "failed to get kkmachineList with label %s=%s", clusterv1beta1.MachineDeploymentNameLabel, scope.MachineDeployment.Name)
}
for _, kkmachine := range kkmachineList.Items {
if kkmachine.Spec.ProviderID != nil {
@ -429,7 +429,7 @@ func (r *InventoryReconciler) setProviderID(ctx context.Context, clusterName str
}
kkmachine.Spec.ProviderID = _const.Host2ProviderID(clusterName, host)
if err := r.Client.Update(ctx, &kkmachine); err != nil {
return err
return errors.Wrapf(err, "failed to set provider to kkmachine %q", ctrlclient.ObjectKeyFromObject(&kkmachine))
}
}
}

View File

@ -2,10 +2,10 @@ package infrastructure
import (
"context"
"errors"
"reflect"
"strings"
"github.com/cockroachdb/errors"
capkkinfrav1beta1 "github.com/kubesphere/kubekey/api/capkk/infrastructure/v1beta1"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -90,11 +90,11 @@ func (r *KKClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
return ctrl.Result{}, errors.Wrapf(err, "failed to get kkcluster %q", req.String())
}
clusterName := kkcluster.Labels[clusterv1beta1.ClusterNameLabel]
if clusterName == "" {
klog.V(5).InfoS("inventory is not belong cluster", "inventory", req.String())
klog.V(5).InfoS("kkcluster is not belong cluster. skip", "inventory", req.String())
return ctrl.Result{}, nil
}
@ -103,10 +103,10 @@ func (r *KKClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
Name: clusterName,
}})
if err != nil {
return ctrl.Result{}, err
return ctrl.Result{}, errors.WithStack(err)
}
if err := scope.newPatchHelper(scope.KKCluster, scope.Inventory); err != nil {
return ctrl.Result{}, err
return ctrl.Result{}, errors.WithStack(err)
}
defer func() {
if retErr != nil {
@ -116,10 +116,10 @@ func (r *KKClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
scope.KKCluster.Status.FailureMessage = retErr.Error()
}
if err := r.reconcileStatus(ctx, scope); err != nil {
retErr = errors.Join(retErr, err)
retErr = errors.Join(retErr, errors.WithStack(err))
}
if err := scope.PatchHelper.Patch(ctx, scope.KKCluster, scope.Inventory); err != nil {
retErr = errors.Join(retErr, err)
retErr = errors.Join(retErr, errors.WithStack(err))
}
}()
@ -140,11 +140,11 @@ func (r *KKClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
// Handle deleted clusters
if !scope.KKCluster.DeletionTimestamp.IsZero() {
return reconcile.Result{}, r.reconcileDelete(ctx, scope)
return reconcile.Result{}, errors.WithStack(r.reconcileDelete(ctx, scope))
}
// Handle non-deleted clusters
return reconcile.Result{}, r.reconcileNormal(ctx, scope)
return reconcile.Result{}, errors.WithStack(r.reconcileNormal(ctx, scope))
}
// reconcileDelete delete cluster
@ -152,11 +152,11 @@ func (r *KKClusterReconciler) reconcileDelete(ctx context.Context, scope *cluste
// waiting inventory deleted
inventoryList := &kkcorev1.InventoryList{}
if err := util.GetObjectListFromOwner(ctx, r.Client, scope.KKCluster, inventoryList); err != nil {
return err
return errors.Wrapf(err, "failed to get inventoryList from kkcluster %q", ctrlclient.ObjectKeyFromObject(scope.KKCluster))
}
for _, obj := range inventoryList.Items {
if err := r.Client.Delete(ctx, &obj); err != nil {
return err
return errors.Wrapf(err, "failed to delete inventory %q", ctrlclient.ObjectKeyFromObject(&obj))
}
}
@ -174,7 +174,7 @@ func (r *KKClusterReconciler) reconcileNormal(ctx context.Context, scope *cluste
if err != nil { // cannot convert kkcluster to inventory. may be kkcluster is not valid.
scope.KKCluster.Status.FailureReason = capkkinfrav1beta1.KKClusterFailedInvalidHosts
return err
return errors.Wrapf(err, "failed to convert kkcluster %q to inventoryHost", ctrlclient.ObjectKeyFromObject(scope.KKCluster))
}
// if inventory is not exist. create it
if scope.Inventory.Name == "" {
@ -191,7 +191,7 @@ func (r *KKClusterReconciler) reconcileNormal(ctx context.Context, scope *cluste
},
}
if err := ctrl.SetControllerReference(scope.KKCluster, scope.Inventory, r.Scheme); err != nil {
return err
return errors.Wrapf(err, "failed to set ownerReference from kkcluster %q to inventory", ctrlclient.ObjectKeyFromObject(scope.KKCluster))
}
return r.Client.Create(ctx, scope.Inventory)
@ -224,7 +224,7 @@ func (r *KKClusterReconciler) reconcileNormal(ctx context.Context, scope *cluste
if scope.Inventory.Annotations == nil {
scope.Inventory.Annotations = make(map[string]string)
}
scope.Inventory.Annotations[kkcorev1.HostCheckPipelineAnnotation] = ""
scope.Inventory.Annotations[kkcorev1.HostCheckPlaybookAnnotation] = ""
scope.Inventory.Status.Phase = kkcorev1.InventoryPhasePending
scope.Inventory.Status.Ready = false
@ -235,12 +235,12 @@ func (r *KKClusterReconciler) reconcileStatus(ctx context.Context, scope *cluste
// sync KKClusterNodeReachedCondition.
switch scope.Inventory.Status.Phase {
case kkcorev1.InventoryPhasePending:
conditions.MarkUnknown(scope.KKCluster, capkkinfrav1beta1.KKClusterNodeReachedCondition, capkkinfrav1beta1.KKClusterNodeReachedConditionReasonWaiting, "waiting for inventory host check pipeline.")
conditions.MarkUnknown(scope.KKCluster, capkkinfrav1beta1.KKClusterNodeReachedCondition, capkkinfrav1beta1.KKClusterNodeReachedConditionReasonWaiting, "waiting for inventory host check playbook.")
case kkcorev1.InventoryPhaseSucceeded:
conditions.MarkTrue(scope.KKCluster, capkkinfrav1beta1.KKClusterNodeReachedCondition)
case kkcorev1.InventoryPhaseFailed:
conditions.MarkFalse(scope.KKCluster, capkkinfrav1beta1.KKClusterNodeReachedCondition, capkkinfrav1beta1.KKClusterNodeReachedConditionReasonUnreached, clusterv1beta1.ConditionSeverityError,
"inventory host check pipeline %q run failed", scope.Inventory.Annotations[kkcorev1.HostCheckPipelineAnnotation])
"inventory host check playbook %q run failed", scope.Inventory.Annotations[kkcorev1.HostCheckPlaybookAnnotation])
}
// after inventory is ready. continue create cluster
@ -252,7 +252,7 @@ func (r *KKClusterReconciler) reconcileStatus(ctx context.Context, scope *cluste
if err := r.Client.List(ctx, kkmachineList, ctrlclient.MatchingLabels{
clusterv1beta1.ClusterNameLabel: scope.Name,
}); err != nil {
return err
return errors.Wrapf(err, "failed to get kkMachineList with label %s=%s", clusterv1beta1.ClusterNameLabel, scope.Name)
}
// sync kkmachine status to kkcluster
@ -271,7 +271,7 @@ func (r *KKClusterReconciler) reconcileStatus(ctx context.Context, scope *cluste
cpn, _, err := unstructured.NestedInt64(scope.ControlPlane.Object, "spec", "replicas")
if err != nil {
return err
return errors.Wrapf(err, "failed to get replicas from machineControlPlane %s", ctrlclient.ObjectKeyFromObject(scope.ControlPlane))
}
mdn := int(ptr.Deref(scope.MachineDeployment.Spec.Replicas, 0))
if scope.KKCluster.Status.Ready && scope.KKCluster.Status.FailureReason == "" &&

View File

@ -2,8 +2,8 @@ package infrastructure
import (
"context"
"errors"
"github.com/cockroachdb/errors"
capkkinfrav1beta1 "github.com/kubesphere/kubekey/api/capkk/infrastructure/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"

View File

@ -3,10 +3,10 @@ package infrastructure
import (
"context"
"embed"
"errors"
"fmt"
"time"
"github.com/cockroachdb/errors"
capkkinfrav1beta1 "github.com/kubesphere/kubekey/api/capkk/infrastructure/v1beta1"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
corev1 "k8s.io/api/core/v1"
@ -33,7 +33,7 @@ import (
)
// KKMachineReconciler reconciles a KKMachine object.
// One KKMachine should have one Pipeline running in time.
// One KKMachine should have one Playbook running in time.
type KKMachineReconciler struct {
ctrlclient.Client
record.EventRecorder
@ -69,8 +69,8 @@ func (r *KKMachineReconciler) SetupWithManager(mgr ctrl.Manager, o options.Contr
MaxConcurrentReconciles: o.MaxConcurrentReconciles,
}).
For(&capkkinfrav1beta1.KKMachine{}).
// Watches pipeline to sync kkmachine.
Watches(&kkcorev1.Pipeline{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj ctrlclient.Object) []reconcile.Request {
// Watches playbook to sync kkmachine.
Watches(&kkcorev1.Playbook{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj ctrlclient.Object) []reconcile.Request {
kkmachine := &capkkinfrav1beta1.KKMachine{}
if err := util.GetOwnerFromObject(ctx, r.Client, obj, kkmachine); err == nil {
return []ctrl.Request{{NamespacedName: ctrlclient.ObjectKeyFromObject(kkmachine)}}
@ -89,7 +89,7 @@ func (r *KKMachineReconciler) Reconcile(ctx context.Context, req reconcile.Reque
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
return ctrl.Result{}, errors.Wrapf(err, "failed to get kkmachine %q", req.String())
}
clusterName := kkmachine.Labels[clusterv1beta1.ClusterNameLabel]
if clusterName == "" {
@ -102,14 +102,14 @@ func (r *KKMachineReconciler) Reconcile(ctx context.Context, req reconcile.Reque
Name: clusterName,
}})
if err != nil {
return ctrl.Result{}, err
return ctrl.Result{}, errors.WithStack(err)
}
if err := scope.newPatchHelper(kkmachine); err != nil {
return ctrl.Result{}, err
return ctrl.Result{}, errors.WithStack(err)
}
defer func() {
if err := scope.PatchHelper.Patch(ctx, kkmachine); err != nil {
retErr = errors.Join(retErr, err)
retErr = errors.Join(retErr, errors.WithStack(err))
}
}()
@ -129,12 +129,12 @@ func (r *KKMachineReconciler) Reconcile(ctx context.Context, req reconcile.Reque
}
if !kkmachine.DeletionTimestamp.IsZero() {
return reconcile.Result{}, r.reconcileDelete(ctx, scope, kkmachine)
return reconcile.Result{}, errors.WithStack(r.reconcileDelete(ctx, scope, kkmachine))
}
machine := &clusterv1beta1.Machine{}
if err := util.GetOwnerFromObject(ctx, r.Client, kkmachine, machine); err != nil {
return reconcile.Result{}, err
return reconcile.Result{}, errors.Wrapf(err, "failed to get machine from kkmachine %q", ctrlclient.ObjectKeyFromObject(machine))
}
kkmachine.Spec.Version = machine.Spec.Version
@ -150,44 +150,44 @@ func (r *KKMachineReconciler) Reconcile(ctx context.Context, req reconcile.Reque
return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
}
return reconcile.Result{}, r.reconcileNormal(ctx, scope, kkmachine, machine)
return reconcile.Result{}, errors.WithStack(r.reconcileNormal(ctx, scope, kkmachine, machine))
}
// reconcileDelete handles delete reconcile.
func (r *KKMachineReconciler) reconcileDelete(ctx context.Context, scope *clusterScope, kkmachine *capkkinfrav1beta1.KKMachine) error {
// check if addNodePipeline has created
addNodePipelineName := kkmachine.Annotations[capkkinfrav1beta1.AddNodePipelineAnnotation]
delNodePipelineName := kkmachine.Annotations[capkkinfrav1beta1.DeleteNodePipelineAnnotation]
addNodePipeline, delNodePipeline, err := r.getPipeline(ctx, scope, kkmachine)
// check if addNodePlaybook has created
addNodePlaybookName := kkmachine.Annotations[capkkinfrav1beta1.AddNodePlaybookAnnotation]
delNodePlaybookName := kkmachine.Annotations[capkkinfrav1beta1.DeleteNodePlaybookAnnotation]
addNodePlaybook, delNodePlaybook, err := r.getPlaybook(ctx, scope, kkmachine)
if err != nil {
return err
return errors.WithStack(err)
}
switch {
case addNodePipelineName == "" && delNodePipelineName == "":
// the kkmachine has not executor any pipeline, delete direct.
case addNodePlaybookName == "" && delNodePlaybookName == "":
// the kkmachine has not executor any playbook, delete direct.
controllerutil.RemoveFinalizer(kkmachine, capkkinfrav1beta1.KKMachineFinalizer)
case addNodePipelineName != "" && delNodePipelineName == "":
// should waiting addNodePipeline completed and create deleteNodePipeline
if addNodePipeline == nil || // addNodePipeline has been deleted
(addNodePipeline.Status.Phase == kkcorev1.PipelinePhaseSucceeded || addNodePipeline.Status.Phase == kkcorev1.PipelinePhaseFailed) { // addNodePipeline has completed
return r.createDeleteNodePipeline(ctx, scope, kkmachine)
case addNodePlaybookName != "" && delNodePlaybookName == "":
// should waiting addNodePlaybook completed and create deleteNodePlaybook
if addNodePlaybook == nil || // addNodePlaybook has been deleted
(addNodePlaybook.Status.Phase == kkcorev1.PlaybookPhaseSucceeded || addNodePlaybook.Status.Phase == kkcorev1.PlaybookPhaseFailed) { // addNodePlaybook has completed
return r.createDeleteNodePlaybook(ctx, scope, kkmachine)
}
// should waiting addNodePipeline completed
// should waiting addNodePlaybook completed
return nil
case addNodePipelineName != "" && delNodePipelineName != "":
if addNodePipeline != nil && addNodePipeline.DeletionTimestamp.IsZero() {
return r.Client.Delete(ctx, addNodePipeline)
case addNodePlaybookName != "" && delNodePlaybookName != "":
if addNodePlaybook != nil && addNodePlaybook.DeletionTimestamp.IsZero() {
return r.Client.Delete(ctx, addNodePlaybook)
}
if delNodePipeline != nil && delNodePipeline.DeletionTimestamp.IsZero() {
if delNodePipeline.Status.Phase == kkcorev1.PipelinePhaseSucceeded {
return r.Client.Delete(ctx, delNodePipeline)
if delNodePlaybook != nil && delNodePlaybook.DeletionTimestamp.IsZero() {
if delNodePlaybook.Status.Phase == kkcorev1.PlaybookPhaseSucceeded {
return r.Client.Delete(ctx, delNodePlaybook)
}
// should waiting delNodePipeline completed
// should waiting delNodePlaybook completed
return nil
}
}
if addNodePipeline == nil && delNodePipeline == nil {
if addNodePlaybook == nil && delNodePlaybook == nil {
// Delete finalizer.
controllerutil.RemoveFinalizer(kkmachine, capkkinfrav1beta1.KKMachineFinalizer)
}
@ -195,76 +195,76 @@ func (r *KKMachineReconciler) reconcileDelete(ctx context.Context, scope *cluste
return nil
}
// getPipeline get addNodePipeline and delNodePipeline from kkmachine.Annotations.
func (r *KKMachineReconciler) getPipeline(ctx context.Context, scope *clusterScope, kkmachine *capkkinfrav1beta1.KKMachine) (*kkcorev1.Pipeline, *kkcorev1.Pipeline, error) {
var addNodePipeline, delNodePipeline *kkcorev1.Pipeline
if name, ok := kkmachine.Annotations[capkkinfrav1beta1.AddNodePipelineAnnotation]; ok && name != "" {
addNodePipeline = &kkcorev1.Pipeline{}
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: scope.Namespace, Name: name}, addNodePipeline); err != nil {
// getPlaybook get addNodePlaybook and delNodePlaybook from kkmachine.Annotations.
func (r *KKMachineReconciler) getPlaybook(ctx context.Context, scope *clusterScope, kkmachine *capkkinfrav1beta1.KKMachine) (*kkcorev1.Playbook, *kkcorev1.Playbook, error) {
var addNodePlaybook, delNodePlaybook *kkcorev1.Playbook
if name, ok := kkmachine.Annotations[capkkinfrav1beta1.AddNodePlaybookAnnotation]; ok && name != "" {
addNodePlaybook = &kkcorev1.Playbook{}
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: scope.Namespace, Name: name}, addNodePlaybook); err != nil {
if !apierrors.IsNotFound(err) {
// maybe delete by user. skip
return nil, nil, err
return nil, nil, errors.Wrapf(err, "failed to get addNode playbook from kkmachine %q with annotation %q", ctrlclient.ObjectKeyFromObject(kkmachine), capkkinfrav1beta1.AddNodePlaybookAnnotation)
}
addNodePipeline = nil
addNodePlaybook = nil
}
}
if name, ok := kkmachine.Annotations[capkkinfrav1beta1.DeleteNodePipelineAnnotation]; ok && name != "" {
delNodePipeline = &kkcorev1.Pipeline{}
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: scope.Namespace, Name: name}, delNodePipeline); err != nil {
if name, ok := kkmachine.Annotations[capkkinfrav1beta1.DeleteNodePlaybookAnnotation]; ok && name != "" {
delNodePlaybook = &kkcorev1.Playbook{}
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: scope.Namespace, Name: name}, delNodePlaybook); err != nil {
if !apierrors.IsNotFound(err) {
// maybe delete by user. skip
return nil, nil, err
return nil, nil, errors.Wrapf(err, "failed to get delNode playbook from kkmachine %q with annotation %q", ctrlclient.ObjectKeyFromObject(kkmachine), capkkinfrav1beta1.DeleteNodePlaybookAnnotation)
}
delNodePipeline = nil
delNodePlaybook = nil
}
}
return addNodePipeline, delNodePipeline, nil
return addNodePlaybook, delNodePlaybook, nil
}
// reconcileNormal handles normal reconcile.
// when dataSecret or certificates files changed. KCP will RollingUpdate machine (create new machines to replace old machines)
// so the sync file should contains in add_node pipeline.
// so the sync file should contains in add_node playbook.
func (r *KKMachineReconciler) reconcileNormal(ctx context.Context, scope *clusterScope, kkmachine *capkkinfrav1beta1.KKMachine, machine *clusterv1beta1.Machine) error {
pipelineName := kkmachine.Annotations[capkkinfrav1beta1.AddNodePipelineAnnotation]
if pipelineName == "" {
playbookName := kkmachine.Annotations[capkkinfrav1beta1.AddNodePlaybookAnnotation]
if playbookName == "" {
kkmachine.Status.Ready = false
kkmachine.Status.FailureReason = ""
kkmachine.Status.FailureMessage = ""
// should create pipeline
return r.createAddNodePipeline(ctx, scope, kkmachine, machine)
// should create playbook
return r.createAddNodePlaybook(ctx, scope, kkmachine, machine)
}
// check pipeline status
pipeline := &kkcorev1.Pipeline{}
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: scope.Namespace, Name: pipelineName}, pipeline); err != nil {
// check playbook status
playbook := &kkcorev1.Playbook{}
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: scope.Namespace, Name: playbookName}, playbook); err != nil {
if apierrors.IsNotFound(err) {
// the pipeline has not found.
r.EventRecorder.Eventf(kkmachine, corev1.EventTypeWarning, "AddNodeFailed", "add node pipeline: %q not found", pipelineName)
// the playbook has not found.
r.EventRecorder.Eventf(kkmachine, corev1.EventTypeWarning, "AddNodeFailed", "add node playbook: %q not found", playbookName)
return nil
}
return err
return errors.Wrapf(err, "failed to get playbook %s/%s", scope.Namespace, playbookName)
}
switch pipeline.Status.Phase {
case kkcorev1.PipelinePhaseSucceeded:
switch playbook.Status.Phase {
case kkcorev1.PlaybookPhaseSucceeded:
// set machine to ready
kkmachine.Status.Ready = true
kkmachine.Status.FailureReason = ""
kkmachine.Status.FailureMessage = ""
case kkcorev1.PipelinePhaseFailed:
case kkcorev1.PlaybookPhaseFailed:
// set machine to not ready
kkmachine.Status.Ready = false
kkmachine.Status.FailureReason = capkkinfrav1beta1.KKMachineFailedReasonAddNodeFailed
kkmachine.Status.FailureMessage = fmt.Sprintf("add_node pipeline %q run failed", pipelineName)
kkmachine.Status.FailureMessage = fmt.Sprintf("add_node playbook %q run failed", playbookName)
}
return nil
}
func (r *KKMachineReconciler) createAddNodePipeline(ctx context.Context, scope *clusterScope, kkmachine *capkkinfrav1beta1.KKMachine, machine *clusterv1beta1.Machine) error {
if ok, _ := scope.ifPipelineCompleted(ctx, kkmachine); !ok {
func (r *KKMachineReconciler) createAddNodePlaybook(ctx context.Context, scope *clusterScope, kkmachine *capkkinfrav1beta1.KKMachine, machine *clusterv1beta1.Machine) error {
if ok, _ := scope.ifPlaybookCompleted(ctx, kkmachine); !ok {
return nil
}
volumes, volumeMounts := scope.getVolumeMounts(ctx)
@ -287,7 +287,7 @@ func (r *KKMachineReconciler) createAddNodePipeline(ctx context.Context, scope *
if err != nil {
klog.ErrorS(err, "get default config error, use default config", "version", kkmachine.Spec.Version)
}
pipeline := &kkcorev1.Pipeline{
playbook := &kkcorev1.Playbook{
ObjectMeta: metav1.ObjectMeta{
GenerateName: kkmachine.Name + "-",
Namespace: scope.Namespace,
@ -295,8 +295,8 @@ func (r *KKMachineReconciler) createAddNodePipeline(ctx context.Context, scope *
clusterv1beta1.ClusterNameLabel: scope.Name,
},
},
Spec: kkcorev1.PipelineSpec{
Project: kkcorev1.PipelineProject{
Spec: kkcorev1.PlaybookSpec{
Project: kkcorev1.PlaybookProject{
Addr: _const.CAPKKProjectdir,
},
Playbook: _const.CAPKKPlaybookAddNode,
@ -306,20 +306,20 @@ func (r *KKMachineReconciler) createAddNodePipeline(ctx context.Context, scope *
Volumes: volumes,
},
}
if err := ctrl.SetControllerReference(kkmachine, pipeline, r.Client.Scheme()); err != nil {
return err
if err := ctrl.SetControllerReference(kkmachine, playbook, r.Client.Scheme()); err != nil {
return errors.Wrapf(err, "failed to set ownerReference from kkmachine %q to addNode playbook", ctrlclient.ObjectKeyFromObject(kkmachine))
}
if err := r.Client.Create(ctx, pipeline); err != nil {
return err
if err := r.Client.Create(ctx, playbook); err != nil {
return errors.Wrapf(err, "failed to create addNode playbook from kkmachine %q", ctrlclient.ObjectKeyFromObject(kkmachine))
}
// add pipeline name to kkmachine
kkmachine.Annotations[capkkinfrav1beta1.AddNodePipelineAnnotation] = pipeline.Name
// add playbook name to kkmachine
kkmachine.Annotations[capkkinfrav1beta1.AddNodePlaybookAnnotation] = playbook.Name
return nil
}
func (r *KKMachineReconciler) createDeleteNodePipeline(ctx context.Context, scope *clusterScope, kkmachine *capkkinfrav1beta1.KKMachine) error {
if ok, _ := scope.ifPipelineCompleted(ctx, kkmachine); !ok {
func (r *KKMachineReconciler) createDeleteNodePlaybook(ctx context.Context, scope *clusterScope, kkmachine *capkkinfrav1beta1.KKMachine) error {
if ok, _ := scope.ifPlaybookCompleted(ctx, kkmachine); !ok {
return nil
}
config, err := r.getConfig(scope, kkmachine)
@ -327,7 +327,7 @@ func (r *KKMachineReconciler) createDeleteNodePipeline(ctx context.Context, scop
klog.ErrorS(err, "get default config error, use default config", "kubeVersion", kkmachine.Spec.Version)
}
volumes, volumeMounts := scope.getVolumeMounts(ctx)
pipeline := &kkcorev1.Pipeline{
playbook := &kkcorev1.Playbook{
ObjectMeta: metav1.ObjectMeta{
GenerateName: kkmachine.Name + "-",
Namespace: scope.Namespace,
@ -335,8 +335,8 @@ func (r *KKMachineReconciler) createDeleteNodePipeline(ctx context.Context, scop
clusterv1beta1.ClusterNameLabel: scope.Name,
},
},
Spec: kkcorev1.PipelineSpec{
Project: kkcorev1.PipelineProject{
Spec: kkcorev1.PlaybookSpec{
Project: kkcorev1.PlaybookProject{
Addr: _const.CAPKKProjectdir,
},
Playbook: _const.CAPKKPlaybookDeleteNode,
@ -346,13 +346,13 @@ func (r *KKMachineReconciler) createDeleteNodePipeline(ctx context.Context, scop
Volumes: volumes,
},
}
if err := ctrl.SetControllerReference(kkmachine, pipeline, r.Client.Scheme()); err != nil {
return err
if err := ctrl.SetControllerReference(kkmachine, playbook, r.Client.Scheme()); err != nil {
return errors.Wrapf(err, "failed to set ownerReference from kkmachine %q to delNode playbook", ctrlclient.ObjectKeyFromObject(kkmachine))
}
if err := r.Client.Create(ctx, pipeline); err != nil {
return err
if err := r.Client.Create(ctx, playbook); err != nil {
return errors.Wrapf(err, "failed to create delNode playbook from kkmachine %q", ctrlclient.ObjectKeyFromObject(kkmachine))
}
kkmachine.Annotations[capkkinfrav1beta1.DeleteNodePipelineAnnotation] = pipeline.Name
kkmachine.Annotations[capkkinfrav1beta1.DeleteNodePlaybookAnnotation] = playbook.Name
return nil
}
@ -370,38 +370,38 @@ func (r *KKMachineReconciler) getConfig(scope *clusterScope, kkmachine *capkkinf
}
data, err := kubeVersionConfigs.ReadFile(fmt.Sprintf("versions/%s.yaml", *kkmachine.Spec.Version))
if err != nil {
return config, fmt.Errorf("read default config file error: %w", err)
return config, errors.Wrap(err, "failed to read default config file")
}
if err := yaml.Unmarshal(data, config); err != nil {
return config, fmt.Errorf("unmarshal config file error: %w", err)
return config, errors.Wrap(err, "failed to unmarshal config file")
}
klog.InfoS("get default config", "config", config)
}
if err := config.SetValue(_const.Workdir, _const.CAPKKWorkdir); err != nil {
return config, fmt.Errorf("failed to set %q in config error: %w", _const.Workdir, err)
return config, errors.Wrapf(err, "failed to set %q in config", _const.Workdir)
}
if err := config.SetValue("node_name", _const.ProviderID2Host(scope.Name, kkmachine.Spec.ProviderID)); err != nil {
return config, fmt.Errorf("failed to set \"node_name\" in config error: %w", err)
return config, errors.Wrap(err, "failed to set \"node_name\" in config")
}
if err := config.SetValue("kube_version", kkmachine.Spec.Version); err != nil {
return config, fmt.Errorf("failed to set \"kube_version\" in config error: %w", err)
return config, errors.Wrap(err, "failed to set \"kube_version\" in config")
}
if err := config.SetValue("kubernetes.cluster_name", scope.Cluster.Name); err != nil {
return config, fmt.Errorf("failed to set \"kubernetes.cluster_name\" in config error: %w", err)
return config, errors.Wrap(err, "failed to set \"kubernetes.cluster_name\" in config")
}
if err := config.SetValue("kubernetes.roles", kkmachine.Spec.Roles); err != nil {
return config, fmt.Errorf("failed to set \"kubernetes.roles\" in config error: %w", err)
return config, errors.Wrap(err, "failed to set \"kubernetes.roles\" in config")
}
if err := config.SetValue("cluster_network", scope.Cluster.Spec.ClusterNetwork); err != nil {
return config, fmt.Errorf("failed to set \"cluster_network\" in config error: %w", err)
return config, errors.Wrap(err, "failed to set \"cluster_network\" in config")
}
switch scope.KKCluster.Spec.ControlPlaneEndpointType {
case capkkinfrav1beta1.ControlPlaneEndpointTypeVIP:
// should set vip addr to config
if err := config.SetValue("kubernetes.control_plane_endpoint.kube_vip.address", scope.Cluster.Spec.ControlPlaneEndpoint.Host); err != nil {
return config, fmt.Errorf("failed to set \"kubernetes.control_plane_endpoint.kube_vip.address\" in config error: %w", err)
return config, errors.Wrap(err, "failed to set \"kubernetes.control_plane_endpoint.kube_vip.address\" in config")
}
case capkkinfrav1beta1.ControlPlaneEndpointTypeDNS:
// do nothing
@ -409,10 +409,10 @@ func (r *KKMachineReconciler) getConfig(scope *clusterScope, kkmachine *capkkinf
return config, errors.New("unsupport ControlPlaneEndpointType")
}
if err := config.SetValue("kubernetes.control_plane_endpoint.host", scope.Cluster.Spec.ControlPlaneEndpoint.Host); err != nil {
return config, fmt.Errorf("failed to set \"kubernetes.kube_vip.address\" in config error: %w", err)
return config, errors.Wrap(err, "failed to set \"kubernetes.kube_vip.address\" in config")
}
if err := config.SetValue("kubernetes.control_plane_endpoint.type", scope.KKCluster.Spec.ControlPlaneEndpointType); err != nil {
return config, fmt.Errorf("failed to set \"kubernetes.kube_vip.enabled\" in config error: %w", err)
return config, errors.Wrap(err, "failed to set \"kubernetes.kube_vip.enabled\" in config")
}
return config, nil

View File

@ -17,8 +17,6 @@ limitations under the License.
package converter
import (
"errors"
"fmt"
"math"
"strconv"
"strings"
@ -28,10 +26,13 @@ import (
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/klog/v2"
"github.com/cockroachdb/errors"
capkkinfrav1beta1 "github.com/kubesphere/kubekey/api/capkk/infrastructure/v1beta1"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
kkcorev1alpha1 "github.com/kubesphere/kubekey/api/core/v1alpha1"
kkprojectv1 "github.com/kubesphere/kubekey/api/project/v1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
)
// MarshalBlock marshal block to task
@ -87,13 +88,13 @@ func GroupHostBySerial(hosts []string, serial []any) ([][]string, error) {
if strings.HasSuffix(val, "%") {
b, err := strconv.ParseFloat(val[:len(val)-1], 64)
if err != nil {
return nil, fmt.Errorf("convert serial %v to float error: %w", a, err)
return nil, errors.Wrapf(err, "convert serial %q to float", val)
}
sis[i] = int(math.Ceil(float64(len(hosts)) * b / 100.0))
} else {
b, err := strconv.Atoi(val)
if err != nil {
return nil, fmt.Errorf("convert serial %v to int error: %w", a, err)
return nil, errors.Wrapf(err, "convert serial %q to int", val)
}
sis[i] = b
}
@ -101,7 +102,7 @@ func GroupHostBySerial(hosts []string, serial []any) ([][]string, error) {
return nil, errors.New("unknown serial type. only support int or percent")
}
if sis[i] == 0 {
return nil, fmt.Errorf("serial %v should not be zero", a)
return nil, errors.Errorf("serial %v should not be zero", a)
}
count += sis[i]
}
@ -134,17 +135,15 @@ func ConvertKKClusterToInventoryHost(kkcluster *capkkinfrav1beta1.KKCluster) (kk
vars := make(map[string]any)
if ih.Vars.Raw != nil {
if err := json.Unmarshal(ih.Vars.Raw, &vars); err != nil {
return nil, fmt.Errorf("unmarshal kkclusters %s to inventory error: %w", ih.Name, err)
return nil, errors.Wrapf(err, "failed to unmarshal kkcluster.spec.InventoryHost %s to inventoryHost", ih.Name)
}
}
vars["connector"] = ih.Connector
vars[_const.VariableConnector] = ih.Connector
data, err := json.Marshal(vars)
if err != nil {
return nil, fmt.Errorf("marshal kkclusters %s to inventory error: %w", ih.Name, err)
}
inventoryHosts[ih.Name] = runtime.RawExtension{
Raw: data,
return nil, errors.Wrapf(err, "marshal kkclusters %s to inventory", ih.Name)
}
inventoryHosts[ih.Name] = runtime.RawExtension{Raw: data}
}
return inventoryHosts, nil

View File

@ -1,13 +1,13 @@
package internal
import (
"fmt"
"math"
"net"
"strings"
"text/template"
"github.com/Masterminds/sprig/v3"
"github.com/cockroachdb/errors"
"gopkg.in/yaml.v3"
)
@ -67,7 +67,7 @@ func ipFamily(addrOrCIDR string) (string, error) {
// from IP cidr
ipFromCIDR, _, err := net.ParseCIDR(addrOrCIDR)
if err != nil {
return "Invalid", fmt.Errorf("%s is not ip or cidr", addrOrCIDR)
return "Invalid", errors.Errorf("%s is not ip or cidr", addrOrCIDR)
}
ip = ipFromCIDR
}

View File

@ -18,8 +18,8 @@ package tmpl
import (
"bytes"
"fmt"
"github.com/cockroachdb/errors"
kkprojectv1 "github.com/kubesphere/kubekey/api/project/v1"
"k8s.io/klog/v2"
@ -41,12 +41,12 @@ func ParseFunc[C ~map[string]any, Output any](ctx C, input string, f func([]byte
// Parse the template string
tl, err := internal.Template.Parse(input)
if err != nil {
return f(nil), fmt.Errorf("failed to parse template '%s': %w", input, err)
return f(nil), errors.Wrapf(err, "failed to parse template '%s'", input)
}
// Execute template with provided context
result := bytes.NewBuffer(nil)
if err := tl.Execute(result, ctx); err != nil {
return f(nil), fmt.Errorf("failed to execute template '%s': %w", input, err)
return f(nil), errors.Wrapf(err, "failed to execute template '%s'", input)
}
// Log successful parsing
klog.V(6).InfoS(" parse template succeed", "result", result.String())
@ -70,7 +70,7 @@ func ParseBool(ctx map[string]any, inputs ...string) (bool, error) {
return bytes.EqualFold(o, []byte("true"))
})
if err != nil {
return false, err
return false, errors.WithStack(err)
}
if !output {
return output, nil

View File

@ -3,15 +3,13 @@ package executor
import (
"context"
"encoding/json"
"errors"
"fmt"
"slices"
"time"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
kkprojectv1 "github.com/kubesphere/kubekey/api/project/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
@ -42,32 +40,26 @@ func (e blockExecutor) Exec(ctx context.Context) error {
when := e.dealWhen(block.When)
// // check tags
if !tags.IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) {
if !tags.IsEnabled(e.playbook.Spec.Tags, e.playbook.Spec.SkipTags) {
// if not match the tags. skip
continue
}
// merge variable which defined in block
if err := e.variable.Merge(variable.MergeRuntimeVariable(block.Vars, hosts...)); err != nil {
klog.V(5).ErrorS(err, "merge variable error", "pipeline", e.pipeline, "block", block.Name)
return err
return errors.Wrapf(err, "failed to merge block-variable: %q in playbook %q", block.Name, e.playbook)
}
switch {
case len(block.Block) != 0:
if err := e.dealBlock(ctx, hosts, ignoreErrors, when, tags, block); err != nil {
klog.V(5).ErrorS(err, "deal block error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
return err
return errors.Wrapf(err, "failed to deal block %q in playbook %q", block.Name, ctrlclient.ObjectKeyFromObject(e.playbook))
}
case block.IncludeTasks != "":
// do nothing. include tasks has converted to blocks.
default:
if err := e.dealTask(ctx, hosts, when, block); err != nil {
klog.V(5).ErrorS(err, "deal task error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
return err
return errors.Wrapf(err, "failed to deal task %s in playbook %q", block.Name, ctrlclient.ObjectKeyFromObject(e.playbook))
}
}
}
@ -130,11 +122,10 @@ func (e blockExecutor) dealBlock(ctx context.Context, hosts []string, ignoreErro
when: when,
tags: tags,
}.Exec(ctx)); err != nil {
klog.V(5).ErrorS(err, "execute tasks from block error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
errs = errors.Join(errs, err)
errs = errors.Join(errs, errors.Wrapf(err, "failed to execute block %q tasks in playbook %q", block.Name, ctrlclient.ObjectKeyFromObject(e.playbook)))
}
// if block exec failed exec rescue
if e.pipeline.Status.Phase == kkcorev1.PipelinePhaseFailed && len(block.Rescue) != 0 {
if e.playbook.Status.Phase == kkcorev1.PlaybookPhaseFailed && len(block.Rescue) != 0 {
if err := (blockExecutor{
option: e.option,
hosts: hosts,
@ -144,8 +135,7 @@ func (e blockExecutor) dealBlock(ctx context.Context, hosts []string, ignoreErro
when: when,
tags: tags,
}.Exec(ctx)); err != nil {
klog.V(5).ErrorS(err, "execute tasks from rescue error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
errs = errors.Join(errs, err)
errs = errors.Join(errs, errors.Wrapf(err, "failed to execute rescue %q tasks in playbook %q", block.Name, ctrlclient.ObjectKeyFromObject(e.playbook)))
}
}
// exec always after block
@ -159,8 +149,7 @@ func (e blockExecutor) dealBlock(ctx context.Context, hosts []string, ignoreErro
when: when,
tags: tags,
}.Exec(ctx)); err != nil {
klog.V(5).ErrorS(err, "execute tasks from always error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
errs = errors.Join(errs, err)
errs = errors.Join(errs, errors.Wrapf(err, "failed to execute always %q tasks in playbook %q", block.Name, ctrlclient.ObjectKeyFromObject(e.playbook)))
}
}
// when execute error. return
@ -174,9 +163,7 @@ func (e blockExecutor) dealTask(ctx context.Context, hosts []string, when []stri
for n, a := range block.UnknownField {
data, err := json.Marshal(a)
if err != nil {
klog.V(5).ErrorS(err, "Marshal unknown field error", "field", n, "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
return err
return errors.Wrapf(err, "failed to marshal block %q unknown filed %q in playbook", block.Name, n, ctrlclient.ObjectKeyFromObject(e.playbook))
}
if m := modules.FindModule(n); m != nil {
task.Spec.Module.Name = n
@ -186,23 +173,17 @@ func (e blockExecutor) dealTask(ctx context.Context, hosts []string, when []stri
}
}
if task.Spec.Module.Name == "" { // action is necessary for a task
klog.V(5).ErrorS(nil, "No module/action detected in task", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
return fmt.Errorf("no module/action detected in task: %s", task.Name)
return errors.Errorf("no module/action detected in task: %s", task.Name)
}
// complete by pipeline
task.GenerateName = e.pipeline.Name + "-"
task.Namespace = e.pipeline.Namespace
if err := ctrl.SetControllerReference(e.pipeline, task, e.client.Scheme()); err != nil {
klog.V(5).ErrorS(err, "Set controller reference error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
return err
// complete by playbook
task.GenerateName = e.playbook.Name + "-"
task.Namespace = e.playbook.Namespace
if err := ctrl.SetControllerReference(e.playbook, task, e.client.Scheme()); err != nil {
return errors.Wrapf(err, "failed to set playbook %q ownerReferences to %q", ctrlclient.ObjectKeyFromObject(e.playbook), block.Name)
}
if err := (&taskExecutor{option: e.option, task: task, taskRunTimeout: 60 * time.Minute}).Exec(ctx); err != nil {
klog.V(5).ErrorS(err, "exec task error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
return err
return errors.Wrapf(err, "failed to execute task %s in playbook %q", block.Name, ctrlclient.ObjectKeyFromObject(e.playbook))
}
return nil

View File

@ -10,16 +10,16 @@ import (
"github.com/kubesphere/kubekey/v4/pkg/variable"
)
// Executor all task in pipeline
// Executor all task in playbook
type Executor interface {
Exec(ctx context.Context) error
}
// option for pipelineExecutor, blockExecutor, taskExecutor
// option for playbookExecutor, blockExecutor, taskExecutor
type option struct {
client ctrlclient.Client
pipeline *kkcorev1.Pipeline
playbook *kkcorev1.Playbook
variable variable.Variable
// commandLine log output. default os.stdout
logOutput io.Writer

View File

@ -19,20 +19,20 @@ func newTestOption() (*option, error) {
var err error
o := &option{
client: fake.NewClientBuilder().WithScheme(_const.Scheme).WithStatusSubresource(&kkcorev1.Pipeline{}, &kkcorev1alpha1.Task{}).Build(),
pipeline: &kkcorev1.Pipeline{
client: fake.NewClientBuilder().WithScheme(_const.Scheme).WithStatusSubresource(&kkcorev1.Playbook{}, &kkcorev1alpha1.Task{}).Build(),
playbook: &kkcorev1.Playbook{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: corev1.NamespaceDefault,
},
Spec: kkcorev1.PipelineSpec{
Spec: kkcorev1.PlaybookSpec{
InventoryRef: &corev1.ObjectReference{
Name: "test",
Namespace: corev1.NamespaceDefault,
},
},
Status: kkcorev1.PipelineStatus{},
Status: kkcorev1.PlaybookStatus{},
},
logOutput: os.Stdout,
}
@ -48,7 +48,7 @@ func newTestOption() (*option, error) {
return nil, err
}
o.variable, err = variable.New(context.TODO(), o.client, *o.pipeline, source.MemorySource)
o.variable, err = variable.New(context.TODO(), o.client, *o.playbook, source.MemorySource)
if err != nil {
return nil, err
}

View File

@ -18,10 +18,9 @@ package executor
import (
"context"
"errors"
"fmt"
"io"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
kkprojectv1 "github.com/kubesphere/kubekey/api/project/v1"
"k8s.io/klog/v2"
@ -34,48 +33,48 @@ import (
"github.com/kubesphere/kubekey/v4/pkg/variable/source"
)
// NewPipelineExecutor return a new pipelineExecutor
func NewPipelineExecutor(ctx context.Context, client ctrlclient.Client, pipeline *kkcorev1.Pipeline, logOutput io.Writer) Executor {
// NewPlaybookExecutor returns a new playbookExecutor.
func NewPlaybookExecutor(ctx context.Context, client ctrlclient.Client, playbook *kkcorev1.Playbook, logOutput io.Writer) Executor {
// get variable
v, err := variable.New(ctx, client, *pipeline, source.FileSource)
v, err := variable.New(ctx, client, *playbook, source.FileSource)
if err != nil {
klog.V(5).ErrorS(nil, "convert playbook error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
klog.V(5).ErrorS(nil, "convert playbook error", "playbook", ctrlclient.ObjectKeyFromObject(playbook))
return nil
}
return &pipelineExecutor{
return &playbookExecutor{
option: &option{
client: client,
pipeline: pipeline,
playbook: playbook,
variable: v,
logOutput: logOutput,
},
}
}
// executor for pipeline
type pipelineExecutor struct {
// executor for playbook
type playbookExecutor struct {
*option
}
// Exec pipeline. covert playbook to block and executor it.
func (e pipelineExecutor) Exec(ctx context.Context) error {
klog.V(5).InfoS("deal project", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
pj, err := project.New(ctx, *e.pipeline, true)
// Exec playbook. Convert the playbook to blocks and execute them.
func (e playbookExecutor) Exec(ctx context.Context) error {
klog.V(5).InfoS("deal project", "playbook", ctrlclient.ObjectKeyFromObject(e.playbook))
pj, err := project.New(ctx, *e.playbook, true)
if err != nil {
return fmt.Errorf("deal project error: %w", err)
return errors.Wrap(err, "failed to deal project")
}
// convert to transfer.Playbook struct
pb, err := pj.MarshalPlaybook()
if err != nil {
return fmt.Errorf("convert playbook error: %w", err)
return errors.Wrap(err, "failed to convert playbook")
}
for _, play := range pb.Play {
// check tags
if !play.Taggable.IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) {
if !play.Taggable.IsEnabled(e.playbook.Spec.Tags, e.playbook.Spec.SkipTags) {
// if not match the tags. skip
continue
}
@ -88,17 +87,17 @@ func (e pipelineExecutor) Exec(ctx context.Context) error {
}
// when gather_fact is set. get host's information from remote.
if err := e.dealGatherFacts(ctx, play.GatherFacts, hosts); err != nil {
return fmt.Errorf("deal gather_facts argument error: %w", err)
return errors.Wrap(err, "failed to deal gather_facts argument")
}
// Batch execution, with each batch being a group of hosts run in serial.
var batchHosts [][]string
if err := e.dealSerial(play.Serial.Data, hosts, &batchHosts); err != nil {
return fmt.Errorf("deal serial argument error: %w", err)
return errors.Wrap(err, "failed to deal serial argument")
}
e.dealRunOnce(play.RunOnce, hosts, &batchHosts)
// exec pipeline in each BatchHosts
// exec playbook in each BatchHosts
if err := e.execBatchHosts(ctx, play, batchHosts); err != nil {
return fmt.Errorf("exec batch hosts error: %v", err)
return errors.Wrap(err, "failed to exec batch hosts")
}
}
@ -106,18 +105,16 @@ func (e pipelineExecutor) Exec(ctx context.Context) error {
}
// execBatchHosts executor block in play order by: "pre_tasks" > "roles" > "tasks" > "post_tasks"
func (e pipelineExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.Play, batchHosts [][]string) any {
func (e playbookExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.Play, batchHosts [][]string) error {
// generate and execute task.
for _, serials := range batchHosts {
// each batch hosts should not be empty.
if len(serials) == 0 {
klog.V(5).ErrorS(nil, "Host is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
return errors.New("host is empty")
return errors.Errorf("host is empty")
}
if err := e.variable.Merge(variable.MergeRuntimeVariable(play.Vars, serials...)); err != nil {
return fmt.Errorf("merge variable error: %w", err)
return errors.Wrapf(err, "failed to merge variable with play %q", play.Name)
}
// generate task from pre tasks
if err := (blockExecutor{
@ -127,16 +124,16 @@ func (e pipelineExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P
blocks: play.PreTasks,
tags: play.Taggable,
}.Exec(ctx)); err != nil {
return fmt.Errorf("execute pre-tasks from play error: %w", err)
return errors.Wrapf(err, "failed to execute pre-tasks with play %q", play.Name)
}
// generate task from role
for _, role := range play.Roles {
if !kkprojectv1.JoinTag(role.Taggable, play.Taggable).IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) {
if !kkprojectv1.JoinTag(role.Taggable, play.Taggable).IsEnabled(e.playbook.Spec.Tags, e.playbook.Spec.SkipTags) {
// if not match the tags. skip
continue
}
if err := e.variable.Merge(variable.MergeRuntimeVariable(role.Vars, serials...)); err != nil {
return fmt.Errorf("merge variable error: %w", err)
return errors.Wrapf(err, "failed to merge variable with role %q", role.Role)
}
// use the most closely configuration
ignoreErrors := role.IgnoreErrors
@ -153,7 +150,7 @@ func (e pipelineExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P
when: role.When.Data,
tags: kkprojectv1.JoinTag(role.Taggable, play.Taggable),
}.Exec(ctx)); err != nil {
return fmt.Errorf("execute role-tasks error: %w", err)
return errors.Wrapf(err, "failed to execute role-tasks")
}
}
// generate task from tasks
@ -164,7 +161,7 @@ func (e pipelineExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P
blocks: play.Tasks,
tags: play.Taggable,
}.Exec(ctx)); err != nil {
return fmt.Errorf("execute tasks error: %w", err)
return errors.Wrapf(err, "failed to execute tasks")
}
// generate task from post tasks
if err := (blockExecutor{
@ -174,7 +171,7 @@ func (e pipelineExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P
blocks: play.PostTasks,
tags: play.Taggable,
}.Exec(ctx)); err != nil {
return fmt.Errorf("execute post-tasks error: %w", err)
return errors.Wrapf(err, "failed to execute post-tasks")
}
}
@ -182,10 +179,10 @@ func (e pipelineExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P
}
// dealHosts "hosts" argument in playbook. get hostname from kkprojectv1.PlayHost
func (e pipelineExecutor) dealHosts(host kkprojectv1.PlayHost, i *[]string) error {
func (e playbookExecutor) dealHosts(host kkprojectv1.PlayHost, i *[]string) error {
ahn, err := e.variable.Get(variable.GetHostnames(host.Hosts))
if err != nil {
return fmt.Errorf("getHostnames error: %w", err)
return errors.Wrapf(err, "failed to get hostnames")
}
if h, ok := ahn.([]string); ok {
@ -199,47 +196,37 @@ func (e pipelineExecutor) dealHosts(host kkprojectv1.PlayHost, i *[]string) erro
}
// dealGatherFacts "gather_facts" argument in playbook. get host remote info and merge to variable
func (e pipelineExecutor) dealGatherFacts(ctx context.Context, gatherFacts bool, hosts []string) error {
func (e playbookExecutor) dealGatherFacts(ctx context.Context, gatherFacts bool, hosts []string) error {
if !gatherFacts {
// skip
return nil
}
dealGatherFactsInHost := func(hostname string) error {
// get host connector
conn, err := connector.NewConnector(hostname, e.variable)
if err != nil {
klog.V(5).ErrorS(err, "new connector error", "hostname", hostname)
return err
return errors.Wrapf(err, "failed to new connector in host %q", hostname)
}
if err := conn.Init(ctx); err != nil {
klog.V(5).ErrorS(err, "init connection error", "hostname", hostname)
return err
return errors.Wrapf(err, "failed to init connection in host %q", hostname)
}
defer conn.Close(ctx)
if gf, ok := conn.(connector.GatherFacts); ok {
remoteInfo, err := gf.HostInfo(ctx)
if err != nil {
klog.V(5).ErrorS(err, "gatherFacts from connector error", "hostname", hostname)
return err
return errors.Wrapf(err, "failed to execute gather_facts from connector in host %q", hostname)
}
if err := e.variable.Merge(variable.MergeRemoteVariable(remoteInfo, hostname)); err != nil {
klog.V(5).ErrorS(err, "merge gather fact error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "host", hostname)
return fmt.Errorf("merge gather fact error: %w", err)
return errors.Wrapf(err, "failed to merge gather_facts in host %q", hostname)
}
}
return nil
}
for _, hostname := range hosts {
if err := dealGatherFactsInHost(hostname); err != nil {
return err
return errors.Wrapf(err, "failed to deal gather_facts for host %q", hostname)
}
}
@ -247,18 +234,18 @@ func (e pipelineExecutor) dealGatherFacts(ctx context.Context, gatherFacts bool,
}
// dealSerial "serial" argument in playbook.
func (e pipelineExecutor) dealSerial(serial []any, hosts []string, batchHosts *[][]string) error {
func (e playbookExecutor) dealSerial(serial []any, hosts []string, batchHosts *[][]string) error {
var err error
*batchHosts, err = converter.GroupHostBySerial(hosts, serial)
if err != nil {
return fmt.Errorf("group host by serial error: %w", err)
return errors.Wrapf(err, "failed to group host by serial")
}
return nil
}
// dealRunOnce argument in playbook. if RunOnce is true. it's always only run in the first hosts.
func (e pipelineExecutor) dealRunOnce(runOnce bool, hosts []string, batchHosts *[][]string) {
func (e playbookExecutor) dealRunOnce(runOnce bool, hosts []string, batchHosts *[][]string) {
if runOnce {
// runOnce only run in first node
*batchHosts = [][]string{{hosts[0]}}

View File

@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/assert"
)
func TestPipelineExecutor_DealRunOnce(t *testing.T) {
func TestPlaybookExecutor_DealRunOnce(t *testing.T) {
testcases := []struct {
name string
runOnce bool
@ -31,7 +31,7 @@ func TestPipelineExecutor_DealRunOnce(t *testing.T) {
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
pipelineExecutor{}.dealRunOnce(tc.runOnce, tc.hosts, &tc.batchHosts)
playbookExecutor{}.dealRunOnce(tc.runOnce, tc.hosts, &tc.batchHosts)
assert.Equal(t, tc.batchHosts, tc.except)
})
}

View File

@ -9,6 +9,7 @@ import (
"sync"
"time"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
kkcorev1alpha1 "github.com/kubesphere/kubekey/api/core/v1alpha1"
"github.com/schollz/progressbar/v3"
@ -37,44 +38,40 @@ type taskExecutor struct {
func (e *taskExecutor) Exec(ctx context.Context) error {
// create task
if err := e.client.Create(ctx, e.task); err != nil {
klog.V(5).ErrorS(err, "create task error", "task", ctrlclient.ObjectKeyFromObject(e.task), "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
return err
return errors.Wrapf(err, "failed to create task %q", e.task.Spec.Name)
}
defer func() {
e.pipeline.Status.TaskResult.Total++
e.playbook.Status.TaskResult.Total++
switch e.task.Status.Phase {
case kkcorev1alpha1.TaskPhaseSuccess:
e.pipeline.Status.TaskResult.Success++
e.playbook.Status.TaskResult.Success++
case kkcorev1alpha1.TaskPhaseIgnored:
e.pipeline.Status.TaskResult.Ignored++
e.playbook.Status.TaskResult.Ignored++
case kkcorev1alpha1.TaskPhaseFailed:
e.pipeline.Status.TaskResult.Failed++
e.playbook.Status.TaskResult.Failed++
}
}()
// 执行任务
// run task
if err := e.runTaskLoop(ctx); err != nil {
return err
return errors.Wrapf(err, "failed to run task %q", ctrlclient.ObjectKeyFromObject(e.task))
}
// exit when task run failed
if e.task.IsFailed() {
var hostReason []kkcorev1.PipelineFailedDetailHost
var hostReason []kkcorev1.PlaybookFailedDetailHost
for _, tr := range e.task.Status.HostResults {
hostReason = append(hostReason, kkcorev1.PipelineFailedDetailHost{
hostReason = append(hostReason, kkcorev1.PlaybookFailedDetailHost{
Host: tr.Host,
Stdout: tr.Stdout,
StdErr: tr.StdErr,
})
}
e.pipeline.Status.FailedDetail = append(e.pipeline.Status.FailedDetail, kkcorev1.PipelineFailedDetail{
e.playbook.Status.FailedDetail = append(e.playbook.Status.FailedDetail, kkcorev1.PlaybookFailedDetail{
Task: e.task.Spec.Name,
Hosts: hostReason,
})
e.pipeline.Status.Phase = kkcorev1.PipelinePhaseFailed
e.playbook.Status.Phase = kkcorev1.PlaybookPhaseFailed
return fmt.Errorf("task %q run failed", e.task.Spec.Name)
return errors.Errorf("task %q run failed", e.task.Spec.Name)
}
return nil
@ -127,13 +124,13 @@ func (e *taskExecutor) runTaskLoop(ctx context.Context) error {
for {
select {
case <-ctx.Done():
return fmt.Errorf("task %q cancelled: %w", e.task.Spec.Name, ctx.Err())
return nil
case <-time.After(e.taskRunTimeout):
return fmt.Errorf("task %q execution timeout", e.task.Spec.Name)
return errors.Errorf("task %q execution timeout", e.task.Spec.Name)
case <-ticker.C:
result, err := reconcile(ctx, ctrl.Request{NamespacedName: ctrlclient.ObjectKeyFromObject(e.task)})
if err != nil {
klog.V(5).ErrorS(err, "failed to reconcile task", "task", ctrlclient.ObjectKeyFromObject(e.task), "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
klog.V(5).ErrorS(err, "failed to reconcile task", "task", ctrlclient.ObjectKeyFromObject(e.task), "playbook", ctrlclient.ObjectKeyFromObject(e.playbook))
}
if result.Requeue {
continue
@ -260,7 +257,7 @@ func (e *taskExecutor) execTaskHostLogs(ctx context.Context, h string, stdout, s
return true, nil
}
if err := bar.Add(1); err != nil {
return false, err
return false, errors.Wrap(err, "failed to process bar")
}
return false, nil
@ -314,7 +311,7 @@ func (e *taskExecutor) executeModule(ctx context.Context, task *kkcorev1alpha1.T
Host: host,
Variable: e.variable,
Task: *e.task,
Pipeline: *e.pipeline,
Playbook: *e.playbook,
})
}
@ -395,7 +392,7 @@ func (e *taskExecutor) dealRegister(stdout, stderr, host string) error {
"stderr": stderrResult,
},
}, host)); err != nil {
return fmt.Errorf("register task result to variable error: %w", err)
return errors.Wrap(err, "failed to register task result to variable")
}
}

View File

@ -24,6 +24,7 @@ import (
"path/filepath"
"time"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
"k8s.io/klog/v2"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
@ -34,7 +35,7 @@ import (
type commandManager struct {
workdir string
*kkcorev1.Pipeline
*kkcorev1.Playbook
*kkcorev1.Inventory
ctrlclient.Client
@ -42,7 +43,7 @@ type commandManager struct {
logOutput io.Writer
}
// Run command Manager. print log and run pipeline executor.
// Run command Manager. print log and run playbook executor.
func (m *commandManager) Run(ctx context.Context) error {
fmt.Fprint(m.logOutput, `
@ -56,36 +57,36 @@ func (m *commandManager) Run(ctx context.Context) error {
|___/
`)
fmt.Fprintf(m.logOutput, "%s [Pipeline %s] start\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(m.Pipeline))
cp := m.Pipeline.DeepCopy()
fmt.Fprintf(m.logOutput, "%s [Playbook %s] start\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(m.Playbook))
cp := m.Playbook.DeepCopy()
defer func() {
fmt.Fprintf(m.logOutput, "%s [Pipeline %s] finish. total: %v,success: %v,ignored: %v,failed: %v\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(m.Pipeline),
m.Pipeline.Status.TaskResult.Total, m.Pipeline.Status.TaskResult.Success, m.Pipeline.Status.TaskResult.Ignored, m.Pipeline.Status.TaskResult.Failed)
fmt.Fprintf(m.logOutput, "%s [Playbook %s] finish. total: %v,success: %v,ignored: %v,failed: %v\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(m.Playbook),
m.Playbook.Status.TaskResult.Total, m.Playbook.Status.TaskResult.Success, m.Playbook.Status.TaskResult.Ignored, m.Playbook.Status.TaskResult.Failed)
go func() {
if !m.Pipeline.Spec.Debug && m.Pipeline.Status.Phase == kkcorev1.PipelinePhaseSucceeded {
if !m.Playbook.Spec.Debug && m.Playbook.Status.Phase == kkcorev1.PlaybookPhaseSucceeded {
<-ctx.Done()
fmt.Fprintf(m.logOutput, "%s [Pipeline %s] clean runtime directory\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(m.Pipeline))
fmt.Fprintf(m.logOutput, "%s [Playbook %s] clean runtime directory\n", time.Now().Format(time.TimeOnly+" MST"), ctrlclient.ObjectKeyFromObject(m.Playbook))
// clean runtime directory
if err := os.RemoveAll(filepath.Join(m.workdir, _const.RuntimeDir)); err != nil {
klog.ErrorS(err, "clean runtime directory error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline), "runtime_dir", filepath.Join(m.workdir, _const.RuntimeDir))
klog.ErrorS(err, "clean runtime directory error", "playbook", ctrlclient.ObjectKeyFromObject(m.Playbook), "runtime_dir", filepath.Join(m.workdir, _const.RuntimeDir))
}
}
}()
// update pipeline status
if err := m.Client.Status().Patch(ctx, m.Pipeline, ctrlclient.MergeFrom(cp)); err != nil {
klog.ErrorS(err, "update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline))
// update playbook status
if err := m.Client.Status().Patch(ctx, m.Playbook, ctrlclient.MergeFrom(cp)); err != nil {
klog.ErrorS(err, "update playbook error", "playbook", ctrlclient.ObjectKeyFromObject(m.Playbook))
}
}()
if err := executor.NewPipelineExecutor(ctx, m.Client, m.Pipeline, m.logOutput).Exec(ctx); err != nil {
klog.ErrorS(err, "executor tasks error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline))
m.Pipeline.Status.Phase = kkcorev1.PipelinePhaseFailed
m.Pipeline.Status.FailureReason = kkcorev1.PipelineFailedReasonTaskFailed
m.Pipeline.Status.FailureMessage = err.Error()
if err := executor.NewPlaybookExecutor(ctx, m.Client, m.Playbook, m.logOutput).Exec(ctx); err != nil {
klog.ErrorS(err, "executor tasks error", "playbook", ctrlclient.ObjectKeyFromObject(m.Playbook))
m.Playbook.Status.Phase = kkcorev1.PlaybookPhaseFailed
m.Playbook.Status.FailureReason = kkcorev1.PlaybookFailedReasonTaskFailed
m.Playbook.Status.FailureMessage = err.Error()
return err
return errors.Wrapf(err, "failed to executor playbook %q", ctrlclient.ObjectKeyFromObject(m.Playbook))
}
m.Pipeline.Status.Phase = kkcorev1.PipelinePhaseSucceeded
m.Playbook.Status.Phase = kkcorev1.PlaybookPhaseSucceeded
return nil
}

View File

@ -18,9 +18,8 @@ package manager
import (
"context"
"errors"
"fmt"
"github.com/cockroachdb/errors"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
@ -38,7 +37,7 @@ func (m controllerManager) Run(ctx context.Context) error {
ctrl.SetLogger(klog.NewKlogr())
restconfig, err := ctrl.GetConfig()
if err != nil {
return fmt.Errorf("cannot get restconfig in kubernetes. error is %w", err)
return errors.Wrap(err, "failed to get restconfig in kubernetes")
}
mgr, err := ctrl.NewManager(restconfig, ctrl.Options{
@ -49,17 +48,17 @@ func (m controllerManager) Run(ctx context.Context) error {
HealthProbeBindAddress: ":9440",
})
if err != nil {
return fmt.Errorf("failed to create controller manager. error: %w", err)
return errors.Wrap(err, "failed to create controller manager")
}
if err := mgr.AddHealthzCheck("default", healthz.Ping); err != nil {
return fmt.Errorf("failed to add default healthcheck. error: %w", err)
return errors.Wrap(err, "failed to add default healthcheck")
}
if err := mgr.AddReadyzCheck("default", healthz.Ping); err != nil {
return fmt.Errorf("failed to add default readycheck. error: %w", err)
return errors.Wrap(err, "failed to add default readycheck")
}
if err := m.register(mgr); err != nil {
return err
return errors.Wrap(err, "failed to register manager")
}
return mgr.Start(ctx)
@ -76,7 +75,7 @@ func (m controllerManager) register(mgr ctrl.Manager) error {
continue
}
if err := c.SetupWithManager(mgr, *m.ControllerManagerServerOptions); err != nil {
return fmt.Errorf("failed to register controller %q. error: %w", c.Name(), err)
return errors.Wrapf(err, "failed to register controller %q", c.Name())
}
}

View File

@ -35,7 +35,7 @@ type Manager interface {
// CommandManagerOptions for NewCommandManager
type CommandManagerOptions struct {
Workdir string
*kkcorev1.Pipeline
*kkcorev1.Playbook
*kkcorev1.Config
*kkcorev1.Inventory
@ -46,7 +46,7 @@ type CommandManagerOptions struct {
func NewCommandManager(o CommandManagerOptions) Manager {
return &commandManager{
workdir: o.Workdir,
Pipeline: o.Pipeline,
Playbook: o.Playbook,
Inventory: o.Inventory,
Client: o.Client,
logOutput: os.Stdout,

View File

@ -18,9 +18,9 @@ package modules
import (
"context"
"errors"
"fmt"
"github.com/cockroachdb/errors"
kkprojectv1 "github.com/kubesphere/kubekey/api/project/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
@ -39,7 +39,8 @@ type assertArgs struct {
func newAssertArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*assertArgs, error) {
var err error
aa := &assertArgs{}
var aa = &assertArgs{}
args := variable.Extension2Variables(raw)
if aa.that, err = variable.StringSliceVar(vars, args, "that"); err != nil {
return nil, errors.New("\"that\" should be []string or string")
@ -72,8 +73,6 @@ func ModuleAssert(ctx context.Context, options ExecOptions) (string, string) {
aa, err := newAssertArgs(ctx, options.Args, ha)
if err != nil {
klog.V(4).ErrorS(err, "get assert args error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))
return "", err.Error()
}

View File

@ -39,7 +39,7 @@ func TestCommand(t *testing.T) {
Variable: &testVariable{},
},
ctxFunc: context.Background,
exceptStderr: "failed to connector of \"\" error: host is not set",
exceptStderr: "failed to connector of \"\" error: failed to init connector for host \"\": host is not set",
},
{
name: "exec command success",

View File

@ -18,7 +18,6 @@ package modules
import (
"context"
"errors"
"fmt"
"io/fs"
"math"
@ -26,11 +25,11 @@ import (
"path/filepath"
"strings"
"github.com/cockroachdb/errors"
kkcorev1alpha1 "github.com/kubesphere/kubekey/api/core/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/kubesphere/kubekey/v4/pkg/connector"
"github.com/kubesphere/kubekey/v4/pkg/project"
@ -77,8 +76,6 @@ func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) {
ca, err := newCopyArgs(ctx, options.Args, ha)
if err != nil {
klog.V(4).ErrorS(err, "get copy args error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))
return "", err.Error()
}
@ -123,7 +120,7 @@ func (ca copyArgs) copySrc(ctx context.Context, options ExecOptions, conn connec
return StdoutSuccess, ""
}
dealRelativeFilePath := func() (string, string) {
pj, err := project.New(ctx, options.Pipeline, false)
pj, err := project.New(ctx, options.Playbook, false)
if err != nil {
return "", fmt.Sprintf("get project error: %v", err)
}
@ -178,12 +175,12 @@ func (ca copyArgs) relDir(ctx context.Context, pj project.Project, role string,
return nil
}
if err != nil {
return fmt.Errorf("walk dir %s error: %w", ca.src, err)
return errors.Wrapf(err, "failed to walk dir %s", ca.src)
}
info, err := d.Info()
if err != nil {
return fmt.Errorf("get file info error: %w", err)
return errors.Wrap(err, "failed to get file info")
}
mode := info.Mode()
@ -193,25 +190,25 @@ func (ca copyArgs) relDir(ctx context.Context, pj project.Project, role string,
data, err := pj.ReadFile(path, project.GetFileOption{Role: role})
if err != nil {
return fmt.Errorf("read file error: %w", err)
return errors.Wrap(err, "failed to read file")
}
dest := ca.dest
if strings.HasSuffix(ca.dest, "/") {
rel, err := pj.Rel(ca.src, path, project.GetFileOption{Role: role})
if err != nil {
return fmt.Errorf("get relative file path error: %w", err)
return errors.Wrap(err, "failed to get relative file path")
}
dest = filepath.Join(ca.dest, rel)
}
if err := conn.PutFile(ctx, data, dest, mode); err != nil {
return fmt.Errorf("copy file error: %w", err)
return errors.Wrap(err, "failed to copy file")
}
return nil
}); err != nil {
return err
return errors.Wrapf(err, "failed to walk dir %q", ca.src)
}
return nil
@ -229,7 +226,7 @@ func (ca copyArgs) readFile(ctx context.Context, data []byte, mode fs.FileMode,
}
if err := conn.PutFile(ctx, data, dest, mode); err != nil {
return fmt.Errorf("copy file error: %w", err)
return errors.Wrap(err, "failed to copy file")
}
return nil
@ -243,12 +240,12 @@ func (ca copyArgs) absDir(ctx context.Context, conn connector.Connector) error {
}
if err != nil {
return fmt.Errorf("walk dir %s error: %w", ca.src, err)
return errors.WithStack(err)
}
// get file old mode
info, err := d.Info()
if err != nil {
return fmt.Errorf("get file info error: %w", err)
return errors.Wrapf(err, "failed to get file %q info", path)
}
mode := info.Mode()
@ -258,25 +255,25 @@ func (ca copyArgs) absDir(ctx context.Context, conn connector.Connector) error {
// read file
data, err := os.ReadFile(path)
if err != nil {
return fmt.Errorf("read file error: %w", err)
return errors.Wrapf(err, "failed to read file %q", path)
}
// copy file to remote
dest := ca.dest
if strings.HasSuffix(ca.dest, "/") {
rel, err := filepath.Rel(ca.src, path)
if err != nil {
return fmt.Errorf("get relative file path error: %w", err)
return errors.Wrap(err, "failed to get relative filepath")
}
dest = filepath.Join(ca.dest, rel)
}
if err := conn.PutFile(ctx, data, dest, mode); err != nil {
return fmt.Errorf("copy file error: %w", err)
return errors.Wrap(err, "failed to put file")
}
return nil
}); err != nil {
return err
return errors.Wrapf(err, "failed to walk dir %q", ca.src)
}
return nil

View File

@ -22,8 +22,6 @@ import (
"os"
"path/filepath"
"k8s.io/klog/v2"
"github.com/kubesphere/kubekey/v4/pkg/variable"
)
@ -61,8 +59,6 @@ func ModuleFetch(ctx context.Context, options ExecOptions) (string, string) {
destFile, err := os.Create(destParam)
if err != nil {
klog.V(4).ErrorS(err, "failed to create dest file")
return "", err.Error()
}
defer destFile.Close()

View File

@ -9,7 +9,6 @@ import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"math"
"math/big"
@ -17,6 +16,7 @@ import (
"os"
"time"
"github.com/cockroachdb/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
@ -62,11 +62,11 @@ type genCertArgs struct {
func (gca genCertArgs) signedCertificate(cfg *cgutilcert.Config) (string, string) {
parentKey, err := TryLoadKeyFromDisk(gca.rootKey)
if err != nil {
return "", fmt.Sprintf("failed to load root key: %v", err)
return "", fmt.Sprintf("failed to load root key: %+v", err)
}
parentCert, _, err := TryLoadCertChainFromDisk(gca.rootCert)
if err != nil {
return "", fmt.Sprintf("failed to load root certificate: %v", err)
return "", fmt.Sprintf("failed to load root certificate: %+v", err)
}
if gca.policy == policyIfNotPresent {
@ -204,10 +204,10 @@ func WriteKey(outKey string, key crypto.Signer, policy string) error {
encoded, err := keyutil.MarshalPrivateKeyToPEM(key)
if err != nil {
return fmt.Errorf("unable to marshal private key to PEM, error: %w", err)
return errors.Wrap(err, "failed to marshal private key to PEM")
}
if err := keyutil.WriteKey(outKey, encoded); err != nil {
return fmt.Errorf("unable to write private key to file %s, error: %w", outKey, err)
return errors.Wrapf(err, "failed to write private key to file %s", outKey)
}
return nil
@ -224,7 +224,7 @@ func WriteCert(outCert string, cert *x509.Certificate, policy string) error {
}
if err := cgutilcert.WriteCert(outCert, EncodeCertPEM(cert)); err != nil {
return fmt.Errorf("unable to write certificate to file %s, error: %w", outCert, err)
return errors.Wrapf(err, "failed to write certificate to file %s", outCert)
}
return nil
@ -245,7 +245,7 @@ func TryLoadKeyFromDisk(rootKey string) (crypto.Signer, error) {
// Parse the private key from a file
privKey, err := keyutil.PrivateKeyFromFile(rootKey)
if err != nil {
return nil, fmt.Errorf("couldn't load the private key file %s, error: %w", rootKey, err)
return nil, errors.Wrapf(err, "failed to load the private key file %s", rootKey)
}
// Allow RSA and ECDSA formats only
@ -256,7 +256,7 @@ func TryLoadKeyFromDisk(rootKey string) (crypto.Signer, error) {
case *ecdsa.PrivateKey:
key = k
default:
return nil, fmt.Errorf("the private key file %s is neither in RSA nor ECDSA format", rootKey)
return nil, errors.Errorf("the private key file %s is neither in RSA nor ECDSA format", rootKey)
}
return key, nil
@ -266,7 +266,7 @@ func TryLoadKeyFromDisk(rootKey string) (crypto.Signer, error) {
func TryLoadCertChainFromDisk(rootCert string) (*x509.Certificate, []*x509.Certificate, error) {
certs, err := cgutilcert.CertsFromFile(rootCert)
if err != nil {
return nil, nil, fmt.Errorf("couldn't load the certificate file %s, error: %w", rootCert, err)
return nil, nil, errors.Wrapf(err, "failed to load the certificate file %s", rootCert)
}
cert := certs[0]
@ -306,7 +306,7 @@ func NewSelfSignedCACert(cfg cgutilcert.Config, after time.Duration, key crypto.
// returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max).
serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1))
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to generate certificate serial number")
}
serial = new(big.Int).Add(serial, big.NewInt(1))
@ -334,7 +334,7 @@ func NewSelfSignedCACert(cfg cgutilcert.Config, after time.Duration, key crypto.
certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to create certificate")
}
return x509.ParseCertificate(certDERBytes)
@ -345,7 +345,7 @@ func NewSignedCert(cfg cgutilcert.Config, after time.Duration, key crypto.Signer
// returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max).
serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1))
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to generate certificate serial number")
}
serial = new(big.Int).Add(serial, big.NewInt(1))
@ -382,7 +382,7 @@ func NewSignedCert(cfg cgutilcert.Config, after time.Duration, key crypto.Signer
certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to create certificate")
}
return x509.ParseCertificate(certDERBytes)
@ -415,10 +415,10 @@ func ValidateCertPeriod(cert *x509.Certificate, offset time.Duration) error {
period := fmt.Sprintf("NotBefore: %v, NotAfter: %v", cert.NotBefore, cert.NotAfter)
now := time.Now().Add(offset)
if now.Before(cert.NotBefore) {
return fmt.Errorf("the certificate is not valid yet: %s", period)
return errors.Errorf("the certificate is not valid yet: %s", period)
}
if now.After(cert.NotAfter) {
return fmt.Errorf("the certificate has expired: %s", period)
return errors.Errorf("the certificate has expired: %s", period)
}
return nil
@ -442,7 +442,7 @@ func VerifyCertChain(cert *x509.Certificate, intermediates []*x509.Certificate,
}
if _, err := cert.Verify(verifyOptions); err != nil {
return err
return errors.Wrapf(err, "failed to verify certificate")
}
return nil
@ -453,13 +453,13 @@ func VerifyCertChain(cert *x509.Certificate, intermediates []*x509.Certificate,
func validateCertificateWithConfig(cert *x509.Certificate, baseName string, cfg *cgutilcert.Config) error {
for _, dnsName := range cfg.AltNames.DNSNames {
if err := cert.VerifyHostname(dnsName); err != nil {
return fmt.Errorf("certificate %s is invalid, error: %w", baseName, err)
return errors.Wrapf(err, "certificate %s is invalid", baseName)
}
}
for _, ipAddress := range cfg.AltNames.IPs {
if err := cert.VerifyHostname(ipAddress.String()); err != nil {
return fmt.Errorf("certificate %s is invalid, error: %w", baseName, err)
return errors.Wrapf(err, "certificate %s is invalid", baseName)
}
}

View File

@ -21,7 +21,6 @@ import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
@ -30,6 +29,8 @@ import (
"path/filepath"
"strings"
"github.com/cockroachdb/errors"
"github.com/containerd/containerd/images"
imagev1 "github.com/opencontainers/image-spec/specs-go/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
@ -60,7 +61,7 @@ func (i imagePullArgs) pull(ctx context.Context) error {
for _, img := range i.manifests {
src, err := remote.NewRepository(img)
if err != nil {
return fmt.Errorf("failed to get remote image: %w", err)
return errors.Wrapf(err, "failed to get remote image %q", img)
}
src.Client = &auth.Client{
Client: &http.Client{
@ -79,11 +80,11 @@ func (i imagePullArgs) pull(ctx context.Context) error {
dst, err := newLocalRepository(filepath.Join(domain, src.Reference.Repository)+":"+src.Reference.Reference, i.imagesDir)
if err != nil {
return fmt.Errorf("failed to get local image: %w", err)
return errors.Wrapf(err, "failed to get local repository %q for image %q", i.imagesDir, img)
}
if _, err = oras.Copy(ctx, src, src.Reference.Reference, dst, "", oras.DefaultCopyOptions); err != nil {
return fmt.Errorf("failed to copy image: %w", err)
return errors.Wrapf(err, "failed to pull image %q to local dir", img)
}
}
@ -102,15 +103,15 @@ type imagePushArgs struct {
// push local dir images to remote registry
func (i imagePushArgs) push(ctx context.Context) error {
manifests, err := findLocalImageManifests(i.imagesDir)
klog.V(5).Info("manifests found", "manifests", manifests)
if err != nil {
return fmt.Errorf("failed to find local image manifests: %w", err)
		return errors.Wrapf(err, "failed to find image manifests in local dir %q", i.imagesDir)
}
klog.V(5).Info("manifests found", "manifests", manifests)
for _, img := range manifests {
src, err := newLocalRepository(filepath.Join(domain, img), i.imagesDir)
if err != nil {
return fmt.Errorf("failed to get local image: %w", err)
return errors.Wrapf(err, "failed to get local repository %q for image %q", i.imagesDir, img)
}
repo := src.Reference.Repository
if i.namespace != "" {
@ -119,7 +120,7 @@ func (i imagePushArgs) push(ctx context.Context) error {
dst, err := remote.NewRepository(filepath.Join(i.registry, repo) + ":" + src.Reference.Reference)
if err != nil {
return fmt.Errorf("failed to get remote repo: %w", err)
return errors.Wrapf(err, "failed to get remote repository %q", filepath.Join(i.registry, repo)+":"+src.Reference.Reference)
}
dst.Client = &auth.Client{
Client: &http.Client{
@ -137,7 +138,7 @@ func (i imagePushArgs) push(ctx context.Context) error {
}
if _, err = oras.Copy(ctx, src, src.Reference.Reference, dst, "", oras.DefaultCopyOptions); err != nil {
return fmt.Errorf("failed to copy image: %w", err)
return errors.Wrapf(err, "failed to push image %q to remote", img)
}
}
@ -250,7 +251,7 @@ func findLocalImageManifests(localDir string) ([]string, error) {
var manifests []string
if err := filepath.WalkDir(localDir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
return errors.WithStack(err)
}
if path == filepath.Join(localDir, "blobs") {
@ -263,14 +264,13 @@ func findLocalImageManifests(localDir string) ([]string, error) {
file, err := os.ReadFile(path)
if err != nil {
return err
return errors.Wrapf(err, "failed to read file %q", path)
}
var data map[string]any
if err := json.Unmarshal(file, &data); err != nil {
// skip un except file (empty)
klog.V(4).ErrorS(err, "unmarshal manifests file error", "file", path)
			// skip unexpected file (empty)
return nil
}
@ -278,18 +278,19 @@ func findLocalImageManifests(localDir string) ([]string, error) {
if !ok {
return errors.New("invalid mediaType")
}
if mediaType == imagev1.MediaTypeImageIndex || mediaType == "application/vnd.docker.distribution.manifest.list.v2+json" {
if mediaType == imagev1.MediaTypeImageIndex || mediaType == imagev1.MediaTypeImageManifest || // oci multi or single schema
mediaType == images.MediaTypeDockerSchema2ManifestList || mediaType == images.MediaTypeDockerSchema2Manifest { // docker multi or single schema
subpath, err := filepath.Rel(localDir, path)
if err != nil {
return err
return errors.Wrap(err, "failed to get relative filepath")
}
// the last dir is manifests. should delete it
// the parent dir of subpath is "manifests". should delete it
manifests = append(manifests, filepath.Dir(filepath.Dir(subpath))+":"+filepath.Base(subpath))
}
return nil
}); err != nil {
return nil, err
		return nil, errors.Wrapf(err, "failed to walk dir %q", localDir)
}
return manifests, nil
@ -299,7 +300,7 @@ func findLocalImageManifests(localDir string) ([]string, error) {
func newLocalRepository(reference, localDir string) (*remote.Repository, error) {
ref, err := registry.ParseReference(reference)
if err != nil {
return nil, err
return nil, errors.Wrapf(err, "failed to parse reference %q", reference)
}
return &remote.Repository{
@ -325,56 +326,56 @@ type imageTransport struct {
func (i imageTransport) RoundTrip(request *http.Request) (*http.Response, error) {
switch request.Method {
case http.MethodHead: // check if file exist
return i.head(request)
return i.head(request), nil
case http.MethodPost:
return i.post(request)
return i.post(request), nil
case http.MethodPut:
return i.put(request)
return i.put(request), nil
case http.MethodGet:
return i.get(request)
return i.get(request), nil
default:
return responseNotAllowed, nil
}
}
// head method for http.MethodHead. check if file is exist in blobs dir or manifests dir
func (i imageTransport) head(request *http.Request) (*http.Response, error) {
func (i imageTransport) head(request *http.Request) *http.Response {
if strings.HasSuffix(filepath.Dir(request.URL.Path), "blobs") { // blobs
filename := filepath.Join(i.baseDir, "blobs", filepath.Base(request.URL.Path))
if _, err := os.Stat(filename); err != nil {
klog.V(4).ErrorS(err, "failed to stat blobs", "filename", filename)
return responseNotFound, nil
return responseNotFound
}
return responseOK, nil
return responseOK
} else if strings.HasSuffix(filepath.Dir(request.URL.Path), "manifests") { // manifests
filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix))
if _, err := os.Stat(filename); err != nil {
klog.V(4).ErrorS(err, "failed to stat blobs", "filename", filename)
return responseNotFound, nil
return responseNotFound
}
file, err := os.ReadFile(filename)
if err != nil {
klog.V(4).ErrorS(err, "failed to read file", "filename", filename)
return responseServerError, nil
return responseServerError
}
var data map[string]any
if err := json.Unmarshal(file, &data); err != nil {
klog.V(4).ErrorS(err, "failed to unmarshal file", "filename", filename)
return responseServerError, nil
return responseServerError
}
mediaType, ok := data["mediaType"].(string)
if !ok {
klog.V(4).ErrorS(nil, "unknown mediaType", "filename", filename)
return responseServerError, nil
return responseServerError
}
return &http.Response{
@ -384,14 +385,14 @@ func (i imageTransport) head(request *http.Request) (*http.Response, error) {
"Content-Type": []string{mediaType},
},
ContentLength: int64(len(file)),
}, nil
}
}
return responseNotAllowed, nil
return responseNotAllowed
}
// post method for http.MethodPost, accept request.
func (i imageTransport) post(request *http.Request) (*http.Response, error) {
func (i imageTransport) post(request *http.Request) *http.Response {
if strings.HasSuffix(request.URL.Path, "/uploads/") {
return &http.Response{
Proto: "Local",
@ -400,68 +401,80 @@ func (i imageTransport) post(request *http.Request) (*http.Response, error) {
"Location": []string{filepath.Dir(request.URL.Path)},
},
Request: request,
}, nil
}
}
return responseNotAllowed, nil
return responseNotAllowed
}
// put method for http.MethodPut, create file in blobs dir or manifests dir
func (i imageTransport) put(request *http.Request) (*http.Response, error) {
func (i imageTransport) put(request *http.Request) *http.Response {
if strings.HasSuffix(request.URL.Path, "/uploads") { // blobs
body, err := io.ReadAll(request.Body)
if err != nil {
return responseServerError, nil
klog.V(4).ErrorS(err, "failed to read request")
return responseServerError
}
defer request.Body.Close()
filename := filepath.Join(i.baseDir, "blobs", request.URL.Query().Get("digest"))
if err := os.MkdirAll(filepath.Dir(filename), os.ModePerm); err != nil {
return responseServerError, nil
klog.V(4).ErrorS(err, "failed to create dir", "dir", filepath.Dir(filename))
return responseServerError
}
if err := os.WriteFile(filename, body, os.ModePerm); err != nil {
return responseServerError, nil
klog.V(4).ErrorS(err, "failed to write file", "filename", filename)
return responseServerError
}
return responseCreated, nil
return responseCreated
} else if strings.HasSuffix(filepath.Dir(request.URL.Path), "/manifests") { // manifest
filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix))
if err := os.MkdirAll(filepath.Dir(filename), os.ModePerm); err != nil {
return responseServerError, nil
}
body, err := io.ReadAll(request.Body)
if err != nil {
return responseServerError, nil
klog.V(4).ErrorS(err, "failed to read request")
return responseServerError
}
defer request.Body.Close()
if err := os.WriteFile(filename, body, os.ModePerm); err != nil {
return responseServerError, nil
filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix))
if err := os.MkdirAll(filepath.Dir(filename), os.ModePerm); err != nil {
klog.V(4).ErrorS(err, "failed to create dir", "dir", filepath.Dir(filename))
return responseServerError
}
return responseCreated, nil
if err := os.WriteFile(filename, body, os.ModePerm); err != nil {
klog.V(4).ErrorS(err, "failed to write file", "filename", filename)
return responseServerError
}
return responseCreated
}
return responseNotAllowed, nil
return responseNotAllowed
}
// get method for http.MethodGet, get file in blobs dir or manifest dir
func (i imageTransport) get(request *http.Request) (*http.Response, error) {
func (i imageTransport) get(request *http.Request) *http.Response {
if strings.HasSuffix(filepath.Dir(request.URL.Path), "blobs") { // blobs
filename := filepath.Join(i.baseDir, "blobs", filepath.Base(request.URL.Path))
if _, err := os.Stat(filename); err != nil {
klog.V(4).ErrorS(err, "failed to stat blobs", "filename", filename)
return responseNotFound, nil
return responseNotFound
}
file, err := os.ReadFile(filename)
if err != nil {
klog.V(4).ErrorS(err, "failed to read file", "filename", filename)
return responseServerError, nil
return responseServerError
}
return &http.Response{
@ -469,30 +482,34 @@ func (i imageTransport) get(request *http.Request) (*http.Response, error) {
StatusCode: http.StatusOK,
ContentLength: int64(len(file)),
Body: io.NopCloser(bytes.NewReader(file)),
}, nil
}
} else if strings.HasSuffix(filepath.Dir(request.URL.Path), "manifests") { // manifests
filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix))
if _, err := os.Stat(filename); err != nil {
klog.V(4).ErrorS(err, "failed to stat blobs", "filename", filename)
return responseNotFound, nil
return responseNotFound
}
file, err := os.ReadFile(filename)
if err != nil {
klog.V(4).ErrorS(err, "failed to read file", "filename", filename)
return responseServerError, nil
return responseServerError
}
var data map[string]any
if err := json.Unmarshal(file, &data); err != nil {
return responseServerError, err
klog.V(4).ErrorS(err, "failed to unmarshal file data", "filename", filename)
return responseServerError
}
mediaType, ok := data["mediaType"].(string)
if !ok {
return responseServerError, nil
klog.V(4).ErrorS(nil, "unknown mediaType", "filename", filename)
return responseServerError
}
return &http.Response{
@ -503,8 +520,8 @@ func (i imageTransport) get(request *http.Request) (*http.Response, error) {
},
ContentLength: int64(len(file)),
Body: io.NopCloser(bytes.NewReader(file)),
}, nil
}
}
return responseNotAllowed, nil
return responseNotAllowed
}

View File

@ -18,13 +18,12 @@ package modules
import (
"context"
"fmt"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
kkcorev1alpha1 "github.com/kubesphere/kubekey/api/core/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/klog/v2"
"github.com/kubesphere/kubekey/v4/pkg/connector"
"github.com/kubesphere/kubekey/v4/pkg/variable"
@ -55,19 +54,19 @@ type ExecOptions struct {
variable.Variable
// the task to be executed
Task kkcorev1alpha1.Task
// the pipeline to be executed
Pipeline kkcorev1.Pipeline
// the playbook to be executed
Playbook kkcorev1.Playbook
}
func (o ExecOptions) getAllVariables() (map[string]any, error) {
ha, err := o.Variable.Get(variable.GetAllVariable(o.Host))
if err != nil {
return nil, fmt.Errorf("failed to get host %s variable: %w", o.Host, err)
return nil, errors.Wrapf(err, "failed to get host %s variable", o.Host)
}
vd, ok := ha.(map[string]any)
if !ok {
return nil, fmt.Errorf("host: %s variable is not a map", o.Host)
return nil, errors.Errorf("host: %s variable is not a map", o.Host)
}
return vd, nil
@ -78,7 +77,7 @@ var module = make(map[string]ModuleExecFunc)
// RegisterModule register module
func RegisterModule(moduleName string, exec ModuleExecFunc) error {
if _, ok := module[moduleName]; ok {
return fmt.Errorf("module %s is exist", moduleName)
return errors.Errorf("module %s is exist", moduleName)
}
module[moduleName] = exec
@ -120,14 +119,12 @@ func getConnector(ctx context.Context, host string, v variable.Variable) (connec
} else {
conn, err = connector.NewConnector(host, v)
if err != nil {
return conn, err
return conn, errors.Wrapf(err, "failed to get connector for host %q", host)
}
}
if err = conn.Init(ctx); err != nil {
klog.V(4).ErrorS(err, "failed to init connector")
return conn, err
return conn, errors.Wrapf(err, "failed to init connector for host %q", host)
}
return conn, nil

View File

@ -18,10 +18,11 @@ package modules
import (
"context"
"errors"
"io"
"io/fs"
"github.com/cockroachdb/errors"
"github.com/kubesphere/kubekey/v4/pkg/connector"
"github.com/kubesphere/kubekey/v4/pkg/variable"
)

View File

@ -44,7 +44,7 @@ func TestSetFact(t *testing.T) {
Host: "",
Variable: &testVariable{},
Task: kkcorev1alpha1.Task{},
Pipeline: kkcorev1.Pipeline{},
Playbook: kkcorev1.Playbook{},
},
exceptStdout: "success",
},
@ -57,7 +57,7 @@ func TestSetFact(t *testing.T) {
Host: "",
Variable: &testVariable{},
Task: kkcorev1alpha1.Task{},
Pipeline: kkcorev1.Pipeline{},
Playbook: kkcorev1.Playbook{},
},
exceptStdout: "success",
},
@ -70,7 +70,7 @@ func TestSetFact(t *testing.T) {
Host: "",
Variable: &testVariable{},
Task: kkcorev1alpha1.Task{},
Pipeline: kkcorev1.Pipeline{},
Playbook: kkcorev1.Playbook{},
},
exceptStdout: "success",
},
@ -83,7 +83,7 @@ func TestSetFact(t *testing.T) {
Host: "",
Variable: &testVariable{},
Task: kkcorev1alpha1.Task{},
Pipeline: kkcorev1.Pipeline{},
Playbook: kkcorev1.Playbook{},
},
exceptStderr: "only support bool, int, float64, string value for \"k\".",
},
@ -96,7 +96,7 @@ func TestSetFact(t *testing.T) {
Host: "",
Variable: &testVariable{},
Task: kkcorev1alpha1.Task{},
Pipeline: kkcorev1.Pipeline{},
Playbook: kkcorev1.Playbook{},
},
exceptStderr: "only support bool, int, float64, string value for \"k\".",
},

View File

@ -18,7 +18,6 @@ package modules
import (
"context"
"errors"
"fmt"
"io/fs"
"math"
@ -26,11 +25,11 @@ import (
"path/filepath"
"strings"
"github.com/cockroachdb/errors"
kkcorev1alpha1 "github.com/kubesphere/kubekey/api/core/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/kubesphere/kubekey/v4/pkg/connector"
"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
@ -52,8 +51,6 @@ func newTemplateArgs(_ context.Context, raw runtime.RawExtension, vars map[strin
ta.src, err = variable.StringVar(vars, args, "src")
if err != nil {
klog.V(4).ErrorS(err, "\"src\" should be string")
return nil, errors.New("\"src\" should be string")
}
@ -85,8 +82,6 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
ta, err := newTemplateArgs(ctx, options.Args, ha)
if err != nil {
klog.V(4).ErrorS(err, "get template args error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))
return "", err.Error()
}
@ -120,7 +115,7 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
return StdoutSuccess, ""
}
dealRelativeFilePath := func() (string, string) {
pj, err := project.New(ctx, options.Pipeline, false)
pj, err := project.New(ctx, options.Playbook, false)
if err != nil {
return "", fmt.Sprintf("get project error: %v", err)
}
@ -154,10 +149,10 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
}
// relFile when template.src is relative file, get file from project, parse it, and copy to remote.
func (ta templateArgs) readFile(ctx context.Context, data string, mode fs.FileMode, conn connector.Connector, vars map[string]any) any {
func (ta templateArgs) readFile(ctx context.Context, data string, mode fs.FileMode, conn connector.Connector, vars map[string]any) error {
result, err := tmpl.Parse(vars, data)
if err != nil {
return fmt.Errorf("parse file error: %w", err)
return errors.Wrap(err, "failed to parse file")
}
dest := ta.dest
@ -170,7 +165,7 @@ func (ta templateArgs) readFile(ctx context.Context, data string, mode fs.FileMo
}
if err := conn.PutFile(ctx, result, dest, mode); err != nil {
return fmt.Errorf("copy file error: %w", err)
return errors.Wrap(err, "failed to copy file")
}
return nil
@ -183,12 +178,12 @@ func (ta templateArgs) relDir(ctx context.Context, pj project.Project, role stri
return nil
}
if err != nil {
return fmt.Errorf("walk dir %s error: %w", ta.src, err)
return errors.WithStack(err)
}
info, err := d.Info()
if err != nil {
return fmt.Errorf("get file info error: %w", err)
return errors.Wrapf(err, "failed to get file %q info", path)
}
mode := info.Mode()
@ -198,29 +193,29 @@ func (ta templateArgs) relDir(ctx context.Context, pj project.Project, role stri
data, err := pj.ReadFile(path, project.GetFileOption{IsTemplate: true, Role: role})
if err != nil {
return fmt.Errorf("read file error: %w", err)
return errors.Wrapf(err, "failed to read file %q", path)
}
result, err := tmpl.Parse(vars, string(data))
if err != nil {
return fmt.Errorf("parse file error: %w", err)
return errors.Wrapf(err, "failed to parse file %q", path)
}
dest := ta.dest
if strings.HasSuffix(ta.dest, "/") {
rel, err := pj.Rel(ta.src, path, project.GetFileOption{IsTemplate: true, Role: role})
if err != nil {
return fmt.Errorf("get relative file path error: %w", err)
return errors.Wrap(err, "failed to get relative filepath")
}
dest = filepath.Join(ta.dest, rel)
}
if err := conn.PutFile(ctx, result, dest, mode); err != nil {
return fmt.Errorf("copy file error: %w", err)
return errors.Wrap(err, "failed to put file")
}
return nil
}); err != nil {
return err
return errors.Wrapf(err, "failed to walk dir %q", ta.src)
}
return nil
@ -233,13 +228,13 @@ func (ta templateArgs) absDir(ctx context.Context, conn connector.Connector, var
return nil
}
if err != nil {
return fmt.Errorf("walk dir %s error: %w", ta.src, err)
return errors.WithStack(err)
}
// get file old mode
info, err := d.Info()
if err != nil {
return fmt.Errorf("get file info error: %w", err)
return errors.Wrapf(err, "failed to get file %q info", path)
}
mode := info.Mode()
if ta.mode != nil {
@ -248,29 +243,29 @@ func (ta templateArgs) absDir(ctx context.Context, conn connector.Connector, var
// read file
data, err := os.ReadFile(path)
if err != nil {
return fmt.Errorf("read file error: %w", err)
return errors.Wrapf(err, "failed to read file %q", path)
}
result, err := tmpl.Parse(vars, string(data))
if err != nil {
return fmt.Errorf("parse file error: %w", err)
return errors.Wrapf(err, "failed to parse file %q", path)
}
// copy file to remote
dest := ta.dest
if strings.HasSuffix(ta.dest, "/") {
rel, err := filepath.Rel(ta.src, path)
if err != nil {
return fmt.Errorf("get relative file path error: %w", err)
return errors.Wrap(err, "failed to get relative filepath")
}
dest = filepath.Join(ta.dest, rel)
}
if err := conn.PutFile(ctx, result, dest, mode); err != nil {
return fmt.Errorf("copy file error: %w", err)
return errors.Wrap(err, "failed to put file")
}
return nil
}); err != nil {
return err
return errors.Wrapf(err, "failed to walk dir %q", ta.src)
}
return nil

View File

@ -20,11 +20,11 @@ limitations under the License.
package project
import (
"errors"
"io/fs"
"os"
"path/filepath"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
kkprojectv1 "github.com/kubesphere/kubekey/api/project/v1"
@ -33,21 +33,21 @@ import (
)
func init() {
builtinProjectFunc = func(pipeline kkcorev1.Pipeline) (Project, error) {
if pipeline.Spec.Playbook == "" {
builtinProjectFunc = func(playbook kkcorev1.Playbook) (Project, error) {
if playbook.Spec.Playbook == "" {
return nil, errors.New("playbook should not be empty")
}
if filepath.IsAbs(pipeline.Spec.Playbook) {
if filepath.IsAbs(playbook.Spec.Playbook) {
return nil, errors.New("playbook should be relative path base on project.addr")
}
return &builtinProject{Pipeline: pipeline, FS: core.BuiltinPipeline, playbook: pipeline.Spec.Playbook}, nil
return &builtinProject{Playbook: playbook, FS: core.BuiltinPlaybook, playbook: playbook.Spec.Playbook}, nil
}
}
type builtinProject struct {
kkcorev1.Pipeline
kkcorev1.Playbook
fs.FS
// playbook relpath base on projectDir

View File

@ -18,36 +18,34 @@ package project
import (
"context"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"github.com/cockroachdb/errors"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/transport/http"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
kkprojectv1 "github.com/kubesphere/kubekey/api/project/v1"
"k8s.io/klog/v2"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
)
func newGitProject(ctx context.Context, pipeline kkcorev1.Pipeline, update bool) (Project, error) {
if pipeline.Spec.Playbook == "" || pipeline.Spec.Project.Addr == "" {
func newGitProject(ctx context.Context, playbook kkcorev1.Playbook, update bool) (Project, error) {
if playbook.Spec.Playbook == "" || playbook.Spec.Project.Addr == "" {
return nil, errors.New("playbook and project.addr should not be empty")
}
if filepath.IsAbs(pipeline.Spec.Playbook) {
if filepath.IsAbs(playbook.Spec.Playbook) {
return nil, errors.New("playbook should be relative path base on project.addr")
}
// get project_dir from pipeline
projectDir, err := pipeline.Spec.Config.GetValue("project_dir")
// get project_dir from playbook
projectDir, err := playbook.Spec.Config.GetValue("project_dir")
if err != nil {
return nil, fmt.Errorf("project_dir is not defined. error is %w", err)
return nil, errors.Wrap(err, "project_dir is not defined")
}
pd, ok := projectDir.(string)
if !ok {
@ -55,25 +53,25 @@ func newGitProject(ctx context.Context, pipeline kkcorev1.Pipeline, update bool)
}
// git clone to project dir
if pipeline.Spec.Project.Name == "" {
pipeline.Spec.Project.Name = strings.TrimSuffix(pipeline.Spec.Project.Addr[strings.LastIndex(pipeline.Spec.Project.Addr, "/")+1:], ".git")
if playbook.Spec.Project.Name == "" {
playbook.Spec.Project.Name = strings.TrimSuffix(playbook.Spec.Project.Addr[strings.LastIndex(playbook.Spec.Project.Addr, "/")+1:], ".git")
}
p := &gitProject{
Pipeline: pipeline,
projectDir: filepath.Join(pd, pipeline.Spec.Project.Name),
playbook: pipeline.Spec.Playbook,
Playbook: playbook,
projectDir: filepath.Join(pd, playbook.Spec.Project.Name),
playbook: playbook.Spec.Playbook,
}
if _, err := os.Stat(p.projectDir); os.IsNotExist(err) {
// git clone
if err := p.gitClone(ctx); err != nil {
return nil, fmt.Errorf("clone git project error: %w", err)
return nil, errors.Wrapf(err, "failed to clone git project")
}
} else if update {
// git pull
if err := p.gitPull(ctx); err != nil {
return nil, fmt.Errorf("pull git project error: %w", err)
return nil, errors.Wrapf(err, "failed to pull git project")
}
}
@ -82,7 +80,7 @@ func newGitProject(ctx context.Context, pipeline kkcorev1.Pipeline, update bool)
// gitProject from git
type gitProject struct {
kkcorev1.Pipeline
kkcorev1.Playbook
//location
projectDir string
@ -123,16 +121,14 @@ func (p gitProject) getFilePath(path string, o GetFileOption) string {
func (p gitProject) gitClone(ctx context.Context) error {
if _, err := git.PlainCloneContext(ctx, p.projectDir, false, &git.CloneOptions{
URL: p.Pipeline.Spec.Project.Addr,
URL: p.Playbook.Spec.Project.Addr,
Progress: nil,
ReferenceName: plumbing.NewBranchReferenceName(p.Pipeline.Spec.Project.Branch),
ReferenceName: plumbing.NewBranchReferenceName(p.Playbook.Spec.Project.Branch),
SingleBranch: true,
Auth: &http.TokenAuth{Token: p.Pipeline.Spec.Project.Token},
Auth: &http.TokenAuth{Token: p.Playbook.Spec.Project.Token},
InsecureSkipTLS: false,
}); err != nil {
klog.Errorf("clone project %s failed: %v", p.Pipeline.Spec.Project.Addr, err)
return err
return errors.Wrapf(err, "failed to clone project %q", p.Playbook.Spec.Project.Addr)
}
return nil
@ -141,28 +137,22 @@ func (p gitProject) gitClone(ctx context.Context) error {
func (p gitProject) gitPull(ctx context.Context) error {
open, err := git.PlainOpen(p.projectDir)
if err != nil {
klog.V(4).ErrorS(err, "git open error", "local_dir", p.projectDir)
return err
		return errors.Wrapf(err, "failed to open git project %q", p.projectDir)
}
wt, err := open.Worktree()
if err != nil {
klog.V(4).ErrorS(err, "git open worktree error", "local_dir", p.projectDir)
return err
return errors.Wrapf(err, "failed to open git project %q worktree", p.projectDir)
}
if err := wt.PullContext(ctx, &git.PullOptions{
RemoteURL: p.Pipeline.Spec.Project.Addr,
ReferenceName: plumbing.NewBranchReferenceName(p.Pipeline.Spec.Project.Branch),
RemoteURL: p.Playbook.Spec.Project.Addr,
ReferenceName: plumbing.NewBranchReferenceName(p.Playbook.Spec.Project.Branch),
SingleBranch: true,
Auth: &http.TokenAuth{Token: p.Pipeline.Spec.Project.Token},
Auth: &http.TokenAuth{Token: p.Playbook.Spec.Project.Token},
InsecureSkipTLS: false,
}); err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
klog.V(4).ErrorS(err, "git pull error", "local_dir", p.projectDir)
return err
return errors.Wrapf(err, "failed to pull git project %q", p.playbook)
}
return nil
@ -170,7 +160,7 @@ func (p gitProject) gitPull(ctx context.Context) error {
// MarshalPlaybook project file to playbook.
func (p gitProject) MarshalPlaybook() (*kkprojectv1.Playbook, error) {
return marshalPlaybook(os.DirFS(p.projectDir), p.Pipeline.Spec.Playbook)
return marshalPlaybook(os.DirFS(p.projectDir), p.Playbook.Spec.Playbook)
}
// Stat role/file/template file or dir in project

View File

@ -17,11 +17,11 @@ limitations under the License.
package project
import (
"fmt"
"io/fs"
"os"
"path/filepath"
"github.com/cockroachdb/errors"
kkprojectv1 "github.com/kubesphere/kubekey/api/project/v1"
"gopkg.in/yaml.v3"
@ -34,19 +34,19 @@ func marshalPlaybook(baseFS fs.FS, pbPath string) (*kkprojectv1.Playbook, error)
// convert playbook to kkprojectv1.Playbook
pb := &kkprojectv1.Playbook{}
if err := loadPlaybook(baseFS, pbPath, pb); err != nil {
return nil, fmt.Errorf("load playbook failed: %w", err)
return nil, errors.Wrap(err, "failed to load playbook")
}
// convertRoles.
if err := convertRoles(baseFS, pbPath, pb); err != nil {
return nil, fmt.Errorf("convert roles failed: %w", err)
return nil, errors.Wrap(err, "failed to convert roles")
}
// convertIncludeTasks
if err := convertIncludeTasks(baseFS, pbPath, pb); err != nil {
return nil, fmt.Errorf("convert include tasks failed: %w", err)
return nil, errors.Wrap(err, "failed to convert include tasks")
}
// validate playbook
if err := pb.Validate(); err != nil {
return nil, fmt.Errorf("validate playbook failed: %w", err)
return nil, errors.Wrap(err, "failed to validate playbook")
}
return pb, nil
@ -57,24 +57,24 @@ func loadPlaybook(baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook) error {
// baseDir is the local ansible project dir which playbook belong to
pbData, err := fs.ReadFile(baseFS, pbPath)
if err != nil {
return fmt.Errorf("read playbook %q failed: %w", pbPath, err)
return errors.Wrapf(err, "failed to read playbook %q", pbPath)
}
var plays []kkprojectv1.Play
if err := yaml.Unmarshal(pbData, &plays); err != nil {
return fmt.Errorf("unmarshal playbook %q failed: %w", pbPath, err)
return errors.Wrapf(err, "failed to unmarshal playbook %q", pbPath)
}
for _, p := range plays {
if err := dealImportPlaybook(p, baseFS, pbPath, pb); err != nil {
return fmt.Errorf("load import_playbook in playbook %q failed: %w", pbPath, err)
return errors.Wrapf(err, "failed to load import_playbook in playbook %q", pbPath)
}
if err := dealVarsFiles(&p, baseFS, pbPath); err != nil {
return fmt.Errorf("load vars_files in playbook %q failed: %w", pbPath, err)
return errors.Wrapf(err, "failed to load vars_files in playbook %q", pbPath)
}
// fill block in roles
if err := dealRoles(p, baseFS, pbPath); err != nil {
return fmt.Errorf("load roles in playbook %q failed: %w", pbPath, err)
			return errors.Wrapf(err, "failed to load roles in playbook %q", pbPath)
}
pb.Play = append(pb.Play, p)
@ -88,10 +88,10 @@ func dealImportPlaybook(p kkprojectv1.Play, baseFS fs.FS, pbPath string, pb *kkp
if p.ImportPlaybook != "" {
importPlaybook := getPlaybookBaseFromPlaybook(baseFS, pbPath, p.ImportPlaybook)
if importPlaybook == "" {
return fmt.Errorf("import_playbook %q path is empty, it's maybe [project-dir/playbooks/import_playbook_file, playbook-dir/playbooks/import_playbook-file, playbook-dir/import_playbook-file]", p.ImportPlaybook)
return errors.Errorf("import_playbook %q path is empty, it's maybe [project-dir/playbooks/import_playbook_file, playbook-dir/playbooks/import_playbook-file, playbook-dir/import_playbook-file]", p.ImportPlaybook)
}
if err := loadPlaybook(baseFS, importPlaybook, pb); err != nil {
return fmt.Errorf("load playbook %q failed: %w", importPlaybook, err)
return errors.Wrapf(err, "failed to load playbook %q", importPlaybook)
}
}
@ -103,16 +103,16 @@ func dealVarsFiles(p *kkprojectv1.Play, baseFS fs.FS, pbPath string) error {
for _, file := range p.VarsFiles {
// load vars from vars_files
if _, err := fs.Stat(baseFS, filepath.Join(filepath.Dir(pbPath), file)); err != nil {
return fmt.Errorf("file %q not exists", file)
return errors.Wrapf(err, "failed to stat file %q", file)
}
data, err := fs.ReadFile(baseFS, filepath.Join(filepath.Dir(pbPath), file))
if err != nil {
return fmt.Errorf("read file %q failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
return errors.Wrapf(err, "failed to read file %q", filepath.Join(filepath.Dir(pbPath), file))
}
var newVars map[string]any
// Unmarshal the YAML document into a root node.
if err := yaml.Unmarshal(data, &newVars); err != nil {
return fmt.Errorf("failed to unmarshal YAML: %w", err)
			return errors.Wrap(err, "failed to unmarshal YAML")
}
// store vars in play. the vars defined in file should not be repeated.
p.Vars = variable.CombineVariables(newVars, p.Vars)
@ -126,21 +126,21 @@ func dealRoles(p kkprojectv1.Play, baseFS fs.FS, pbPath string) error {
for i, r := range p.Roles {
roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
if roleBase == "" {
return fmt.Errorf("cannot found Role %q", r.Role)
			return errors.Errorf("cannot find Role %q", r.Role)
}
mainTask := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
if mainTask == "" {
return fmt.Errorf("cannot found main task for Role %q", r.Role)
			return errors.Errorf("cannot find main task for Role %q", r.Role)
}
rdata, err := fs.ReadFile(baseFS, mainTask)
if err != nil {
return fmt.Errorf("read file %q failed: %w", mainTask, err)
return errors.Wrapf(err, "failed to read file %q", mainTask)
}
var blocks []kkprojectv1.Block
if err := yaml.Unmarshal(rdata, &blocks); err != nil {
return fmt.Errorf("unmarshal yaml file %q failed: %w", filepath.Join(filepath.Dir(pbPath), mainTask), err)
return errors.Wrapf(err, "failed to unmarshal yaml file %q", filepath.Join(filepath.Dir(pbPath), mainTask))
}
p.Roles[i].Block = blocks
}
@ -154,16 +154,16 @@ func convertRoles(baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook) error {
for i, r := range p.Roles {
roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
if roleBase == "" {
return fmt.Errorf("cannot found Role %q in playbook %q", r.Role, pbPath)
				return errors.Errorf("cannot find Role %q in playbook %q", r.Role, pbPath)
}
var err error
if p.Roles[i].Block, err = convertRoleBlocks(baseFS, pbPath, roleBase); err != nil {
return fmt.Errorf("convert role %q tasks in playbook %q failed: %w", r.Role, pbPath, err)
return errors.Wrapf(err, "failed to convert role %q tasks in playbook %q", r.Role, pbPath)
}
if err = convertRoleVars(baseFS, roleBase, &p.Roles[i]); err != nil {
return fmt.Errorf("convert role %q defaults in playbook %q failed: %w", r.Role, pbPath, err)
return errors.Wrapf(err, "failed to convert role %q defaults in playbook %q", r.Role, pbPath)
}
}
pb.Play[i] = p
@ -178,13 +178,13 @@ func convertRoleVars(baseFS fs.FS, roleBase string, role *kkprojectv1.Role) erro
if defaultsFile != "" {
data, err := fs.ReadFile(baseFS, defaultsFile)
if err != nil {
return fmt.Errorf("read defaults variable file %q failed: %w", defaultsFile, err)
return errors.Wrapf(err, "failed to read defaults variable file %q", defaultsFile)
}
var newVars map[string]any
// Unmarshal the YAML document into a root node.
if err := yaml.Unmarshal(data, &newVars); err != nil {
return fmt.Errorf("failed to unmarshal YAML: %w", err)
return errors.Wrap(err, "failed to unmarshal YAML")
}
// store vars in play. the vars defined in file should not be repeated.
role.Vars = variable.CombineVariables(newVars, role.Vars)
@ -197,16 +197,16 @@ func convertRoleVars(baseFS fs.FS, roleBase string, role *kkprojectv1.Role) erro
func convertRoleBlocks(baseFS fs.FS, pbPath string, roleBase string) ([]kkprojectv1.Block, error) {
mainTask := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
if mainTask == "" {
return nil, fmt.Errorf("cannot found main task for Role %q", roleBase)
return nil, errors.Errorf("cannot found main task for Role %q", roleBase)
}
rdata, err := fs.ReadFile(baseFS, mainTask)
if err != nil {
return nil, fmt.Errorf("read file %q failed: %w", mainTask, err)
return nil, errors.Wrapf(err, "failed to read file %q", mainTask)
}
var blocks []kkprojectv1.Block
if err := yaml.Unmarshal(rdata, &blocks); err != nil {
return nil, fmt.Errorf("unmarshal yaml file %q failed: %w", filepath.Join(filepath.Dir(pbPath), mainTask), err)
return nil, errors.Wrapf(err, "failed to unmarshal yaml file %q", filepath.Join(filepath.Dir(pbPath), mainTask))
}
return blocks, nil
@ -217,21 +217,21 @@ func convertIncludeTasks(baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook)
var pbBase = filepath.Dir(filepath.Dir(pbPath))
for _, play := range pb.Play {
if err := fileToBlock(baseFS, pbBase, play.PreTasks); err != nil {
return fmt.Errorf("convert pre_tasks file %q failed: %w", pbPath, err)
return errors.Wrapf(err, "failed to convert pre_tasks file %q", pbPath)
}
if err := fileToBlock(baseFS, pbBase, play.Tasks); err != nil {
return fmt.Errorf("convert tasks file %q failed: %w", pbPath, err)
return errors.Wrapf(err, "failed to convert tasks file %q", pbPath)
}
if err := fileToBlock(baseFS, pbBase, play.PostTasks); err != nil {
return fmt.Errorf("convert post_tasks file %q failed: %w", pbPath, err)
return errors.Wrapf(err, "failed to convert post_tasks file %q", pbPath)
}
for _, r := range play.Roles {
roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
if err := fileToBlock(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir), r.Block); err != nil {
return fmt.Errorf("convert role %q failed: %w", filepath.Join(pbPath, r.Role), err)
return errors.Wrapf(err, "failed to convert role %q", filepath.Join(pbPath, r.Role))
}
}
}
@ -244,11 +244,11 @@ func fileToBlock(baseFS fs.FS, baseDir string, blocks []kkprojectv1.Block) error
if b.IncludeTasks != "" {
data, err := fs.ReadFile(baseFS, filepath.Join(baseDir, b.IncludeTasks))
if err != nil {
return fmt.Errorf("read includeTask file %q failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
return errors.Wrapf(err, "failed to read includeTask file %q", filepath.Join(baseDir, b.IncludeTasks))
}
var bs []kkprojectv1.Block
if err := yaml.Unmarshal(data, &bs); err != nil {
return fmt.Errorf("unmarshal includeTask file %q failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
return errors.Wrapf(err, "failed to unmarshal includeTask file %q", filepath.Join(baseDir, b.IncludeTasks))
}
b.Block = bs
@ -256,15 +256,15 @@ func fileToBlock(baseFS fs.FS, baseDir string, blocks []kkprojectv1.Block) error
}
if err := fileToBlock(baseFS, baseDir, b.Block); err != nil {
return fmt.Errorf("convert block file %q failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
return errors.Wrapf(err, "failed to convert block file %q", filepath.Join(baseDir, b.IncludeTasks))
}
if err := fileToBlock(baseFS, baseDir, b.Rescue); err != nil {
return fmt.Errorf("convert rescue file %q failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
return errors.Wrapf(err, "failed to convert rescue file %q", filepath.Join(baseDir, b.IncludeTasks))
}
if err := fileToBlock(baseFS, baseDir, b.Always); err != nil {
return fmt.Errorf("convert always file %q failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
return errors.Wrapf(err, "failed to convert always file %q", filepath.Join(baseDir, b.IncludeTasks))
}
}

View File

@ -17,50 +17,49 @@ limitations under the License.
package project
import (
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
kkprojectv1 "github.com/kubesphere/kubekey/api/project/v1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
)
func newLocalProject(pipeline kkcorev1.Pipeline) (Project, error) {
if !filepath.IsAbs(pipeline.Spec.Playbook) {
if pipeline.Spec.Project.Addr == "" {
func newLocalProject(playbook kkcorev1.Playbook) (Project, error) {
if !filepath.IsAbs(playbook.Spec.Playbook) {
if playbook.Spec.Project.Addr == "" {
wd, err := os.Getwd()
if err != nil {
return nil, err
}
pipeline.Spec.Project.Addr = wd
playbook.Spec.Project.Addr = wd
}
pipeline.Spec.Playbook = filepath.Join(pipeline.Spec.Project.Addr, pipeline.Spec.Playbook)
playbook.Spec.Playbook = filepath.Join(playbook.Spec.Project.Addr, playbook.Spec.Playbook)
}
if _, err := os.Stat(pipeline.Spec.Playbook); err != nil {
return nil, fmt.Errorf("cannot find playbook %s", pipeline.Spec.Playbook)
if _, err := os.Stat(playbook.Spec.Playbook); err != nil {
return nil, errors.Wrapf(err, "cannot find playbook %q", playbook.Spec.Playbook)
}
if filepath.Base(filepath.Dir(pipeline.Spec.Playbook)) != _const.ProjectPlaybooksDir {
if filepath.Base(filepath.Dir(playbook.Spec.Playbook)) != _const.ProjectPlaybooksDir {
// the format of playbook is not correct
return nil, errors.New("playbook should be projectDir/playbooks/playbookfile")
}
projectDir := filepath.Dir(filepath.Dir(pipeline.Spec.Playbook))
playbook, err := filepath.Rel(projectDir, pipeline.Spec.Playbook)
projectDir := filepath.Dir(filepath.Dir(playbook.Spec.Playbook))
pb, err := filepath.Rel(projectDir, playbook.Spec.Playbook)
if err != nil {
return nil, err
}
return &localProject{Pipeline: pipeline, projectDir: projectDir, playbook: playbook}, nil
return &localProject{Playbook: playbook, projectDir: projectDir, playbook: pb}, nil
}
type localProject struct {
kkcorev1.Pipeline
kkcorev1.Playbook
projectDir string
// playbook relpath base on projectDir

View File

@ -26,15 +26,20 @@ import (
kkprojectv1 "github.com/kubesphere/kubekey/api/project/v1"
)
var builtinProjectFunc func(kkcorev1.Pipeline) (Project, error)
var builtinProjectFunc func(kkcorev1.Playbook) (Project, error)
// Project represent location of actual project.
// get project file should base on it
type Project interface {
// MarshalPlaybook project file to playbook.
MarshalPlaybook() (*kkprojectv1.Playbook, error)
// Stat file or dir in project
Stat(path string, option GetFileOption) (os.FileInfo, error)
// WalkDir dir in project
WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error
// ReadFile file or dir in project
ReadFile(path string, option GetFileOption) ([]byte, error)
// Rel path file or dir in project
Rel(root string, path string, option GetFileOption) (string, error)
}
@ -47,18 +52,18 @@ type GetFileOption struct {
// New project.
// If project address is git format. newGitProject
// If pipeline has BuiltinsProjectAnnotation. builtinProjectFunc
// If playbook has BuiltinsProjectAnnotation. builtinProjectFunc
// Default newLocalProject
func New(ctx context.Context, pipeline kkcorev1.Pipeline, update bool) (Project, error) {
if strings.HasPrefix(pipeline.Spec.Project.Addr, "https://") ||
strings.HasPrefix(pipeline.Spec.Project.Addr, "http://") ||
strings.HasPrefix(pipeline.Spec.Project.Addr, "git@") {
return newGitProject(ctx, pipeline, update)
func New(ctx context.Context, playbook kkcorev1.Playbook, update bool) (Project, error) {
if strings.HasPrefix(playbook.Spec.Project.Addr, "https://") ||
strings.HasPrefix(playbook.Spec.Project.Addr, "http://") ||
strings.HasPrefix(playbook.Spec.Project.Addr, "git@") {
return newGitProject(ctx, playbook, update)
}
if _, ok := pipeline.Annotations[kkcorev1.BuiltinsProjectAnnotation]; ok {
return builtinProjectFunc(pipeline)
if _, ok := playbook.Annotations[kkcorev1.BuiltinsProjectAnnotation]; ok {
return builtinProjectFunc(playbook)
}
return newLocalProject(pipeline)
return newLocalProject(playbook)
}

View File

@ -17,13 +17,12 @@ limitations under the License.
package proxy
import (
"errors"
"fmt"
"net/http"
"reflect"
"strings"
"time"
"github.com/cockroachdb/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -34,7 +33,6 @@ import (
"k8s.io/apiserver/pkg/features"
apirest "k8s.io/apiserver/pkg/registry/rest"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
)
@ -67,7 +65,7 @@ func (o *resourceOptions) init() error {
var prefix string
scoper, ok := o.storage.(apirest.Scoper)
if !ok {
return fmt.Errorf("%q must implement scoper", o.path)
return errors.Errorf("%q must implement scoper", o.path)
}
if scoper.NamespaceScoped() {
prefix = "/namespaces/{namespace}/"
@ -111,9 +109,7 @@ func newAPIIResources(gv schema.GroupVersion) *apiResources {
// AddResource add a api-resources
func (r *apiResources) AddResource(o resourceOptions) error {
if err := o.init(); err != nil {
klog.V(6).ErrorS(err, "Failed to initialize resourceOptions")
return err
return errors.Wrap(err, "failed to initialize resourceOptions")
}
r.resourceOptions = append(r.resourceOptions, o)
storageVersionProvider, isStorageVersionProvider := o.storage.(apirest.StorageVersionProvider)
@ -124,9 +120,7 @@ func (r *apiResources) AddResource(o resourceOptions) error {
versioner := storageVersionProvider.StorageVersion()
gvk, err := getStorageVersionKind(versioner, o.storage, r.typer)
if err != nil {
klog.V(6).ErrorS(err, "failed to get storage version kind", "storage", reflect.TypeOf(o.storage))
return err
return errors.Wrapf(err, "failed to get storage %q version kind", reflect.TypeOf(o.storage))
}
apiResource.Group = gvk.Group
apiResource.Version = gvk.Version
@ -146,7 +140,7 @@ func (r *apiResources) AddResource(o resourceOptions) error {
if o.subresource == "" {
singularNameProvider, ok := o.storage.(apirest.SingularNameProvider)
if !ok {
return fmt.Errorf("resource %s must implement SingularNameProvider", o.path)
return errors.Errorf("resource %s must implement SingularNameProvider", o.path)
}
apiResource.SingularName = singularNameProvider.GetSingularName()
}
@ -171,7 +165,7 @@ func getStorageVersionKind(storageVersioner runtime.GroupVersioner, storage apir
}
gvk, ok := storageVersioner.KindForGroupVersionKinds(fqKinds)
if !ok {
return schema.GroupVersionKind{}, fmt.Errorf("cannot find the storage version kind for %v", reflect.TypeOf(object))
return schema.GroupVersionKind{}, errors.Errorf("cannot find the storage version kind for %v", reflect.TypeOf(object))
}
return gvk, nil

View File

@ -18,13 +18,13 @@ package internal
import (
"context"
"fmt"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"github.com/cockroachdb/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -42,6 +42,7 @@ const (
// after delete event is handled, the file will be deleted from disk.
deleteTagSuffix = "-deleted"
// the file type of resource will store local.
// NOTE: the variable will store in playbook dir, add file-suffix to distinct it.
yamlSuffix = ".yaml"
)
@ -84,45 +85,29 @@ func (s fileStorage) Create(_ context.Context, key string, obj, out runtime.Obje
// set resourceVersion to obj
metaObj, err := meta.Accessor(obj)
if err != nil {
klog.V(6).ErrorS(err, "failed to get meta object", "path", filepath.Dir(key))
return err
return errors.Wrapf(err, "failed to get meta from object %q", key)
}
metaObj.SetResourceVersion("1")
// create file to local disk
if _, err := os.Stat(filepath.Dir(key)); err != nil {
if !os.IsNotExist(err) {
klog.V(6).ErrorS(err, "failed to check dir", "path", filepath.Dir(key))
return err
return errors.Wrapf(err, "failed to check dir %q", filepath.Dir(key))
}
if err := os.MkdirAll(filepath.Dir(key), os.ModePerm); err != nil {
klog.V(6).ErrorS(err, "failed to create dir", "path", filepath.Dir(key))
return err
return errors.Wrapf(err, "failed to create dir %q", filepath.Dir(key))
}
}
data, err := runtime.Encode(s.codec, obj)
if err != nil {
klog.V(6).ErrorS(err, "failed to encode resource file", "path", key)
return err
return errors.Wrapf(err, "failed to encode object %q", key)
}
// render to out
if out != nil {
err = decode(s.codec, data, out)
if err != nil {
klog.V(6).ErrorS(err, "failed to decode resource file", "path", key)
return err
}
if err := decode(s.codec, data, out); err != nil {
return errors.Wrapf(err, "failed to decode object %q", key)
}
// render to file
if err := os.WriteFile(key+yamlSuffix, data, os.ModePerm); err != nil {
klog.V(6).ErrorS(err, "failed to create resource file", "path", key)
return err
return errors.Wrapf(err, "failed to create object %q", key)
}
return nil
@ -134,30 +119,21 @@ func (s fileStorage) Delete(ctx context.Context, key string, out runtime.Object,
out = cachedExistingObject
} else {
if err := s.Get(ctx, key, apistorage.GetOptions{}, out); err != nil {
klog.V(6).ErrorS(err, "failed to get resource", "path", key)
return err
return errors.Wrapf(err, "failed to get object %q", key)
}
}
if err := preconditions.Check(key, out); err != nil {
klog.V(6).ErrorS(err, "failed to check preconditions", "path", key)
return err
return errors.Wrapf(err, "failed to check preconditions for object %q", key)
}
if err := validateDeletion(ctx, out); err != nil {
klog.V(6).ErrorS(err, "failed to validate deletion", "path", key)
return err
return errors.Wrapf(err, "failed to validate deletion for object %q", err)
}
// delete object
// rename file to trigger watcher
// delete object: rename file to trigger watcher, it will actual delete by watcher.
if err := os.Rename(key+yamlSuffix, key+yamlSuffix+deleteTagSuffix); err != nil {
klog.V(6).ErrorS(err, "failed to rename resource file", "path", key)
return err
return errors.Wrapf(err, "failed to rename object %q to del-object %q", key+yamlSuffix, key+yamlSuffix+deleteTagSuffix)
}
return nil
@ -172,14 +148,10 @@ func (s fileStorage) Watch(_ context.Context, key string, _ apistorage.ListOptio
func (s fileStorage) Get(_ context.Context, key string, _ apistorage.GetOptions, out runtime.Object) error {
data, err := os.ReadFile(key + yamlSuffix)
if err != nil {
klog.V(6).ErrorS(err, "failed to read resource file", "path", key)
return err
return errors.Wrapf(err, "failed to read object file %q", key)
}
if err := decode(s.codec, data, out); err != nil {
klog.V(6).ErrorS(err, "failed to decode resource file", "path", key)
return err
return errors.Wrapf(err, "failed to decode object %q", key)
}
return nil
@ -189,23 +161,23 @@ func (s fileStorage) Get(_ context.Context, key string, _ apistorage.GetOptions,
func (s fileStorage) GetList(_ context.Context, key string, opts apistorage.ListOptions, listObj runtime.Object) error {
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
return errors.Wrapf(err, "failed to get object list items of %q", key)
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %w", err)
return errors.Wrapf(err, "need ptr to slice of %q", key)
}
// Build matching rules for resource version and continue key.
resourceVersionMatchRule, continueKeyMatchRule, err := s.buildMatchRules(key, opts, &sync.Once{})
if err != nil {
return err
return errors.Wrapf(err, "failed to build matchRules %q", key)
}
// Get the root entries in the directory corresponding to 'key'.
rootEntries, isAllNamespace, err := s.getRootEntries(key)
if err != nil {
return err
return errors.Wrapf(err, "failed to get root entries %q", key)
}
var lastKey string
@ -243,9 +215,7 @@ func (s fileStorage) buildMatchRules(key string, opts apistorage.ListOptions, st
// If continue token is present, set up a rule to start reading after the continueKey.
continueKey, _, err := apistorage.DecodeContinue(opts.Predicate.Continue, key)
if err != nil {
klog.V(6).ErrorS(err, "failed to parse continueKey", "continueKey", opts.Predicate.Continue)
return nil, nil, fmt.Errorf("invalid continue token: %w", err)
return nil, nil, errors.Wrapf(err, "invalid continue token of %q", key)
}
continueKeyMatchRule = func(key string) bool {
@ -260,7 +230,7 @@ func (s fileStorage) buildMatchRules(key string, opts apistorage.ListOptions, st
// Handle resource version matching based on the provided match rule.
parsedRV, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return nil, nil, fmt.Errorf("invalid resource version: %w", err)
return nil, nil, errors.Wrapf(err, "invalid resource version of %q", key)
}
switch opts.ResourceVersionMatch {
case metav1.ResourceVersionMatchNotOlderThan:
@ -270,7 +240,7 @@ func (s fileStorage) buildMatchRules(key string, opts apistorage.ListOptions, st
case "":
// Legacy case: match all resource versions.
default:
return nil, nil, fmt.Errorf("unknown ResourceVersionMatch value: %v", opts.ResourceVersionMatch)
return nil, nil, errors.Errorf("unknown ResourceVersionMatch value %q of %q", opts.ResourceVersionMatch, key)
}
}
@ -290,19 +260,11 @@ func (s fileStorage) getRootEntries(key string) ([]os.DirEntry, bool, error) {
// get all resources from key. key is runtimeDir
allNamespace = false
default:
klog.V(6).ErrorS(nil, "key is invalid", "key", key)
return nil, false, fmt.Errorf("key is invalid: %s", key)
return nil, false, errors.Errorf("key is invalid: %s", key)
}
rootEntries, err := os.ReadDir(key)
if err != nil && !os.IsNotExist(err) {
klog.V(6).ErrorS(err, "failed to read runtime dir", "path", key)
return nil, allNamespace, err
}
return rootEntries, allNamespace, nil
return rootEntries, allNamespace, err
}
// processNamespaceDirectory handles the traversal and processing of a namespace directory.
@ -317,15 +279,14 @@ func (s fileStorage) processNamespaceDirectory(key string, ns os.DirEntry, v ref
if os.IsNotExist(err) {
return nil
}
klog.V(6).ErrorS(err, "failed to read namespaces dir", "path", nsDir)
return err
return errors.Wrapf(err, "failed to read dir %q", nsDir)
}
for _, entry := range entries {
err := s.processResourceFile(nsDir, entry, v, continueKeyMatchRule, resourceVersionMatchRule, lastKey, opts, listObj)
if err != nil {
return err
return errors.WithStack(err)
}
// Check if we have reached the limit of results requested by the client.
if opts.Predicate.Limit != 0 && int64(v.Len()) >= opts.Predicate.Limit {
@ -351,30 +312,22 @@ func (s fileStorage) processResourceFile(parentDir string, entry os.DirEntry, v
data, err := os.ReadFile(currentKey)
if err != nil {
klog.V(6).ErrorS(err, "failed to read resource file", "path", currentKey)
return err
return errors.Wrapf(err, "failed to read object %q", currentKey)
}
obj, _, err := s.codec.Decode(data, nil, getNewItem(listObj, v))
if err != nil {
klog.V(6).ErrorS(err, "failed to decode resource file", "path", currentKey)
return err
return errors.Wrapf(err, "failed to decode object %q", currentKey)
}
metaObj, err := meta.Accessor(obj)
if err != nil {
klog.V(6).ErrorS(err, "failed to get meta object", "path", currentKey)
return err
return errors.Wrapf(err, "failed to get object %q meta", currentKey)
}
rv, err := s.versioner.ParseResourceVersion(metaObj.GetResourceVersion())
if err != nil {
klog.V(6).ErrorS(err, "failed to parse resource version", "resourceVersion", metaObj.GetResourceVersion())
return err
return errors.Wrapf(err, "failed to parse resource version %q", metaObj.GetResourceVersion())
}
// Apply the resource version match rule.
@ -401,7 +354,7 @@ func (s fileStorage) handleResult(listObj runtime.Object, v reflect.Value, lastK
// If there are more results, set the continuation token for the next query.
next, err := apistorage.EncodeContinue(lastKey+"\x00", "", 0)
if err != nil {
return err
return errors.Wrapf(err, "failed to encode continue %q", lastKey)
}
return s.versioner.UpdateList(listObj, 1, next, nil)
@ -419,56 +372,40 @@ func (s fileStorage) GuaranteedUpdate(ctx context.Context, key string, destinati
} else {
oldObj = s.newFunc()
if err := s.Get(ctx, key, apistorage.GetOptions{IgnoreNotFound: ignoreNotFound}, oldObj); err != nil {
klog.V(6).ErrorS(err, "failed to get resource", "path", key)
return err
return errors.Wrapf(err, "failed to get object %q", key)
}
}
if err := preconditions.Check(key, oldObj); err != nil {
klog.V(6).ErrorS(err, "failed to check preconditions", "path", key)
return err
return errors.Wrapf(err, "failed to check preconditions %q", key)
}
// set resourceVersion to obj
metaObj, err := meta.Accessor(oldObj)
if err != nil {
klog.V(6).ErrorS(err, "failed to get meta object", "path", filepath.Dir(key))
return err
return errors.Wrapf(err, "failed to get object %q meta", filepath.Dir(key))
}
oldVersion, err := s.versioner.ParseResourceVersion(metaObj.GetResourceVersion())
if err != nil {
klog.V(6).ErrorS(err, "failed to parse resource version", "resourceVersion", metaObj.GetResourceVersion())
return err
return errors.Wrapf(err, "failed to parse resource version %q", metaObj.GetResourceVersion())
}
out, _, err := tryUpdate(oldObj, apistorage.ResponseMeta{ResourceVersion: oldVersion + 1})
if err != nil {
klog.V(6).ErrorS(err, "failed to try update", "path", key)
return err
return errors.Wrapf(err, "failed to try update %q", key)
}
data, err := runtime.Encode(s.codec, out)
if err != nil {
klog.V(6).ErrorS(err, "failed to encode resource file", "path", key)
return err
return errors.Wrapf(err, "failed to encode resource file %q", key)
}
// render to destination
if destination != nil {
err = decode(s.codec, data, destination)
if err != nil {
klog.V(6).ErrorS(err, "failed to decode resource file", "path", key)
return err
return errors.Wrapf(err, "failed to decode resource file %q", key)
}
}
// render to file
if err := os.WriteFile(key+yamlSuffix, data, os.ModePerm); err != nil {
klog.V(6).ErrorS(err, "failed to create resource file", "path", key)
return err
return errors.Wrapf(err, "failed to create resource file %q", key)
}
return nil
@ -480,10 +417,8 @@ func (s fileStorage) Count(key string) (int64, error) {
countByNSDir := func(dir string) (int64, error) {
var count int64
entries, err := os.ReadDir(dir)
if err != nil {
klog.V(6).ErrorS(err, "failed to read namespaces dir", "path", dir)
// cannot read namespace dir
return 0, err
if err != nil { // cannot read namespace dir
return 0, errors.Wrapf(err, "failed to read namespaces dir %q", dir)
}
// count the file
for _, entry := range entries {
@ -500,9 +435,7 @@ func (s fileStorage) Count(key string) (int64, error) {
var count int64
rootEntries, err := os.ReadDir(key)
if err != nil && !os.IsNotExist(err) {
klog.V(6).ErrorS(err, "failed to read runtime dir", "path", key)
return 0, err
return 0, errors.Wrapf(err, "failed to read runtime dir %q", key)
}
for _, ns := range rootEntries {
if !ns.IsDir() {
@ -520,9 +453,8 @@ func (s fileStorage) Count(key string) (int64, error) {
case 1: // count a namespace's resources
return countByNSDir(key)
default:
klog.V(6).ErrorS(nil, "key is invalid", "key", key)
// not support key
return 0, fmt.Errorf("key is invalid: %s", key)
return 0, errors.Errorf("key is invalid: %s", key)
}
}
@ -535,11 +467,11 @@ func (s fileStorage) RequestWatchProgress(context.Context) error {
// On success, objPtr would be set to the object.
func decode(codec runtime.Codec, value []byte, objPtr runtime.Object) error {
if _, err := conversion.EnforcePtr(objPtr); err != nil {
return fmt.Errorf("unable to convert output object to pointer: %w", err)
return errors.Wrap(err, "failed to convert output object to pointer")
}
_, _, err := codec.Decode(value, nil, objPtr)
if err != nil {
return err
return errors.Wrap(err, "failed to decode output object")
}
return nil

View File

@ -21,6 +21,7 @@ import (
"path/filepath"
"strings"
"github.com/cockroachdb/errors"
"github.com/fsnotify/fsnotify"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
@ -41,40 +42,30 @@ type fileWatcher struct {
func newFileWatcher(prefix, path string, codec runtime.Codec, newFunc func() runtime.Object) (watch.Interface, error) {
if _, err := os.Stat(path); err != nil {
if !os.IsNotExist(err) {
klog.V(6).ErrorS(err, "failed to stat path", "path", path)
return nil, err
return nil, errors.Wrapf(err, "failed to stat path %q", path)
}
if err := os.MkdirAll(path, os.ModePerm); err != nil {
return nil, err
return nil, errors.Wrapf(err, "failed to create dir %q", path)
}
}
watcher, err := fsnotify.NewWatcher()
if err != nil {
klog.V(6).ErrorS(err, "failed to create file watcher", "path", path)
return nil, err
return nil, errors.Wrapf(err, "failed to create file watcher %q", path)
}
if err := watcher.Add(path); err != nil {
klog.V(6).ErrorS(err, "failed to add path to file watcher", "path", path)
return nil, err
return nil, errors.Wrapf(err, "failed to add path to file watcher %q", path)
}
// add namespace dir to watcher
if prefix == path {
entry, err := os.ReadDir(prefix)
if err != nil {
klog.V(6).ErrorS(err, "failed to read dir", "dir", path)
return nil, err
return nil, errors.Wrapf(err, "failed to read dir %q", path)
}
for _, e := range entry {
if e.IsDir() {
if err := watcher.Add(filepath.Join(prefix, e.Name())); err != nil {
klog.V(6).ErrorS(err, "failed to add namespace dir to file watcher", "dir", e.Name())
return nil, err
return nil, errors.Wrapf(err, "failed to add namespace dir to file watcher %q", e.Name())
}
}
}
@ -96,7 +87,7 @@ func newFileWatcher(prefix, path string, codec runtime.Codec, newFunc func() run
// Stop watch
func (w *fileWatcher) Stop() {
if err := w.watcher.Close(); err != nil {
klog.V(6).ErrorS(err, "failed to close file watcher")
klog.ErrorS(err, "failed to close file watcher")
}
}
@ -155,21 +146,15 @@ func (w *fileWatcher) watchFile(event fsnotify.Event) error {
}
data, err := os.ReadFile(event.Name)
if err != nil {
klog.V(6).ErrorS(err, "failed to read resource file", "event", event)
return err
return errors.Wrapf(err, "failed to read resource file %q", event.Name)
}
obj, _, err := w.codec.Decode(data, nil, w.newFunc())
if err != nil {
klog.V(6).ErrorS(err, "failed to decode resource file", "event", event)
return err
return errors.Wrapf(err, "failed to decode resource file %q", event.Name)
}
metaObj, err := meta.Accessor(obj)
if err != nil {
klog.V(6).ErrorS(err, "failed to convert to metaObject", "event", event)
return err
return errors.Wrapf(err, "failed to dconvert to meta object %q", event.Name)
}
if metaObj.GetName() == "" && metaObj.GetGenerateName() == "" { // ignore unknown file
klog.V(6).InfoS("name is empty. ignore", "event", event)

View File

@ -25,6 +25,8 @@ import (
"fmt"
"regexp"
"strings"
"github.com/cockroachdb/errors"
)
// PathExpression holds a compiled path expression (RegExp) needed to match against
@ -44,7 +46,7 @@ func newPathExpression(path string) (*pathExpression, error) {
expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path)
compiled, err := regexp.Compile(expression)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to compile regexp")
}
return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package pipeline
package playbook
import (
"context"
@ -28,30 +28,30 @@ import (
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
// PipelineStorage storage for Pipeline
type PipelineStorage struct {
Pipeline *REST
PipelineStatus *StatusREST
// PlaybookStorage storage for Playbook
type PlaybookStorage struct {
Playbook *REST
PlaybookStatus *StatusREST
}
// REST resource for Pipeline
// REST resource for Playbook
type REST struct {
*apiregistry.Store
}
// StatusREST status subresource for Pipeline
// StatusREST status subresource for Playbook
type StatusREST struct {
store *apiregistry.Store
}
// NamespaceScoped is true for Pipeline
// NamespaceScoped is true for Playbook
func (r *StatusREST) NamespaceScoped() bool {
return true
}
// New creates a new Node object.
func (r *StatusREST) New() runtime.Object {
return &kkcorev1.Pipeline{}
return &kkcorev1.Playbook{}
}
// Destroy cleans up resources on shutdown.
@ -82,30 +82,30 @@ func (r *StatusREST) ConvertToTable(ctx context.Context, object runtime.Object,
return r.store.ConvertToTable(ctx, object, tableOptions)
}
// NewStorage for Pipeline storage
func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (PipelineStorage, error) {
// NewStorage for Playbook storage
func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (PlaybookStorage, error) {
store := &apiregistry.Store{
NewFunc: func() runtime.Object { return &kkcorev1.Pipeline{} },
NewListFunc: func() runtime.Object { return &kkcorev1.PipelineList{} },
DefaultQualifiedResource: kkcorev1.SchemeGroupVersion.WithResource("pipelines").GroupResource(),
SingularQualifiedResource: kkcorev1.SchemeGroupVersion.WithResource("pipeline").GroupResource(),
NewFunc: func() runtime.Object { return &kkcorev1.Playbook{} },
NewListFunc: func() runtime.Object { return &kkcorev1.PlaybookList{} },
DefaultQualifiedResource: kkcorev1.SchemeGroupVersion.WithResource("playbooks").GroupResource(),
SingularQualifiedResource: kkcorev1.SchemeGroupVersion.WithResource("playbook").GroupResource(),
CreateStrategy: Strategy,
UpdateStrategy: Strategy,
DeleteStrategy: Strategy,
ReturnDeletedObject: true,
TableConvertor: apirest.NewDefaultTableConvertor(kkcorev1.SchemeGroupVersion.WithResource("pipelines").GroupResource()),
TableConvertor: apirest.NewDefaultTableConvertor(kkcorev1.SchemeGroupVersion.WithResource("playbooks").GroupResource()),
}
options := &apigeneric.StoreOptions{
RESTOptions: optsGetter,
}
if err := store.CompleteWithOptions(options); err != nil {
return PipelineStorage{}, err
return PlaybookStorage{}, err
}
return PipelineStorage{
Pipeline: &REST{store},
PipelineStatus: &StatusREST{store},
return PlaybookStorage{
Playbook: &REST{store},
PlaybookStatus: &StatusREST{store},
}, nil
}

View File

@ -14,13 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package pipeline
package playbook
import (
"context"
"errors"
"reflect"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
@ -30,67 +30,67 @@ import (
_const "github.com/kubesphere/kubekey/v4/pkg/const"
)
// pipelineStrategy implements behavior for Pods
type pipelineStrategy struct {
// playbookStrategy implements behavior for Pods
type playbookStrategy struct {
runtime.ObjectTyper
apinames.NameGenerator
}
// Strategy is the default logic that applies when creating and updating Pod
// objects via the REST API.
var Strategy = pipelineStrategy{_const.Scheme, apinames.SimpleNameGenerator}
var Strategy = playbookStrategy{_const.Scheme, apinames.SimpleNameGenerator}
// ===CreateStrategy===
// NamespaceScoped always true
func (t pipelineStrategy) NamespaceScoped() bool {
func (t playbookStrategy) NamespaceScoped() bool {
return true
}
// PrepareForCreate do no-thing
func (t pipelineStrategy) PrepareForCreate(context.Context, runtime.Object) {}
func (t playbookStrategy) PrepareForCreate(context.Context, runtime.Object) {}
// Validate always pass
func (t pipelineStrategy) Validate(context.Context, runtime.Object) field.ErrorList {
func (t playbookStrategy) Validate(context.Context, runtime.Object) field.ErrorList {
// do nothing
return nil
}
// WarningsOnCreate do no-thing
func (t pipelineStrategy) WarningsOnCreate(context.Context, runtime.Object) []string {
func (t playbookStrategy) WarningsOnCreate(context.Context, runtime.Object) []string {
// do nothing
return nil
}
// Canonicalize do no-thing
func (t pipelineStrategy) Canonicalize(runtime.Object) {
func (t playbookStrategy) Canonicalize(runtime.Object) {
// do nothing
}
// ===UpdateStrategy===
// AllowCreateOnUpdate always false
func (t pipelineStrategy) AllowCreateOnUpdate() bool {
func (t playbookStrategy) AllowCreateOnUpdate() bool {
return false
}
// PrepareForUpdate do no-thing
func (t pipelineStrategy) PrepareForUpdate(context.Context, runtime.Object, runtime.Object) {
func (t playbookStrategy) PrepareForUpdate(context.Context, runtime.Object, runtime.Object) {
// do nothing
}
// ValidateUpdate spec is immutable
func (t pipelineStrategy) ValidateUpdate(_ context.Context, obj, old runtime.Object) field.ErrorList {
func (t playbookStrategy) ValidateUpdate(_ context.Context, obj, old runtime.Object) field.ErrorList {
// only support update status
pipeline, ok := obj.(*kkcorev1.Pipeline)
playbook, ok := obj.(*kkcorev1.Playbook)
if !ok {
return field.ErrorList{field.InternalError(field.NewPath("spec"), errors.New("the object is not Task"))}
}
oldPipeline, ok := old.(*kkcorev1.Pipeline)
oldPlaybook, ok := old.(*kkcorev1.Playbook)
if !ok {
return field.ErrorList{field.InternalError(field.NewPath("spec"), errors.New("the object is not Task"))}
}
if !reflect.DeepEqual(pipeline.Spec, oldPipeline.Spec) {
if !reflect.DeepEqual(playbook.Spec, oldPlaybook.Spec) {
return field.ErrorList{field.Forbidden(field.NewPath("spec"), "spec is immutable")}
}
@ -98,19 +98,19 @@ func (t pipelineStrategy) ValidateUpdate(_ context.Context, obj, old runtime.Obj
}
// WarningsOnUpdate always nil
func (t pipelineStrategy) WarningsOnUpdate(context.Context, runtime.Object, runtime.Object) []string {
func (t playbookStrategy) WarningsOnUpdate(context.Context, runtime.Object, runtime.Object) []string {
// do nothing
return nil
}
// AllowUnconditionalUpdate always true
func (t pipelineStrategy) AllowUnconditionalUpdate() bool {
func (t playbookStrategy) AllowUnconditionalUpdate() bool {
return true
}
// ===ResetFieldsStrategy===
// GetResetFields always nil
func (t pipelineStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
func (t playbookStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
return nil
}

View File

@ -19,6 +19,7 @@ package task
import (
"context"
"github.com/cockroachdb/errors"
kkcorev1alpha1 "github.com/kubesphere/kubekey/api/core/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -102,11 +103,11 @@ func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (TaskStorage, error) {
options := &apigeneric.StoreOptions{
RESTOptions: optsGetter,
AttrFunc: GetAttrs,
TriggerFunc: map[string]apistorage.IndexerFunc{kkcorev1alpha1.TaskOwnerField: OwnerPipelineTriggerFunc},
TriggerFunc: map[string]apistorage.IndexerFunc{kkcorev1alpha1.TaskOwnerField: OwnerPlaybookTriggerFunc},
Indexers: Indexers(),
}
if err := store.CompleteWithOptions(options); err != nil {
return TaskStorage{}, err
return TaskStorage{}, errors.Wrap(err, "failed to complete store")
}
return TaskStorage{

View File

@ -18,9 +18,9 @@ package task
import (
"context"
"errors"
"reflect"
"github.com/cockroachdb/errors"
kkcorev1alpha1 "github.com/kubesphere/kubekey/api/core/v1alpha1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
@ -36,7 +36,7 @@ import (
_const "github.com/kubesphere/kubekey/v4/pkg/const"
)
// playbookKind is the ownerReference.Kind value that links a Task to the
// Playbook which created it.
const playbookKind = "Playbook"
// taskStrategy implements behavior for Pods
type taskStrategy struct {
@ -123,8 +123,8 @@ func (t taskStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
return nil
}
// OwnerPipelineIndexFunc return value ownerReference.object is pipeline.
func OwnerPipelineIndexFunc(obj any) ([]string, error) {
// OwnerPlaybookIndexFunc return value ownerReference.object is playbook.
func OwnerPlaybookIndexFunc(obj any) ([]string, error) {
task, ok := obj.(*kkcorev1alpha1.Task)
if !ok {
return nil, errors.New("not Task")
@ -132,7 +132,7 @@ func OwnerPipelineIndexFunc(obj any) ([]string, error) {
var index string
for _, reference := range task.OwnerReferences {
if reference.Kind == pipelineKind {
if reference.Kind == playbookKind {
index = types.NamespacedName{
Namespace: task.Namespace,
Name: reference.Name,
@ -142,7 +142,7 @@ func OwnerPipelineIndexFunc(obj any) ([]string, error) {
}
}
if index == "" {
return nil, errors.New("task has no ownerReference.pipeline")
return nil, errors.New("task has no ownerReference.playbook")
}
return []string{index}, nil
@ -151,7 +151,7 @@ func OwnerPipelineIndexFunc(obj any) ([]string, error) {
// Indexers returns the cache indexers for Task storage, indexing tasks by
// their owning Playbook (namespace/name).
// (The previous comment said "pod storage" — a copy-paste leftover.)
func Indexers() *cgtoolscache.Indexers {
	return &cgtoolscache.Indexers{
		apistorage.FieldIndex(kkcorev1alpha1.TaskOwnerField): OwnerPlaybookIndexFunc,
	}
}
@ -183,7 +183,7 @@ func ToSelectableFields(task *kkcorev1alpha1.Task) fields.Set {
// be adjusted.
taskSpecificFieldsSet := make(fields.Set)
for _, reference := range task.OwnerReferences {
if reference.Kind == pipelineKind {
if reference.Kind == playbookKind {
taskSpecificFieldsSet[kkcorev1alpha1.TaskOwnerField] = types.NamespacedName{
Namespace: task.Namespace,
Name: reference.Name,
@ -196,11 +196,11 @@ func ToSelectableFields(task *kkcorev1alpha1.Task) fields.Set {
return apigeneric.AddObjectMetaFieldsSet(taskSpecificFieldsSet, &task.ObjectMeta, true)
}
// OwnerPipelineTriggerFunc returns value ownerReference is pipeline of given object.
func OwnerPipelineTriggerFunc(obj runtime.Object) string {
// OwnerPlaybookTriggerFunc returns value ownerReference is playbook of given object.
func OwnerPlaybookTriggerFunc(obj runtime.Object) string {
if task, ok := obj.(*kkcorev1alpha1.Task); ok {
for _, reference := range task.OwnerReferences {
if reference.Kind == pipelineKind {
if reference.Kind == playbookKind {
return types.NamespacedName{
Namespace: task.Namespace,
Name: reference.Name,

View File

@ -18,13 +18,13 @@ package proxy
import (
"bytes"
"errors"
"fmt"
"io"
"net/http"
"sort"
"strings"
"github.com/cockroachdb/errors"
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
kkcorev1alpha1 "github.com/kubesphere/kubekey/api/core/v1alpha1"
"k8s.io/apimachinery/pkg/api/meta"
@ -48,7 +48,7 @@ import (
_const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/proxy/internal"
"github.com/kubesphere/kubekey/v4/pkg/proxy/resources/inventory"
"github.com/kubesphere/kubekey/v4/pkg/proxy/resources/pipeline"
"github.com/kubesphere/kubekey/v4/pkg/proxy/resources/playbook"
"github.com/kubesphere/kubekey/v4/pkg/proxy/resources/task"
)
@ -66,7 +66,7 @@ func RestConfig(runtimedir string, restconfig *rest.Config) error {
// NewProxyTransport return a new http.RoundTripper use in ctrl.client.
// When restConfig is not empty: should connect a kubernetes cluster and store some resources in there.
// Such as: pipeline.kubekey.kubesphere.io/v1, inventory.kubekey.kubesphere.io/v1, config.kubekey.kubesphere.io/v1
// Such as: playbook.kubekey.kubesphere.io/v1, inventory.kubekey.kubesphere.io/v1, config.kubekey.kubesphere.io/v1
// when restConfig is empty: store all resource in local.
//
// SPECIFICALLY: since tasks is running data, which is reentrant and large in quantity,
@ -83,7 +83,7 @@ func newProxyTransport(runtimedir string, restConfig *rest.Config) (http.RoundTr
if restConfig.Host != "" {
clientFor, err := rest.HTTPClientFor(restConfig)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to create http client")
}
lt.restClient = clientFor
}
@ -92,28 +92,22 @@ func newProxyTransport(runtimedir string, restConfig *rest.Config) (http.RoundTr
kkv1alpha1 := newAPIIResources(kkcorev1alpha1.SchemeGroupVersion)
storage, err := task.NewStorage(internal.NewFileRESTOptionsGetter(runtimedir, kkcorev1alpha1.SchemeGroupVersion))
if err != nil {
klog.V(6).ErrorS(err, "failed to create storage")
return nil, err
return nil, errors.Wrap(err, "failed to create task storage")
}
if err := kkv1alpha1.AddResource(resourceOptions{
path: "tasks",
storage: storage.Task,
}); err != nil {
klog.V(6).ErrorS(err, "failed to add resource")
return nil, err
return nil, errors.Wrap(err, "failed to add tasks resource")
}
if err := kkv1alpha1.AddResource(resourceOptions{
path: "tasks/status",
storage: storage.TaskStatus,
}); err != nil {
klog.V(6).ErrorS(err, "failed to add resource")
return nil, err
return nil, errors.Wrap(err, "failed to add tasks/status resource")
}
if err := lt.registerResources(kkv1alpha1); err != nil {
klog.V(6).ErrorS(err, "failed to register resources")
return nil, errors.Wrap(err, "failed to register v1alpha1 resources")
}
// when restConfig is null. should store all resource local
@ -123,46 +117,34 @@ func newProxyTransport(runtimedir string, restConfig *rest.Config) (http.RoundTr
// add inventory
inventoryStorage, err := inventory.NewStorage(internal.NewFileRESTOptionsGetter(runtimedir, kkcorev1.SchemeGroupVersion))
if err != nil {
klog.V(6).ErrorS(err, "failed to create storage")
return nil, err
return nil, errors.Wrap(err, "failed to create inventory storage")
}
if err := kkv1.AddResource(resourceOptions{
path: "inventories",
storage: inventoryStorage.Inventory,
}); err != nil {
klog.V(6).ErrorS(err, "failed to add resource")
return nil, err
return nil, errors.Wrap(err, "failed to add inventories resource")
}
// add pipeline
pipelineStorage, err := pipeline.NewStorage(internal.NewFileRESTOptionsGetter(runtimedir, kkcorev1.SchemeGroupVersion))
// add playbook
playbookStorage, err := playbook.NewStorage(internal.NewFileRESTOptionsGetter(runtimedir, kkcorev1.SchemeGroupVersion))
if err != nil {
klog.V(6).ErrorS(err, "failed to create storage")
return nil, err
return nil, errors.Wrap(err, "failed to create playbook storage")
}
if err := kkv1.AddResource(resourceOptions{
path: "pipelines",
storage: pipelineStorage.Pipeline,
path: "playbooks",
storage: playbookStorage.Playbook,
}); err != nil {
klog.V(6).ErrorS(err, "failed to add resource")
return nil, err
return nil, errors.Wrap(err, "failed to add playbooks resource")
}
if err := kkv1.AddResource(resourceOptions{
path: "pipelines/status",
storage: pipelineStorage.PipelineStatus,
path: "playbooks/status",
storage: playbookStorage.PlaybookStatus,
}); err != nil {
klog.V(6).ErrorS(err, "failed to add resource")
return nil, err
return nil, errors.Wrap(err, "failed to add playbooks/status resource")
}
if err := lt.registerResources(kkv1); err != nil {
klog.V(6).ErrorS(err, "failed to register resources")
return nil, err
return nil, errors.Wrap(err, "failed to register v1 resources")
}
}
@ -215,7 +197,7 @@ func (l *transport) RoundTrip(request *http.Request) (*http.Response, error) {
// dispatch request
handler, err := l.detectDispatcher(request)
if err != nil {
return response, fmt.Errorf("no router for request. url: %s, method: %s", request.URL.Path, request.Method)
return response, errors.Wrapf(err, "no router for request. url: %s, method: %s", request.URL.Path, request.Method)
}
// call handler
l.handlerChainFunc(handler).ServeHTTP(&responseWriter{response}, request)
@ -257,7 +239,7 @@ func (l *transport) registerResources(resources *apiResources) error {
_, isTableProvider := o.storage.(apirest.TableConvertor)
if isLister && !isTableProvider {
// All listers must implement TableProvider
return fmt.Errorf("%q must implement TableConvertor", o.path)
return errors.Errorf("%q must implement TableConvertor", o.path)
}
// Get the list of actions for the given scope.
@ -298,7 +280,7 @@ func newReqScope(resources *apiResources, o resourceOptions, authz authorizer.Au
// request scope
fqKindToRegister, err := apiendpoints.GetResourceKind(resources.gv, o.storage, _const.Scheme)
if err != nil {
return apihandlers.RequestScope{}, err
return apihandlers.RequestScope{}, errors.Wrap(err, "failed to get resourcekind")
}
reqScope := apihandlers.RequestScope{
Namer: apihandlers.ContextBasedNaming{
@ -338,7 +320,7 @@ func newReqScope(resources *apiResources, o resourceOptions, authz authorizer.Au
resetFields,
)
if err != nil {
return apihandlers.RequestScope{}, err
return apihandlers.RequestScope{}, errors.Wrap(err, "failed to create default fieldManager")
}
return reqScope, nil

View File

@ -3,6 +3,7 @@ package util
import (
"context"
"github.com/cockroachdb/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/cluster-api/util/patch"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
@ -36,11 +37,11 @@ func NewPatchHelper(client ctrlclient.Client, obj ...ctrlclient.Object) (*PatchH
}
helper, err := patch.NewHelper(o, client)
if err != nil {
return nil, err
return nil, errors.Wrapf(err, "failed to create patch helper for object %q", ctrlclient.ObjectKeyFromObject(o))
}
gvk, err := apiutil.GVKForObject(o, client.Scheme())
if err != nil {
return nil, err
return nil, errors.Wrapf(err, "failed to get gvk for object %q", ctrlclient.ObjectKeyFromObject(o))
}
helpers[gvk] = helper
}
@ -67,14 +68,14 @@ func (p *PatchHelper) Patch(ctx context.Context, obj ...ctrlclient.Object) error
for _, o := range obj {
gvk, err := apiutil.GVKForObject(o, p.client.Scheme())
if err != nil {
return err
return errors.Wrapf(err, "failed to get gvk for object %q", ctrlclient.ObjectKeyFromObject(o))
}
if p.helpers[gvk] == nil {
// object is created, should not patch.
return nil
}
if err := p.helpers[gvk].Patch(ctx, o); err != nil {
return err
return errors.Wrapf(err, "failed to patch object %q", ctrlclient.ObjectKeyFromObject(o))
}
}

Some files were not shown because too many files have changed in this diff Show More