Mirror of https://github.com/kubesphere/kubekey.git (synced 2025-12-25 17:12:50 +00:00)

fix: Remove the error stack from the intermediate layer. (#2521)

Signed-off-by: joyceliu <joyceliu@yunify.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>

parent 34448781a6
commit 954579beb5
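Note on the pattern behind this commit: github.com/cockroachdb/errors captures a stack trace the first time an error is wrapped, so intermediate layers can return the error unchanged while the site that produced it adds context once, and the entrypoint prints the whole chain (messages plus stack) with %+v. A minimal sketch of that convention follows; the file path and helper names are illustrative and not taken from the repository.

package main

import (
	"fmt"
	"os"

	"github.com/cockroachdb/errors"
)

// readConfig wraps the error once, at the point of failure, with context.
// Wrap/Wrapf also record the stack trace here.
func readConfig(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		return errors.Wrapf(err, "failed to read config %q", path)
	}

	return nil
}

// run is an intermediate layer: it no longer calls errors.WithStack and
// simply passes the error through, as this commit does across the tree.
func run() error {
	return readConfig("/etc/kubekey/config.yaml")
}

func main() {
	if err := run(); err != nil {
		// %+v prints the wrapped messages and the stack captured by Wrapf.
		fmt.Printf("%+v", err)
		os.Exit(1)
	}
}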
@@ -59,12 +59,12 @@ func InitProfiling(ctx context.Context) error {
 case "cpu":
 f, err = os.Create(profileOutput)
 if err != nil {
-return errors.WithStack(err)
+return errors.Wrap(err, "failed to create cpu profile")
 }

 err = pprof.StartCPUProfile(f)
 if err != nil {
-return errors.WithStack(err)
+return errors.Wrap(err, "failed to start cpu profile")
 }
 // Block and mutex profiles need a call to Set{Block,Mutex}ProfileRate to
 // output anything. We choose to sample all events.

@@ -113,12 +113,12 @@ func FlushProfiling() error {
 f, err := os.Create(profileOutput)
 if err != nil {
-return errors.WithStack(err)
+return errors.Wrapf(err, "failed to create profile %s", profileName)
 }
 defer f.Close()

 if err := profile.WriteTo(f, 0); err != nil {
-return errors.WithStack(err)
+return errors.Wrapf(err, "failed to write profile %s", profileName)
 }
 }

@@ -142,7 +142,7 @@ func InitGOPS() error {
 // Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
 // Bind to a random port on address 127.0.0.1
 if err := agent.Listen(agent.Options{}); err != nil {
-return errors.WithStack(err)
+return errors.Wrap(err, "failed to listen gops agent")
 }
 }
@@ -19,7 +19,6 @@ package app
 import (
 "context"

-"github.com/cockroachdb/errors"
 "github.com/spf13/cobra"
 "sigs.k8s.io/controller-runtime/pkg/manager/signals"

@@ -37,7 +36,7 @@ func NewControllerManagerCommand() *cobra.Command {
 Short: "kubekey controller manager",
 PersistentPreRunE: func(*cobra.Command, []string) error {
 if err := options.InitGOPS(); err != nil {
-return errors.WithStack(err)
+return err
 }

 return options.InitProfiling(ctx)

@@ -17,17 +17,17 @@ limitations under the License.
 package main

 import (
+"fmt"
 "os"

-"k8s.io/component-base/cli"

 "github.com/kubesphere/kubekey/v4/cmd/controller-manager/app"
 _ "github.com/kubesphere/kubekey/v4/pkg/controllers/core"
 _ "github.com/kubesphere/kubekey/v4/pkg/controllers/infrastructure"
 )

 func main() {
-command := app.NewControllerManagerCommand()
-code := cli.Run(command)
-os.Exit(code)
+if err := app.NewControllerManagerCommand().Execute(); err != nil {
+fmt.Printf("%+v", err)
+os.Exit(1)
+}
 }

@@ -20,7 +20,6 @@ limitations under the License.
 package builtin

 import (
-"github.com/cockroachdb/errors"
 "github.com/spf13/cobra"

 "github.com/kubesphere/kubekey/v4/cmd/kk/app/options/builtin"

@@ -48,7 +47,7 @@ func newArtifactExportCommand() *cobra.Command {
 RunE: func(cmd *cobra.Command, _ []string) error {
 playbook, err := o.Complete(cmd, []string{"playbooks/artifact_export.yaml"})
 if err != nil {
-return errors.WithStack(err)
+return err
 }

 return o.CommonOptions.Run(cmd.Context(), playbook)

@@ -71,7 +70,7 @@ func newArtifactImagesCommand() *cobra.Command {
 RunE: func(cmd *cobra.Command, _ []string) error {
 playbook, err := o.Complete(cmd, []string{"playbooks/artifact_images.yaml"})
 if err != nil {
-return errors.WithStack(err)
+return err
 }

 return o.CommonOptions.Run(cmd.Context(), playbook)

@@ -20,7 +20,6 @@ limitations under the License.
 package builtin

 import (
-"github.com/cockroachdb/errors"
 "github.com/spf13/cobra"

 "github.com/kubesphere/kubekey/v4/cmd/kk/app/options/builtin"

@@ -48,7 +47,7 @@ func newCertsRenewCommand() *cobra.Command {
 RunE: func(cmd *cobra.Command, _ []string) error {
 playbook, err := o.Complete(cmd, []string{"playbooks/certs_renew.yaml"})
 if err != nil {
-return errors.WithStack(err)
+return err
 }

 return o.CommonOptions.Run(cmd.Context(), playbook)

@@ -20,7 +20,6 @@ limitations under the License.
 package builtin

 import (
-"github.com/cockroachdb/errors"
 "github.com/spf13/cobra"

 "github.com/kubesphere/kubekey/v4/cmd/kk/app/options/builtin"

@@ -47,7 +46,7 @@ func newCreateClusterCommand() *cobra.Command {
 RunE: func(cmd *cobra.Command, _ []string) error {
 playbook, err := o.Complete(cmd, []string{"playbooks/create_cluster.yaml"})
 if err != nil {
-return errors.WithStack(err)
+return err
 }

 return o.CommonOptions.Run(cmd.Context(), playbook)

@@ -20,7 +20,6 @@ limitations under the License.
 package builtin

 import (
-"github.com/cockroachdb/errors"
 "github.com/spf13/cobra"

 "github.com/kubesphere/kubekey/v4/cmd/kk/app/options/builtin"

@@ -49,7 +48,7 @@ func newInitOSCommand() *cobra.Command {
 RunE: func(cmd *cobra.Command, _ []string) error {
 playbook, err := o.Complete(cmd, []string{"playbooks/init_os.yaml"})
 if err != nil {
-return errors.WithStack(err)
+return err
 }

 return o.CommonOptions.Run(cmd.Context(), playbook)

@@ -72,7 +71,7 @@ func newInitRegistryCommand() *cobra.Command {
 RunE: func(cmd *cobra.Command, _ []string) error {
 playbook, err := o.Complete(cmd, []string{"playbooks/init_registry.yaml"})
 if err != nil {
-return errors.WithStack(err)
+return err
 }

 return o.CommonOptions.Run(cmd.Context(), playbook)

@@ -20,7 +20,6 @@ limitations under the License.
 package builtin

 import (
-"github.com/cockroachdb/errors"
 "github.com/spf13/cobra"

 "github.com/kubesphere/kubekey/v4/cmd/kk/app/options/builtin"

@@ -43,7 +42,7 @@ func NewPreCheckCommand() *cobra.Command {
 RunE: func(cmd *cobra.Command, args []string) error {
 playbook, err := o.Complete(cmd, append(args, "playbooks/precheck.yaml"))
 if err != nil {
-return errors.WithStack(err)
+return err
 }

 return o.CommonOptions.Run(cmd.Context(), playbook)
@@ -46,9 +46,7 @@ func NewArtifactExportOptions() *ArtifactExportOptions {
 // Flags add to newArtifactExportCommand
 func (o *ArtifactExportOptions) Flags() cliflag.NamedFlagSets {
-fss := o.CommonOptions.Flags()
-
-return fss
+return o.CommonOptions.Flags()
 }

 // Complete options. create Playbook, Config and Inventory

@@ -75,10 +73,10 @@ func (o *ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kk
 SkipTags: []string{"certs"},
 }
 if err := completeInventory(o.CommonOptions.InventoryFile, o.CommonOptions.Inventory); err != nil {
-return nil, errors.Wrap(err, "failed to get local inventory. Please set it by \"--inventory\"")
+return nil, err
 }
 if err := o.CommonOptions.Complete(playbook); err != nil {
-return nil, errors.WithStack(err)
+return nil, err
 }

 return playbook, nil

@@ -101,9 +99,7 @@ func NewArtifactImagesOptions() *ArtifactImagesOptions {
 // Flags add to newArtifactImagesCommand
 func (o *ArtifactImagesOptions) Flags() cliflag.NamedFlagSets {
-fss := o.CommonOptions.Flags()
-
-return fss
+return o.CommonOptions.Flags()
 }

 // Complete options. create Playbook, Config and Inventory

@@ -42,9 +42,7 @@ type CertsRenewOptions struct {
 // Flags add to newCertsRenewCommand
 func (o *CertsRenewOptions) Flags() cliflag.NamedFlagSets {
-fss := o.CommonOptions.Flags()
-
-return fss
+return o.CommonOptions.Flags()
 }

 // Complete options. create Playbook, Config and Inventory

@@ -68,9 +66,6 @@ func (o *CertsRenewOptions) Complete(cmd *cobra.Command, args []string) (*kkcore
 Debug: o.Debug,
 Tags: []string{"certs"},
 }
-if err := o.CommonOptions.Complete(playbook); err != nil {
-return nil, errors.WithStack(err)
-}
-
-return playbook, nil
+return playbook, o.CommonOptions.Complete(playbook)
 }

@@ -90,13 +90,13 @@ func (o *CreateClusterOptions) Complete(cmd *cobra.Command, args []string) (*kkc
 }
 // override kube_version in config
 if err := completeConfig(o.Kubernetes, o.CommonOptions.ConfigFile, o.CommonOptions.Config); err != nil {
-return nil, errors.WithStack(err)
+return nil, err
 }
 if err := completeInventory(o.CommonOptions.InventoryFile, o.CommonOptions.Inventory); err != nil {
-return nil, errors.WithStack(err)
+return nil, err
 }
 if err := o.CommonOptions.Complete(playbook); err != nil {
-return nil, errors.WithStack(err)
+return nil, err
 }

 return playbook, o.completeConfig()

@@ -50,9 +50,7 @@ func NewInitOSOptions() *InitOSOptions {
 // Flags add to newInitOSCommand
 func (o *InitOSOptions) Flags() cliflag.NamedFlagSets {
-fss := o.CommonOptions.Flags()
-
-return fss
+return o.CommonOptions.Flags()
 }

 // Complete options. create Playbook, Config and Inventory

@@ -79,14 +77,13 @@ func (o *InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.P
 }

 if err := o.CommonOptions.Complete(playbook); err != nil {
-return nil, errors.Wrap(err, "failed to complete playbook")
+return nil, err
 }

-return playbook, o.complateConfig()
+return playbook, o.completeConfig()
 }

-func (o *InitOSOptions) complateConfig() error {
+func (o *InitOSOptions) completeConfig() error {
 if wd, _, err := unstructured.NestedString(o.CommonOptions.Config.Value(), _const.Workdir); err != nil {
 // workdir should set by CommonOptions
 return errors.Wrapf(err, "failed to get %q in config", _const.Workdir)

@@ -120,9 +117,7 @@ func NewInitRegistryOptions() *InitRegistryOptions {
 // Flags add to newInitRegistryCommand
 func (o *InitRegistryOptions) Flags() cliflag.NamedFlagSets {
-fss := o.CommonOptions.Flags()
-
-return fss
+return o.CommonOptions.Flags()
 }

 // Complete options. create Playbook, Config and Inventory

@@ -147,9 +142,6 @@ func (o *InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkco
 Playbook: o.Playbook,
 Debug: o.Debug,
 }
-if err := o.CommonOptions.Complete(playbook); err != nil {
-return nil, errors.Wrap(err, "failed to complete playbook")
-}
-
-return playbook, nil
+return playbook, o.CommonOptions.Complete(playbook)
 }

@@ -74,11 +74,8 @@ func (o *PreCheckOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1
 Tags: tags,
 }
 if err := completeInventory(o.CommonOptions.InventoryFile, o.CommonOptions.Inventory); err != nil {
-return nil, errors.WithStack(err)
-}
-if err := o.CommonOptions.Complete(playbook); err != nil {
-return nil, errors.WithStack(err)
+return nil, err
 }

-return playbook, nil
+return playbook, o.CommonOptions.Complete(playbook)
 }
@@ -111,14 +111,18 @@ func NewCommonOptions() CommonOptions {
 // resources, and then runs the command manager.
 func (o *CommonOptions) Run(ctx context.Context, playbook *kkcorev1.Playbook) error {
 // create workdir directory,if not exists
-if _, err := os.Stat(o.Workdir); os.IsNotExist(err) {
+if _, err := os.Stat(o.Workdir); err != nil {
+if !os.IsNotExist(err) {
+return errors.Wrapf(err, "failed to stat local dir %q for playbook %q", o.Workdir, ctrlclient.ObjectKeyFromObject(playbook))
+}
+// if dir is not exist, create it.
 if err := os.MkdirAll(o.Workdir, os.ModePerm); err != nil {
-return errors.WithStack(err)
+return errors.Wrapf(err, "failed to create local dir %q for playbook %q", o.Workdir, ctrlclient.ObjectKeyFromObject(playbook))
 }
 }
 restconfig := &rest.Config{}
 if err := proxy.RestConfig(filepath.Join(o.Workdir, _const.RuntimeDir), restconfig); err != nil {
-return errors.WithStack(err)
+return err
 }
 client, err := ctrlclient.New(restconfig, ctrlclient.Options{
 Scheme: _const.Scheme,

@@ -172,13 +176,11 @@ func (o *CommonOptions) Complete(playbook *kkcorev1.Playbook) error {
 }
 o.Workdir = filepath.Join(wd, o.Workdir)
 }

 // Generate and complete the configuration.
 if err := o.completeConfig(o.Config); err != nil {
-return errors.WithStack(err)
+return err
 }
 playbook.Spec.Config = ptr.Deref(o.Config, kkcorev1.Config{})

 // Complete the inventory reference.
 o.completeInventory(o.Inventory)
 playbook.Spec.InventoryRef = &corev1.ObjectReference{

@@ -214,7 +216,7 @@ func (o *CommonOptions) completeConfig(config *kkcorev1.Config) error {
 return errors.New("--set value should be k=v")
 }
 if err := setValue(config, setVal[:i], setVal[i+1:]); err != nil {
-return errors.Wrapf(err, "failed to set value to config by \"--set\" %q", setVal[:i])
+return err
 }
 }
 }

@@ -240,29 +242,29 @@ func setValue(config *kkcorev1.Config, key, val string) error {
 var value map[string]any
 err := json.Unmarshal([]byte(val), &value)
 if err != nil {
-return errors.Wrapf(err, "failed to unmarshal json for %q", key)
+return errors.Wrapf(err, "failed to unmarshal json object value for \"--set %s\"", key)
 }

 return errors.Wrapf(unstructured.SetNestedMap(config.Value(), value, key),
-"failed to set %q to config", key)
+"failed to set \"--set %s\" to config", key)
 case strings.HasPrefix(val, "[") && strings.HasSuffix(val, "]"):
 var value []any
 err := json.Unmarshal([]byte(val), &value)
 if err != nil {
-return errors.Wrapf(err, "failed to unmarshal json for %q", key)
+return errors.Wrapf(err, "failed to unmarshal json array value for \"--set %s\"", key)
 }

 return errors.Wrapf(unstructured.SetNestedSlice(config.Value(), value, key),
-"failed to set %q to config", key)
+"failed to set \"--set %s\" to config", key)
 case strings.EqualFold(val, "TRUE") || strings.EqualFold(val, "YES") || strings.EqualFold(val, "Y"):
 return errors.Wrapf(unstructured.SetNestedField(config.Value(), true, key),
-"failed to set %q to config", key)
+"failed to set \"--set %s\" to config", key)
 case strings.EqualFold(val, "FALSE") || strings.EqualFold(val, "NO") || strings.EqualFold(val, "N"):
 return errors.Wrapf(unstructured.SetNestedField(config.Value(), false, key),
-"failed to set %q to config", key)
+"failed to set \"--set %s\" to config", key)
 default:
 return errors.Wrapf(unstructured.SetNestedField(config.Value(), val, key),
-"failed to set %q to config", key)
+"failed to set \"--set %s\" to config", key)
 }
 }
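The setValue hunk above also tightens the error messages around --set parsing. For orientation, the value side of a --set key=value flag is classified before being written into the playbook Config; the sketch below only mirrors that dispatch for illustration and is not the repository's function:

package example

import "strings"

// classifySetValue reports how the value of "--set key=value" would be
// interpreted by the dispatch shown above (the real code writes the parsed
// value into the kkcorev1.Config object instead of returning a label).
func classifySetValue(val string) string {
	switch {
	case strings.HasPrefix(val, "{") && strings.HasSuffix(val, "}"):
		return "JSON object, stored as a nested map"
	case strings.HasPrefix(val, "[") && strings.HasSuffix(val, "]"):
		return "JSON array, stored as a nested slice"
	case strings.EqualFold(val, "TRUE") || strings.EqualFold(val, "YES") || strings.EqualFold(val, "Y"):
		return "boolean true"
	case strings.EqualFold(val, "FALSE") || strings.EqualFold(val, "NO") || strings.EqualFold(val, "N"):
		return "boolean false"
	default:
		return "plain string"
	}
}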
@@ -81,13 +81,11 @@ func InitProfiling(ctx context.Context) error {
 // If the command is interrupted before the end (ctrl-c), flush the
 // profiling files
 go func() {
 <-ctx.Done()
-if err := f.Close(); err != nil {
-fmt.Printf("failed to close file. file: %v. error: %v \n", profileOutput, err)
-}

 if err := FlushProfiling(); err != nil {
 fmt.Printf("failed to FlushProfiling. file: %v. error: %v \n", profileOutput, err)
 }

@@ -115,12 +113,12 @@ func FlushProfiling() error {
 f, err := os.Create(profileOutput)
 if err != nil {
-return errors.Wrap(err, "failed to create profile")
+return errors.Wrapf(err, "failed to create profile %s", profileName)
 }
 defer f.Close()

 if err := profile.WriteTo(f, 0); err != nil {
-return errors.Wrap(err, "failed to write profile")
+return errors.Wrapf(err, "failed to write profile %s", profileName)
 }
 }

@@ -145,7 +143,7 @@ func InitGOPS() error {
 // Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
 // Bind to a random port on address 127.0.0.1
 if err := agent.Listen(agent.Options{}); err != nil {
-return errors.Wrap(err, "failed to listen gops")
+return errors.Wrap(err, "failed to listen gops agent")
 }
 }

@@ -106,9 +106,6 @@ func (o *KubeKeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kkcore
 SkipTags: o.SkipTags,
 Debug: o.Debug,
 }
-if err := o.CommonOptions.Complete(playbook); err != nil {
-return nil, errors.WithStack(err)
-}
-
-return playbook, nil
+return playbook, o.CommonOptions.Complete(playbook)
 }

@@ -55,7 +55,7 @@ func newPlaybookCommand() *cobra.Command {
 return errors.Wrap(err, "failed to get inventory")
 }
 if err := proxy.RestConfig(filepath.Join(_const.GetWorkdirFromConfig(playbook.Spec.Config), _const.RuntimeDir), restconfig); err != nil {
-return errors.Wrap(err, "failed to get rest config")
+return err
 }
 // use proxy client to store task.
 proxyclient, err := ctrlclient.New(restconfig, ctrlclient.Options{

@@ -42,7 +42,7 @@ func NewRootCommand() *cobra.Command {
 Long: "kubekey is a daemon that execute command in a node",
 PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
 if err := options.InitGOPS(); err != nil {
-return errors.WithStack(err)
+return err
 }

 return options.InitProfiling(cmd.Context())

@@ -83,7 +83,7 @@ func NewRootCommand() *cobra.Command {
 func CommandRunE(ctx context.Context, workdir string, playbook *kkcorev1.Playbook, config *kkcorev1.Config, inventory *kkcorev1.Inventory) error {
 restconfig := &rest.Config{}
 if err := proxy.RestConfig(filepath.Join(workdir, _const.RuntimeDir), restconfig); err != nil {
-return errors.Wrap(err, "failed to get restconfig")
+return err
 }
 client, err := ctrlclient.New(restconfig, ctrlclient.Options{
 Scheme: _const.Scheme,

@@ -17,7 +17,6 @@ limitations under the License.
 package app

 import (
-"github.com/cockroachdb/errors"
 "github.com/spf13/cobra"

 "github.com/kubesphere/kubekey/v4/cmd/kk/app/options"

@@ -32,7 +31,7 @@ func newRunCommand() *cobra.Command {
 RunE: func(cmd *cobra.Command, args []string) error {
 playbook, err := o.Complete(cmd, args)
 if err != nil {
-return errors.WithStack(err)
+return err
 }

 return o.CommonOptions.Run(cmd.Context(), playbook)

@@ -17,15 +17,15 @@ limitations under the License.
 package main

 import (
+"fmt"
 "os"

-"k8s.io/component-base/cli"

 "github.com/kubesphere/kubekey/v4/cmd/kk/app"
 )

 func main() {
-command := app.NewRootCommand()
-code := cli.Run(command)
-os.Exit(code)
+if err := app.NewRootCommand().Execute(); err != nil {
+fmt.Printf("%+v", err)
+os.Exit(1)
+}
 }
@@ -61,7 +61,7 @@ type Connector interface {
 func NewConnector(host string, v variable.Variable) (Connector, error) {
 vars, err := v.Get(variable.GetAllVariable(host))
 if err != nil {
-return nil, errors.Wrapf(err, "failed to get host %q variable", host)
+return nil, err
 }
 connectorVars := make(map[string]any)
 if c1, ok := vars.(map[string]any)[_const.VariableConnector]; ok {

@@ -79,7 +79,7 @@ func NewConnector(host string, v variable.Variable) (Connector, error) {
 case connectedKubernetes:
 workdir, err := v.Get(variable.GetWorkDir())
 if err != nil {
-return nil, errors.Wrap(err, "failed to get workdir from variable")
+return nil, err
 }
 wd, ok := workdir.(string)
 if !ok {

@@ -38,7 +38,7 @@ var _ Connector = &kubernetesConnector{}
 func newKubernetesConnector(host string, workdir string, connectorVars map[string]any) (*kubernetesConnector, error) {
 kubeconfig, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorKubeconfig)
 if err != nil && host != _const.VariableLocalHost {
-return nil, errors.WithStack(err)
+return nil, err
 }

 return &kubernetesConnector{

@@ -39,7 +39,7 @@ var _ GatherFacts = &localConnector{}
 func newLocalConnector(connectorVars map[string]any) *localConnector {
 password, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorPassword)
 if err != nil { // password is not necessary when execute with root user.
-klog.V(4).InfoS("get connector sudo password failed, execute command without sudo", "error", err)
+klog.Warning("Warning: Failed to obtain local connector password when executing command with sudo. Please ensure the 'kk' process is run by a root-privileged user.")
 }

 return &localConnector{Password: password, Cmd: exec.New(), shell: defaultSHELL}

@@ -113,7 +113,7 @@ func (c *localConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte
 output = bytes.Replace(output, []byte("Password:"), []byte(""), -1)
 }

-return output, err
+return output, errors.Wrapf(err, "failed to execute command")
 }

 // HostInfo gathers and returns host information for the local host.

@@ -124,22 +124,22 @@ func (c *localConnector) HostInfo(ctx context.Context) (map[string]any, error) {
 osVars := make(map[string]any)
 var osRelease bytes.Buffer
 if err := c.FetchFile(ctx, "/etc/os-release", &osRelease); err != nil {
-return nil, errors.Wrap(err, "failed to fetch os-release")
+return nil, err
 }
 osVars[_const.VariableOSRelease] = convertBytesToMap(osRelease.Bytes(), "=")
 kernel, err := c.ExecuteCommand(ctx, "uname -r")
 if err != nil {
-return nil, errors.Wrap(err, "failed to get kernel version")
+return nil, err
 }
 osVars[_const.VariableOSKernelVersion] = string(bytes.TrimSpace(kernel))
 hn, err := c.ExecuteCommand(ctx, "hostname")
 if err != nil {
-return nil, errors.Wrap(err, "failed to get hostname")
+return nil, err
 }
 osVars[_const.VariableOSHostName] = string(bytes.TrimSpace(hn))
 arch, err := c.ExecuteCommand(ctx, "arch")
 if err != nil {
-return nil, errors.Wrap(err, "failed to get arch")
+return nil, err
 }
 osVars[_const.VariableOSArchitecture] = string(bytes.TrimSpace(arch))

@@ -147,12 +147,12 @@ func (c *localConnector) HostInfo(ctx context.Context) (map[string]any, error) {
 procVars := make(map[string]any)
 var cpu bytes.Buffer
 if err := c.FetchFile(ctx, "/proc/cpuinfo", &cpu); err != nil {
-return nil, errors.Wrap(err, "failed to get cpuinfo")
+return nil, err
 }
 procVars[_const.VariableProcessCPU] = convertBytesToSlice(cpu.Bytes(), ":")
 var mem bytes.Buffer
 if err := c.FetchFile(ctx, "/proc/meminfo", &mem); err != nil {
-return nil, errors.Wrap(err, "failed to get meminfo")
+return nil, err
 }
 procVars[_const.VariableProcessMemory] = convertBytesToMap(mem.Bytes(), ":")

@@ -131,14 +131,14 @@ func (c *sshConnector) Init(context.Context) error {
 auth = append(auth, ssh.PublicKeys(privateKey))
 }

-sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%v", c.Host, c.Port), &ssh.ClientConfig{
+sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%d", c.Host, c.Port), &ssh.ClientConfig{
 User: c.User,
 Auth: auth,
 HostKeyCallback: ssh.InsecureIgnoreHostKey(),
 Timeout: 30 * time.Second,
 })
 if err != nil {
-return errors.Wrapf(err, "failed to dial %q:%d ssh server", c.Host, c.Port)
+return errors.Wrapf(err, "failed to dial %s:%d ssh server", c.Host, c.Port)
 }
 c.client = sshClient

@@ -288,7 +288,7 @@ func (c *sshConnector) ExecuteCommand(_ context.Context, cmd string) ([]byte, er
 output := append(stdoutBuf.Bytes(), stderrBuf.Bytes()...)

-return output, err
+return output, errors.Wrap(err, "failed to execute ssh command")
 }

 // HostInfo for GatherFacts

@@ -297,22 +297,22 @@ func (c *sshConnector) HostInfo(ctx context.Context) (map[string]any, error) {
 osVars := make(map[string]any)
 var osRelease bytes.Buffer
 if err := c.FetchFile(ctx, "/etc/os-release", &osRelease); err != nil {
-return nil, errors.Wrap(err, "failed to fetch os-release")
+return nil, err
 }
 osVars[_const.VariableOSRelease] = convertBytesToMap(osRelease.Bytes(), "=")
 kernel, err := c.ExecuteCommand(ctx, "uname -r")
 if err != nil {
-return nil, errors.Wrap(err, "failed to get kernel version")
+return nil, err
 }
 osVars[_const.VariableOSKernelVersion] = string(bytes.TrimSpace(kernel))
 hn, err := c.ExecuteCommand(ctx, "hostname")
 if err != nil {
-return nil, errors.Wrap(err, "failed to get hostname")
+return nil, err
 }
 osVars[_const.VariableOSHostName] = string(bytes.TrimSpace(hn))
 arch, err := c.ExecuteCommand(ctx, "arch")
 if err != nil {
-return nil, errors.Wrap(err, "failed to get arch")
+return nil, err
 }
 osVars[_const.VariableOSArchitecture] = string(bytes.TrimSpace(arch))

@@ -320,12 +320,12 @@ func (c *sshConnector) HostInfo(ctx context.Context) (map[string]any, error) {
 procVars := make(map[string]any)
 var cpu bytes.Buffer
 if err := c.FetchFile(ctx, "/proc/cpuinfo", &cpu); err != nil {
-return nil, errors.Wrap(err, "failed to get cpuinfo")
+return nil, err
 }
 procVars[_const.VariableProcessCPU] = convertBytesToSlice(cpu.Bytes(), ":")
 var mem bytes.Buffer
 if err := c.FetchFile(ctx, "/proc/meminfo", &mem); err != nil {
-return nil, errors.Wrap(err, "failed to get meminfo error")
+return nil, err
 }
 procVars[_const.VariableProcessMemory] = convertBytesToMap(mem.Bytes(), ":")
@@ -94,16 +94,16 @@ func (r PlaybookReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_
 // get playbook
 playbook := &kkcorev1.Playbook{}
 if err := r.Client.Get(ctx, req.NamespacedName, playbook); err != nil {
-if apierrors.IsNotFound(err) {
-return ctrl.Result{}, nil
+if !apierrors.IsNotFound(err) {
+return ctrl.Result{}, errors.Wrapf(err, "failed to get playbook %q", req.String())
 }

-return ctrl.Result{}, errors.Wrapf(err, "failed to get playbook %q", req.String())
+return ctrl.Result{}, nil
 }

 helper, err := patch.NewHelper(playbook, r.Client)
 if err != nil {
-return ctrl.Result{}, errors.WithStack(err)
+return ctrl.Result{}, err
 }
 defer func() {
 if retErr != nil {

@@ -113,10 +113,10 @@ func (r PlaybookReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_
 playbook.Status.FailureMessage = retErr.Error()
 }
 if err := r.reconcileStatus(ctx, playbook); err != nil {
-retErr = errors.Join(retErr, errors.WithStack(err))
+retErr = errors.Join(retErr, err)
 }
 if err := helper.Patch(ctx, playbook); err != nil {
-retErr = errors.Join(retErr, errors.WithStack(err))
+retErr = errors.Join(retErr, err)
 }
 }()

@@ -130,10 +130,10 @@ func (r PlaybookReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_
 // Handle deleted clusters
 if !playbook.DeletionTimestamp.IsZero() {
-return reconcile.Result{}, errors.WithStack(r.reconcileDelete(ctx, playbook))
+return reconcile.Result{}, r.reconcileDelete(ctx, playbook)
 }

-return ctrl.Result{}, errors.WithStack(r.reconcileNormal(ctx, playbook))
+return ctrl.Result{}, r.reconcileNormal(ctx, playbook)
 }

 func (r PlaybookReconciler) reconcileStatus(ctx context.Context, playbook *kkcorev1.Playbook) error {

@@ -171,13 +171,13 @@ func (r PlaybookReconciler) reconcileStatus(ctx context.Context, playbook *kkcor
 func (r *PlaybookReconciler) reconcileDelete(ctx context.Context, playbook *kkcorev1.Playbook) error {
 podList := &corev1.PodList{}
 if err := util.GetObjectListFromOwner(ctx, r.Client, playbook, podList); err != nil {
-return errors.Wrapf(err, "failed to get pod list from playbook %q", ctrlclient.ObjectKeyFromObject(playbook))
+return err
 }
 if playbook.Status.Phase == kkcorev1.PlaybookPhaseFailed || playbook.Status.Phase == kkcorev1.PlaybookPhaseSucceeded {
 // playbook has completed. delete the owner pods.
 for _, obj := range podList.Items {
 if err := r.Client.Delete(ctx, &obj); err != nil {
-return errors.WithStack(err)
+return errors.Wrapf(err, "failed to delete pod for %q", ctrlclient.ObjectKeyFromObject(playbook))
 }
 }
 }

@@ -259,5 +259,6 @@ func (r *PlaybookReconciler) dealRunningPlaybook(ctx context.Context, playbook *
 return errors.Wrapf(err, "failed to set ownerReference to playbook pod %q", pod.GenerateName)
 }

-return errors.WithStack(r.Client.Create(ctx, pod))
+return errors.Wrapf(r.Client.Create(ctx, pod),
+"failed to create pod of playbook %q", ctrlclient.ObjectKeyFromObject(playbook))
 }
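The reconciler hunks above invert the not-found check so that a real Get failure is wrapped once with context while a deleted object simply ends reconciliation. A self-contained sketch of that shape, using a core ConfigMap as a stand-in for the KubeKey resources (illustrative only, not code from this commit):

package example

import (
	"context"

	"github.com/cockroachdb/errors"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

type exampleReconciler struct {
	Client ctrlclient.Client
}

// Reconcile fetches the object; an unexpected error is wrapped with context
// right here, and a not-found error falls through to a clean "nothing to do".
func (r *exampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	obj := &corev1.ConfigMap{}
	if err := r.Client.Get(ctx, req.NamespacedName, obj); err != nil {
		if !apierrors.IsNotFound(err) {
			return ctrl.Result{}, errors.Wrapf(err, "failed to get configmap %q", req.String())
		}

		return ctrl.Result{}, nil
	}

	// ...reconcile obj...
	return ctrl.Result{}, nil
}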
@@ -63,7 +63,7 @@ func (w *PlaybookWebhook) Default(ctx context.Context, obj runtime.Object) error
 if playbook.Spec.ServiceAccountName == "" && os.Getenv(_const.ENV_EXECUTOR_CLUSTERROLE) != "" {
 // should create default service account in current namespace
 if err := w.syncServiceAccount(ctx, playbook, os.Getenv(_const.ENV_EXECUTOR_CLUSTERROLE)); err != nil {
-return errors.WithStack(err)
+return err
 }
 playbook.Spec.ServiceAccountName = defaultServiceAccountName
 }

@@ -78,13 +78,13 @@ func (w *PlaybookWebhook) syncServiceAccount(ctx context.Context, playbook *kkco
 // check if clusterrole is exist
 cr := &rbacv1.ClusterRole{}
 if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Name: clusterrole}, cr); err != nil {
-return errors.WithStack(err)
+return errors.Wrapf(err, "failed to get clusterrole %q for playbook %q", clusterrole, ctrlclient.ObjectKeyFromObject(playbook))
 }
 // check if the default service account is exist
 sa := &corev1.ServiceAccount{}
 if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: playbook.Namespace, Name: defaultServiceAccountName}, sa); err != nil {
 if !apierrors.IsNotFound(err) {
-return errors.WithStack(err)
+return errors.Wrapf(err, "failed to get serviceaccount %q for playbook %q", defaultServiceAccountName, ctrlclient.ObjectKeyFromObject(playbook))
 }
 // create service account if not exist.
 sa = &corev1.ServiceAccount{

@@ -101,7 +101,7 @@ func (w *PlaybookWebhook) syncServiceAccount(ctx context.Context, playbook *kkco
 crb := &rbacv1.ClusterRoleBinding{}
 if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Name: defaultClusterRoleBindingName}, crb); err != nil {
 if !apierrors.IsNotFound(err) {
-return errors.WithStack(err)
+return errors.Wrapf(err, "failed to get clusterrolebinding %q for playbook %q", defaultClusterRoleBindingName, ctrlclient.ObjectKeyFromObject(playbook))
 }
 // create clusterrolebinding if not exist
 return w.Client.Create(ctx, &rbacv1.ClusterRoleBinding{

@@ -134,5 +134,6 @@ func (w *PlaybookWebhook) syncServiceAccount(ctx context.Context, playbook *kkco
 Namespace: playbook.Namespace,
 })

-return errors.WithStack(w.Client.Patch(ctx, ncrb, ctrlclient.MergeFrom(crb)))
+return errors.Wrapf(w.Client.Patch(ctx, ncrb, ctrlclient.MergeFrom(crb)),
+"fail to update clusterrolebinding %q for playbook %q", defaultClusterRoleBindingName, ctrlclient.ObjectKeyFromObject(playbook))
 }

@@ -117,7 +117,7 @@ func newClusterScope(ctx context.Context, client ctrlclient.Client, clusterReq r
 func (p *clusterScope) newPatchHelper(obj ...ctrlclient.Object) error {
 helper, err := util.NewPatchHelper(p.client, obj...)
 if err != nil {
-return errors.Wrapf(err, "failed to create patch helper with scope %q", p.String())
+return err
 }
 p.PatchHelper = helper

@@ -133,7 +133,7 @@ func (p *clusterScope) isPaused() bool {
 func (p *clusterScope) ifPlaybookCompleted(ctx context.Context, owner ctrlclient.Object) (bool, error) {
 playbookList := &kkcorev1.PlaybookList{}
 if err := util.GetObjectListFromOwner(ctx, p.client, owner, playbookList); err != nil {
-return false, errors.Wrapf(err, "failed to get playbook list from owner %q", ctrlclient.ObjectKeyFromObject(owner))
+return false, err
 }
 for _, playbook := range playbookList.Items {
 if playbook.Status.Phase != kkcorev1.PlaybookPhaseFailed && playbook.Status.Phase != kkcorev1.PlaybookPhaseSucceeded {
@@ -109,11 +109,11 @@ func (r *InventoryReconciler) objectToInventoryMapFunc(ctx context.Context, obj
 func (r *InventoryReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, retErr error) {
 inventory := &kkcorev1.Inventory{}
 if err := r.Client.Get(ctx, req.NamespacedName, inventory); err != nil {
-if apierrors.IsNotFound(err) {
-return ctrl.Result{}, nil
+if !apierrors.IsNotFound(err) {
+return ctrl.Result{}, errors.Wrapf(err, "failed to get inventory %q", req.String())
 }

-return ctrl.Result{}, errors.Wrapf(err, "failed to get inventory %q", req.String())
+return ctrl.Result{}, nil
 }
 clusterName := inventory.Labels[clusterv1beta1.ClusterNameLabel]
 if clusterName == "" {

@@ -126,14 +126,14 @@ func (r *InventoryReconciler) Reconcile(ctx context.Context, req reconcile.Reque
 Name: clusterName,
 }})
 if err != nil {
-return ctrl.Result{}, errors.WithStack(err)
+return ctrl.Result{}, err
 }
 if err := scope.newPatchHelper(scope.Inventory); err != nil {
-return ctrl.Result{}, errors.WithStack(err)
+return ctrl.Result{}, err
 }
 defer func() {
 if err := scope.PatchHelper.Patch(ctx, scope.Inventory); err != nil {
-retErr = errors.Join(retErr, errors.WithStack(err))
+retErr = errors.Join(retErr, err)
 }
 }()

@@ -154,17 +154,17 @@ func (r *InventoryReconciler) Reconcile(ctx context.Context, req reconcile.Reque
 // Handle deleted inventory
 if !scope.Inventory.DeletionTimestamp.IsZero() {
-return ctrl.Result{}, errors.WithStack(r.reconcileDelete(ctx, scope))
+return ctrl.Result{}, r.reconcileDelete(ctx, scope)
 }

-return ctrl.Result{}, errors.WithStack(r.reconcileNormal(ctx, scope))
+return ctrl.Result{}, r.reconcileNormal(ctx, scope)
 }

 func (r *InventoryReconciler) reconcileDelete(ctx context.Context, scope *clusterScope) error {
 // waiting playbook delete
 playbookList := &kkcorev1.PlaybookList{}
 if err := util.GetObjectListFromOwner(ctx, r.Client, scope.Inventory, playbookList); err != nil {
-return errors.Wrapf(err, "failed to get playbook list from inventory %q", ctrlclient.ObjectKeyFromObject(scope.Inventory))
+return err
 }
 for _, obj := range playbookList.Items {
 if err := r.Client.Delete(ctx, &obj); err != nil {

@@ -219,13 +219,13 @@ func (r *InventoryReconciler) reconcileNormal(ctx context.Context, scope *cluste
 // when it's pending: inventory's host haved changed.
 scope.Inventory.Status.Ready = false
 if err := r.createHostCheckPlaybook(ctx, scope); err != nil {
-return errors.Wrapf(err, "failed to create host check playbook in inventory %q", ctrlclient.ObjectKeyFromObject(scope.Inventory))
+return err
 }
 scope.Inventory.Status.Phase = kkcorev1.InventoryPhaseRunning
 case kkcorev1.InventoryPhaseRunning:
 // sync inventory's status from playbook
 if err := r.reconcileInventoryPlaybook(ctx, scope); err != nil {
-return errors.Wrapf(err, "failed to reconcile running inventory %q", ctrlclient.ObjectKeyFromObject(scope.Inventory))
+return err
 }
 case kkcorev1.InventoryPhaseSucceeded:
 // sync inventory's control_plane groups from ControlPlane

@@ -245,11 +245,11 @@ func (r *InventoryReconciler) reconcileNormal(ctx context.Context, scope *cluste
 scope.Inventory.Spec.Groups = make(map[string]kkcorev1.InventoryGroup)
 }
 if err := r.syncInventoryControlPlaneGroups(ctx, scope); err != nil {
-return errors.Wrapf(err, "failed to sync control-plane groups in inventory %q", ctrlclient.ObjectKeyFromObject(scope.Inventory))
+return err
 }
 // sync inventory's worker groups from machinedeployment
 if err := r.syncInventoryWorkerGroups(ctx, scope); err != nil {
-return errors.Wrapf(err, "failed to sync worker groups in inventory %q", ctrlclient.ObjectKeyFromObject(scope.Inventory))
+return err
 }
 scope.Inventory.Spec.Groups[defaultClusterGroup] = kkcorev1.InventoryGroup{
 Groups: []string{getControlPlaneGroupName(), getWorkerGroupName()},

@@ -267,11 +267,11 @@ func (r *InventoryReconciler) reconcileInventoryPlaybook(ctx context.Context, sc
 }
 playbook := &kkcorev1.Playbook{}
 if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Name: scope.Inventory.Annotations[kkcorev1.HostCheckPlaybookAnnotation], Namespace: scope.Namespace}, playbook); err != nil {
-if apierrors.IsNotFound(err) {
-return r.createHostCheckPlaybook(ctx, scope)
+if !apierrors.IsNotFound(err) {
+return errors.Wrapf(err, "failed to get playbook with inventory %q annotation %q", ctrlclient.ObjectKeyFromObject(scope.Inventory), kkcorev1.HostCheckPlaybookAnnotation)
 }

-return errors.Wrapf(err, "failed to get playbook with inventory %q annotation %q", ctrlclient.ObjectKeyFromObject(scope.Inventory), kkcorev1.HostCheckPlaybookAnnotation)
+return r.createHostCheckPlaybook(ctx, scope)
 }
 switch playbook.Status.Phase {
 case kkcorev1.PlaybookPhaseSucceeded:

@@ -343,7 +343,7 @@ func (r *InventoryReconciler) syncInventoryControlPlaneGroups(ctx context.Contex
 // get machineList from controlPlane
 machineList := &clusterv1beta1.MachineList{}
 if err := util.GetObjectListFromOwner(ctx, r.Client, scope.ControlPlane, machineList); err != nil {
-return errors.Wrapf(err, "failed to get machineList from controlPlane %q", ctrlclient.ObjectKeyFromObject(scope.ControlPlane))
+return err
 }
 if len(machineList.Items) != int(groupNum) {
 klog.Info("waiting machine synced.")

@@ -86,11 +86,11 @@ func (r *KKClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 kkcluster := &capkkinfrav1beta1.KKCluster{}
 err := r.Client.Get(ctx, req.NamespacedName, kkcluster)
 if err != nil {
-if apierrors.IsNotFound(err) {
-return ctrl.Result{}, nil
+if !apierrors.IsNotFound(err) {
+return ctrl.Result{}, errors.Wrapf(err, "failed to get kkcluster %q", req.String())
 }

-return ctrl.Result{}, errors.Wrapf(err, "failed to get kkcluster %q", req.String())
+return ctrl.Result{}, nil
 }
 clusterName := kkcluster.Labels[clusterv1beta1.ClusterNameLabel]
 if clusterName == "" {

@@ -103,10 +103,10 @@ func (r *KKClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 Name: clusterName,
 }})
 if err != nil {
-return ctrl.Result{}, errors.WithStack(err)
+return ctrl.Result{}, err
 }
 if err := scope.newPatchHelper(scope.KKCluster, scope.Inventory); err != nil {
-return ctrl.Result{}, errors.WithStack(err)
+return ctrl.Result{}, err
 }
 defer func() {
 if retErr != nil {

@@ -116,10 +116,10 @@ func (r *KKClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 scope.KKCluster.Status.FailureMessage = retErr.Error()
 }
 if err := r.reconcileStatus(ctx, scope); err != nil {
-retErr = errors.Join(retErr, errors.WithStack(err))
+retErr = errors.Join(retErr, err)
 }
 if err := scope.PatchHelper.Patch(ctx, scope.KKCluster, scope.Inventory); err != nil {
-retErr = errors.Join(retErr, errors.WithStack(err))
+retErr = errors.Join(retErr, err)
 }
 }()

@@ -140,11 +140,11 @@ func (r *KKClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 // Handle deleted clusters
 if !scope.KKCluster.DeletionTimestamp.IsZero() {
-return reconcile.Result{}, errors.WithStack(r.reconcileDelete(ctx, scope))
+return reconcile.Result{}, r.reconcileDelete(ctx, scope)
 }

 // Handle non-deleted clusters
-return reconcile.Result{}, errors.WithStack(r.reconcileNormal(ctx, scope))
+return reconcile.Result{}, r.reconcileNormal(ctx, scope)
 }

 // reconcileDelete delete cluster

@@ -152,7 +152,7 @@ func (r *KKClusterReconciler) reconcileDelete(ctx context.Context, scope *cluste
 // waiting inventory deleted
 inventoryList := &kkcorev1.InventoryList{}
 if err := util.GetObjectListFromOwner(ctx, r.Client, scope.KKCluster, inventoryList); err != nil {
-return errors.Wrapf(err, "failed to get inventoryList from kkcluster %q", ctrlclient.ObjectKeyFromObject(scope.KKCluster))
+return err
 }
 for _, obj := range inventoryList.Items {
 if err := r.Client.Delete(ctx, &obj); err != nil {

@@ -174,7 +174,7 @@ func (r *KKClusterReconciler) reconcileNormal(ctx context.Context, scope *cluste
 if err != nil { // cannot convert kkcluster to inventory. may be kkcluster is not valid.
 scope.KKCluster.Status.FailureReason = capkkinfrav1beta1.KKClusterFailedInvalidHosts

-return errors.Wrapf(err, "failed to convert kkcluster %q to inventoryHost", ctrlclient.ObjectKeyFromObject(scope.KKCluster))
+return err
 }
 // if inventory is not exist. create it
 if scope.Inventory.Name == "" {

@@ -194,7 +194,7 @@ func (r *KKClusterReconciler) reconcileNormal(ctx context.Context, scope *cluste
 return errors.Wrapf(err, "failed to set ownerReference from kkcluster %q to inventory", ctrlclient.ObjectKeyFromObject(scope.KKCluster))
 }

-return r.Client.Create(ctx, scope.Inventory)
+return errors.Wrapf(r.Client.Create(ctx, scope.Inventory), "failed to create inventory for kkcluster %q", ctrlclient.ObjectKeyFromObject(scope.KKCluster))
 }

 // if inventory's host is match kkcluster.inventoryHosts. skip

@@ -86,11 +86,11 @@ func (r *KKMachineReconciler) SetupWithManager(mgr ctrl.Manager, o options.Contr
 func (r *KKMachineReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, retErr error) {
 kkmachine := &capkkinfrav1beta1.KKMachine{}
 if err := r.Client.Get(ctx, req.NamespacedName, kkmachine); err != nil {
-if apierrors.IsNotFound(err) {
-return ctrl.Result{}, nil
+if !apierrors.IsNotFound(err) {
+return ctrl.Result{}, errors.Wrapf(err, "failed to get kkmachine %q", req.String())
 }

-return ctrl.Result{}, errors.Wrapf(err, "failed to get kkmachine %q", req.String())
+return ctrl.Result{}, nil
 }
 clusterName := kkmachine.Labels[clusterv1beta1.ClusterNameLabel]
 if clusterName == "" {

@@ -103,14 +103,14 @@ func (r *KKMachineReconciler) Reconcile(ctx context.Context, req reconcile.Reque
 Name: clusterName,
 }})
 if err != nil {
-return ctrl.Result{}, errors.WithStack(err)
+return ctrl.Result{}, err
 }
 if err := scope.newPatchHelper(kkmachine); err != nil {
-return ctrl.Result{}, errors.WithStack(err)
+return ctrl.Result{}, err
 }
 defer func() {
 if err := scope.PatchHelper.Patch(ctx, kkmachine); err != nil {
-retErr = errors.Join(retErr, errors.WithStack(err))
+retErr = errors.Join(retErr, err)
 }
 }()

@@ -130,12 +130,12 @@ func (r *KKMachineReconciler) Reconcile(ctx context.Context, req reconcile.Reque
 }

 if !kkmachine.DeletionTimestamp.IsZero() {
-return reconcile.Result{}, errors.WithStack(r.reconcileDelete(ctx, scope, kkmachine))
+return reconcile.Result{}, r.reconcileDelete(ctx, scope, kkmachine)
 }

 machine := &clusterv1beta1.Machine{}
 if err := util.GetOwnerFromObject(ctx, r.Client, kkmachine, machine); err != nil {
-return reconcile.Result{}, errors.Wrapf(err, "failed to get machine from kkmachine %q", ctrlclient.ObjectKeyFromObject(machine))
+return reconcile.Result{}, err
 }
 kkmachine.Spec.Version = machine.Spec.Version

@@ -151,7 +151,7 @@ func (r *KKMachineReconciler) Reconcile(ctx context.Context, req reconcile.Reque
 return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
 }

-return reconcile.Result{}, errors.WithStack(r.reconcileNormal(ctx, scope, kkmachine, machine))
+return reconcile.Result{}, r.reconcileNormal(ctx, scope, kkmachine, machine)
 }

 // reconcileDelete handles delete reconcile.

@@ -161,7 +161,7 @@ func (r *KKMachineReconciler) reconcileDelete(ctx context.Context, scope *cluste
 delNodePlaybookName := kkmachine.Annotations[capkkinfrav1beta1.DeleteNodePlaybookAnnotation]
 addNodePlaybook, delNodePlaybook, err := r.getPlaybook(ctx, scope, kkmachine)
 if err != nil {
-return errors.WithStack(err)
+return err
 }
 switch {
 case addNodePlaybookName == "" && delNodePlaybookName == "":

@@ -177,11 +177,11 @@ func (r *KKMachineReconciler) reconcileDelete(ctx context.Context, scope *cluste
 return nil
 case addNodePlaybookName != "" && delNodePlaybookName != "":
 if addNodePlaybook != nil && addNodePlaybook.DeletionTimestamp.IsZero() {
-return r.Client.Delete(ctx, addNodePlaybook)
+return errors.Wrapf(r.Client.Delete(ctx, addNodePlaybook), "failed to delete addNodePlaybook for kkmachine %q", ctrlclient.ObjectKeyFromObject(kkmachine))
 }
 if delNodePlaybook != nil && delNodePlaybook.DeletionTimestamp.IsZero() {
 if delNodePlaybook.Status.Phase == kkcorev1.PlaybookPhaseSucceeded {
-return r.Client.Delete(ctx, delNodePlaybook)
+return errors.Wrapf(r.Client.Delete(ctx, delNodePlaybook), "failed to delete delNodePlaybook for kkmachine %q", ctrlclient.ObjectKeyFromObject(kkmachine))
 }
 // should waiting delNodePlaybook completed
 return nil

@@ -238,14 +238,13 @@ func (r *KKMachineReconciler) reconcileNormal(ctx context.Context, scope *cluste
 // check playbook status
 playbook := &kkcorev1.Playbook{}
 if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: scope.Namespace, Name: playbookName}, playbook); err != nil {
-if apierrors.IsNotFound(err) {
-// the playbook has not found.
-r.EventRecorder.Eventf(kkmachine, corev1.EventTypeWarning, "AddNodeFailed", "add node playbook: %q not found", playbookName)
-
-return nil
+if !apierrors.IsNotFound(err) {
+return errors.Wrapf(err, "failed to get playbook %s/%s", scope.Namespace, playbookName)
 }
+// the playbook has not found.
+r.EventRecorder.Eventf(kkmachine, corev1.EventTypeWarning, "AddNodeFailed", "add node playbook: %q not found", playbookName)

-return errors.Wrapf(err, "failed to get playbook %s/%s", scope.Namespace, playbookName)
+return nil
 }

 switch playbook.Status.Phase {

@@ -371,7 +370,7 @@ func (r *KKMachineReconciler) getConfig(scope *clusterScope, kkmachine *capkkinf
 }
 data, err := kubeVersionConfigs.ReadFile(fmt.Sprintf("versions/%s.yaml", *kkmachine.Spec.Version))
 if err != nil {
-return config, errors.Wrap(err, "failed to read default config file")
+return config, err
 }
 if err := yaml.Unmarshal(data, config); err != nil {
 return config, errors.Wrap(err, "failed to unmarshal config file")
@@ -70,7 +70,7 @@ func ParseBool(ctx map[string]any, inputs ...string) (bool, error) {
 return bytes.EqualFold(o, []byte("true"))
 })
 if err != nil {
-return false, errors.WithStack(err)
+return false, err
 }
 if !output {
 return output, nil

@@ -47,19 +47,19 @@ func (e blockExecutor) Exec(ctx context.Context) error {
 // merge variable which defined in block
 if err := e.variable.Merge(variable.MergeRuntimeVariable(block.Vars, hosts...)); err != nil {
-return errors.Wrapf(err, "failed to merge block-variable: %q in playbook %q", block.Name, e.playbook)
+return err
 }

 switch {
 case len(block.Block) != 0:
 if err := e.dealBlock(ctx, hosts, ignoreErrors, when, tags, block); err != nil {
-return errors.Wrapf(err, "failed to deal block %q in playbook %q", block.Name, ctrlclient.ObjectKeyFromObject(e.playbook))
+return err
 }
 case block.IncludeTasks != "":
 // do nothing. include tasks has converted to blocks.
 default:
 if err := e.dealTask(ctx, hosts, when, block); err != nil {
-return errors.Wrapf(err, "failed to deal task %s in playbook %q", block.Name, ctrlclient.ObjectKeyFromObject(e.playbook))
+return err
 }
 }
 }

@@ -122,7 +122,7 @@ func (e blockExecutor) dealBlock(ctx context.Context, hosts []string, ignoreErro
 when: when,
 tags: tags,
 }.Exec(ctx)); err != nil {
-errs = errors.Join(errs, errors.Wrapf(err, "failed to execute block %q tasks in playbook %q", block.Name, ctrlclient.ObjectKeyFromObject(e.playbook)))
+errs = errors.Join(errs, err)
 }
 // if block exec failed exec rescue
 if e.playbook.Status.Phase == kkcorev1.PlaybookPhaseFailed && len(block.Rescue) != 0 {

@@ -135,7 +135,7 @@ func (e blockExecutor) dealBlock(ctx context.Context, hosts []string, ignoreErro
 when: when,
 tags: tags,
 }.Exec(ctx)); err != nil {
-errs = errors.Join(errs, errors.Wrapf(err, "failed to execute rescue %q tasks in playbook %q", block.Name, ctrlclient.ObjectKeyFromObject(e.playbook)))
+errs = errors.Join(errs, err)
 }
 }
 // exec always after block

@@ -149,7 +149,7 @@ func (e blockExecutor) dealBlock(ctx context.Context, hosts []string, ignoreErro
 when: when,
 tags: tags,
 }.Exec(ctx)); err != nil {
-errs = errors.Join(errs, errors.Wrapf(err, "failed to execute always %q tasks in playbook %q", block.Name, ctrlclient.ObjectKeyFromObject(e.playbook)))
+errs = errors.Join(errs, err)
 }
 }
 // when execute error. return

@@ -182,9 +182,5 @@ func (e blockExecutor) dealTask(ctx context.Context, hosts []string, when []stri
 return errors.Wrapf(err, "failed to set playbook %q ownerReferences to %q", ctrlclient.ObjectKeyFromObject(e.playbook), block.Name)
 }

-if err := (&taskExecutor{option: e.option, task: task, taskRunTimeout: 60 * time.Minute}).Exec(ctx); err != nil {
-return errors.Wrapf(err, "failed to execute task %s in playbook %q", block.Name, ctrlclient.ObjectKeyFromObject(e.playbook))
-}
-
-return nil
+return (&taskExecutor{option: e.option, task: task, taskRunTimeout: 60 * time.Minute}).Exec(ctx)
 }

@@ -63,15 +63,13 @@ func (e playbookExecutor) Exec(ctx context.Context) error {
 klog.V(5).InfoS("deal project", "playbook", ctrlclient.ObjectKeyFromObject(e.playbook))
 pj, err := project.New(ctx, *e.playbook, true)
 if err != nil {
-return errors.Wrap(err, "failed to deal project")
+return err
 }

 // convert to transfer.Playbook struct
 pb, err := pj.MarshalPlaybook()
 if err != nil {
-return errors.Wrap(err, "failed to convert playbook")
+return err
 }

 for _, play := range pb.Play {
 // check tags
 if !play.Taggable.IsEnabled(e.playbook.Spec.Tags, e.playbook.Spec.SkipTags) {

@@ -87,17 +85,17 @@ func (e playbookExecutor) Exec(ctx context.Context) error {
 }
 // when gather_fact is set. get host's information from remote.
 if err := e.dealGatherFacts(ctx, play.GatherFacts, hosts); err != nil {
-return errors.Wrap(err, "failed to deal gather_facts argument")
+return err
 }
 // Batch execution, with each batch being a group of hosts run in serial.
 var batchHosts [][]string
 if err := e.dealSerial(play.Serial.Data, hosts, &batchHosts); err != nil {
-return errors.Wrap(err, "failed to deal serial argument")
+return err
 }
 e.dealRunOnce(play.RunOnce, hosts, &batchHosts)
 // exec playbook in each BatchHosts
 if err := e.execBatchHosts(ctx, play, batchHosts); err != nil {
-return errors.Wrap(err, "failed to exec batch hosts")
+return err
 }
 }

@@ -114,7 +112,7 @@ func (e playbookExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P
 }

 if err := e.variable.Merge(variable.MergeRuntimeVariable(play.Vars, serials...)); err != nil {
-return errors.Wrapf(err, "failed to merge variable with play %q", play.Name)
+return err
 }
 // generate task from pre tasks
 if err := (blockExecutor{

@@ -124,7 +122,7 @@ func (e playbookExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P
 blocks: play.PreTasks,
 tags: play.Taggable,
 }.Exec(ctx)); err != nil {
-return errors.Wrapf(err, "failed to execute pre-tasks with play %q", play.Name)
+return err
 }
 // generate task from role
 for _, role := range play.Roles {

@@ -133,7 +131,7 @@ func (e playbookExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P
 continue
 }
 if err := e.variable.Merge(variable.MergeRuntimeVariable(role.Vars, serials...)); err != nil {
-return errors.Wrapf(err, "failed to merge variable with role %q", role.Role)
+return err
 }
 // use the most closely configuration
 ignoreErrors := role.IgnoreErrors

@@ -150,7 +148,7 @@ func (e playbookExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P
 when: role.When.Data,
 tags: kkprojectv1.JoinTag(role.Taggable, play.Taggable),
 }.Exec(ctx)); err != nil {
-return errors.Wrapf(err, "failed to execute role-tasks")
+return err
 }
 }
 // generate task from tasks

@@ -161,7 +159,7 @@ func (e playbookExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P
 blocks: play.Tasks,
 tags: play.Taggable,
 }.Exec(ctx)); err != nil {
-return errors.Wrapf(err, "failed to execute tasks")
+return err
 }
 // generate task from post tasks
 if err := (blockExecutor{

@@ -171,7 +169,7 @@ func (e playbookExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P
 blocks: play.PostTasks,
 tags: play.Taggable,
 }.Exec(ctx)); err != nil {
-return errors.Wrapf(err, "failed to execute post-tasks")
+return err
 }
 }

@@ -182,7 +180,7 @@ func (e playbookExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.P
 func (e playbookExecutor) dealHosts(host kkprojectv1.PlayHost, i *[]string) error {
 ahn, err := e.variable.Get(variable.GetHostnames(host.Hosts))
 if err != nil {
-return errors.Wrapf(err, "failed to get hostnames")
+return err
 }

 if h, ok := ahn.([]string); ok {

@@ -205,20 +203,20 @@ func (e playbookExecutor) dealGatherFacts(ctx context.Context, gatherFacts bool,
 // get host connector
 conn, err := connector.NewConnector(hostname, e.variable)
 if err != nil {
-return errors.Wrapf(err, "failed to new connector in host %q", hostname)
+return err
 }
 if err := conn.Init(ctx); err != nil {
-return errors.Wrapf(err, "failed to init connection in host %q", hostname)
+return err
 }
 defer conn.Close(ctx)

 if gf, ok := conn.(connector.GatherFacts); ok {
 remoteInfo, err := gf.HostInfo(ctx)
 if err != nil {
-return errors.Wrapf(err, "failed to execute gather_facts from connector in host %q", hostname)
+return err
 }
 if err := e.variable.Merge(variable.MergeRemoteVariable(remoteInfo, hostname)); err != nil {
-return errors.Wrapf(err, "failed to merge gather_facts to in host %q", hostname)
+return err
 }
 }

@@ -226,7 +224,7 @@ func (e playbookExecutor) dealGatherFacts(ctx context.Context, gatherFacts bool,
 }
 for _, hostname := range hosts {
 if err := dealGatherFactsInHost(hostname); err != nil {
-return errors.Wrapf(err, "failed to deal gather_facts for host %q", hostname)
+return err
 }
 }

@@ -238,7 +236,7 @@ func (e playbookExecutor) dealSerial(serial []any, hosts []string, batchHosts *[
 var err error
 *batchHosts, err = converter.GroupHostBySerial(hosts, serial)
 if err != nil {
-return errors.Wrapf(err, "failed to group host by serial")
+return err
 }

 return nil

@@ -53,7 +53,7 @@ func (e *taskExecutor) Exec(ctx context.Context) error {
 }()
 // run task
 if err := e.runTaskLoop(ctx); err != nil {
-return errors.Wrapf(err, "failed to run task %q", ctrlclient.ObjectKeyFromObject(e.task))
+return err
 }
 // exit when task run failed
 if e.task.IsFailed() {

@@ -192,14 +192,14 @@ func (e *taskExecutor) execTaskHost(i int, h string) func(ctx context.Context) {
 // task execute
||||
ha, err := e.variable.Get(variable.GetAllVariable(h))
|
||||
if err != nil {
|
||||
stderr = fmt.Sprintf("get variable error: %v", err)
|
||||
stderr = fmt.Sprintf("failed to get host %s variable: %v", h, err)
|
||||
|
||||
return
|
||||
}
|
||||
// convert hostVariable to map
|
||||
had, ok := ha.(map[string]any)
|
||||
if !ok {
|
||||
stderr = fmt.Sprintf("variable is not map error: %v", err)
|
||||
stderr = fmt.Sprintf("host: %s variable is not a map", h)
|
||||
}
|
||||
// check when condition
|
||||
if skip := e.dealWhen(had, &stdout, &stderr); skip {
|
||||
|
|
@ -392,7 +392,7 @@ func (e *taskExecutor) dealRegister(stdout, stderr, host string) error {
|
|||
"stderr": stderrResult,
|
||||
},
|
||||
}, host)); err != nil {
|
||||
return errors.Wrap(err, "failed to register task result to variable")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -24,7 +24,6 @@ import (
|
|||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
kkcorev1 "github.com/kubesphere/kubekey/api/core/v1"
|
||||
"k8s.io/klog/v2"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
|
@ -84,7 +83,7 @@ func (m *commandManager) Run(ctx context.Context) error {
|
|||
m.Playbook.Status.FailureReason = kkcorev1.PlaybookFailedReasonTaskFailed
|
||||
m.Playbook.Status.FailureMessage = err.Error()
|
||||
|
||||
return errors.Wrapf(err, "failed to executor playbook %q", ctrlclient.ObjectKeyFromObject(m.Playbook))
|
||||
return err
|
||||
}
|
||||
m.Playbook.Status.Phase = kkcorev1.PlaybookPhaseSucceeded
|
||||
|
||||
|
|
|
|||
|
|
@ -58,10 +58,10 @@ func (m controllerManager) Run(ctx context.Context) error {
|
|||
}
|
||||
|
||||
if err := m.register(mgr); err != nil {
|
||||
return errors.Wrap(err, "failed to register manager")
|
||||
return err
|
||||
}
|
||||
|
||||
return mgr.Start(ctx)
|
||||
return errors.Wrap(mgr.Start(ctx), "failed to start manager")
|
||||
}
|
||||
|
||||
func (m controllerManager) register(mgr ctrl.Manager) error {
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ func TestCommand(t *testing.T) {
|
|||
Variable: &testVariable{},
|
||||
},
|
||||
ctxFunc: context.Background,
|
||||
exceptStderr: "failed to connector of \"\" error: failed to init connector for host \"\": host is not set",
|
||||
exceptStderr: "failed to connector of \"\" error: host is not set",
|
||||
},
|
||||
{
|
||||
name: "exec command success",
|
||||
|
|
|
|||
|
|
@ -170,12 +170,12 @@ func (ca copyArgs) copyContent(ctx context.Context, mode fs.FileMode, conn conne
|
|||
|
||||
// relDir when copy.src is relative dir, get all files from project, and copy to remote.
|
||||
func (ca copyArgs) relDir(ctx context.Context, pj project.Project, role string, conn connector.Connector) error {
|
||||
if err := pj.WalkDir(ca.src, project.GetFileOption{IsFile: true, Role: role}, func(path string, d fs.DirEntry, err error) error {
|
||||
return pj.WalkDir(ca.src, project.GetFileOption{IsFile: true, Role: role}, func(path string, d fs.DirEntry, err error) error {
|
||||
if d.IsDir() { // only copy file
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to walk dir %s", ca.src)
|
||||
return err
|
||||
}
|
||||
|
||||
info, err := d.Info()
|
||||
|
|
@ -202,16 +202,8 @@ func (ca copyArgs) relDir(ctx context.Context, pj project.Project, role string,
|
|||
dest = filepath.Join(ca.dest, rel)
|
||||
}
|
||||
|
||||
if err := conn.PutFile(ctx, data, dest, mode); err != nil {
|
||||
return errors.Wrap(err, "failed to copy file")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return errors.Wrapf(err, "failed to work dir %a", ca.src)
|
||||
}
|
||||
|
||||
return nil
|
||||
return conn.PutFile(ctx, data, dest, mode)
|
||||
})
|
||||
}
|
||||
|
||||
// absFile when copy.src is absolute file, get file from os, and copy to remote.
|
||||
|
|
@ -225,16 +217,12 @@ func (ca copyArgs) readFile(ctx context.Context, data []byte, mode fs.FileMode,
|
|||
mode = os.FileMode(*ca.mode)
|
||||
}
|
||||
|
||||
if err := conn.PutFile(ctx, data, dest, mode); err != nil {
|
||||
return errors.Wrap(err, "failed to copy file")
|
||||
}
|
||||
|
||||
return nil
|
||||
return conn.PutFile(ctx, data, dest, mode)
|
||||
}
|
||||
|
||||
// absDir when copy.src is absolute dir, get all files from os, and copy to remote.
|
||||
func (ca copyArgs) absDir(ctx context.Context, conn connector.Connector) error {
|
||||
if err := filepath.WalkDir(ca.src, func(path string, d fs.DirEntry, err error) error {
|
||||
return filepath.WalkDir(ca.src, func(path string, d fs.DirEntry, err error) error {
|
||||
if d.IsDir() { // only copy file
|
||||
return nil
|
||||
}
|
||||
|
|
@ -267,14 +255,6 @@ func (ca copyArgs) absDir(ctx context.Context, conn connector.Connector) error {
|
|||
dest = filepath.Join(ca.dest, rel)
|
||||
}
|
||||
|
||||
if err := conn.PutFile(ctx, data, dest, mode); err != nil {
|
||||
return errors.Wrap(err, "failed to put file")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return errors.Wrapf(err, "failed to walk dir %q", ca.src)
|
||||
}
|
||||
|
||||
return nil
|
||||
return conn.PutFile(ctx, data, dest, mode)
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
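The copy-module hunks above also flatten the walk helpers: rather than wrapping the WalkDir error at the call site and ending with a separate return nil, relDir and absDir now return the walk call (and, inside the callback, the final PutFile call) directly, because the inner operations already wrap their own failures. A stand-alone sketch of that shape, under the same assumption about github.com/cockroachdb/errors; copyTree and putFile are hypothetical stand-ins, not functions from this repository:

```go
package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"

	"github.com/cockroachdb/errors"
)

// copyTree walks a directory tree and hands every regular file to putFile.
// The error from WalkDir is returned directly; only the operations the
// callback performs itself are wrapped, so callers see one layer of context.
func copyTree(src string) error {
	return filepath.WalkDir(src, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return errors.Wrapf(err, "failed to walk dir %q", src)
		}
		if d.IsDir() { // only copy files
			return nil
		}

		data, err := os.ReadFile(path)
		if err != nil {
			return errors.Wrapf(err, "failed to read file %q", path)
		}

		// Return the last operation's error as-is instead of wrapping it
		// again and following it with `return nil`.
		return putFile(data, path)
	})
}

// putFile is a hypothetical stand-in for the connector's PutFile.
func putFile(data []byte, dest string) error {
	fmt.Printf("would copy %d bytes to %s\n", len(data), dest)

	return nil
}

func main() {
	if err := copyTree("."); err != nil {
		fmt.Println(err)
	}
}
```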
@ -62,11 +62,11 @@ type genCertArgs struct {
|
|||
func (gca genCertArgs) signedCertificate(cfg *cgutilcert.Config) (string, string) {
|
||||
parentKey, err := TryLoadKeyFromDisk(gca.rootKey)
|
||||
if err != nil {
|
||||
return "", fmt.Sprintf("failed to load root key: %+v", err)
|
||||
return "", fmt.Sprintf("failed to load root key: %v", err)
|
||||
}
|
||||
parentCert, _, err := TryLoadCertChainFromDisk(gca.rootCert)
|
||||
if err != nil {
|
||||
return "", fmt.Sprintf("failed to load root certificate: %+v", err)
|
||||
return "", fmt.Sprintf("failed to load root certificate: %v", err)
|
||||
}
|
||||
|
||||
if gca.policy == policyIfNotPresent {
|
||||
|
|
|
|||
|
|
@ -80,7 +80,7 @@ func (i imagePullArgs) pull(ctx context.Context) error {
|
|||
|
||||
dst, err := newLocalRepository(filepath.Join(domain, src.Reference.Repository)+":"+src.Reference.Reference, i.imagesDir)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get local repository %q for image %q", i.imagesDir, img)
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = oras.Copy(ctx, src, src.Reference.Reference, dst, "", oras.DefaultCopyOptions); err != nil {
|
||||
|
|
@ -104,14 +104,14 @@ type imagePushArgs struct {
|
|||
func (i imagePushArgs) push(ctx context.Context) error {
|
||||
manifests, err := findLocalImageManifests(i.imagesDir)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to find image manifests in local dir %p", i.imagesDir)
|
||||
return err
|
||||
}
|
||||
klog.V(5).Info("manifests found", "manifests", manifests)
|
||||
|
||||
for _, img := range manifests {
|
||||
src, err := newLocalRepository(filepath.Join(domain, img), i.imagesDir)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get local repository %q for image %q", i.imagesDir, img)
|
||||
return err
|
||||
}
|
||||
repo := src.Reference.Repository
|
||||
if i.namespace != "" {
|
||||
|
|
@ -251,7 +251,7 @@ func findLocalImageManifests(localDir string) ([]string, error) {
|
|||
var manifests []string
|
||||
if err := filepath.WalkDir(localDir, func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return errors.Wrapf(err, "failed to walkdir %s", path)
|
||||
}
|
||||
|
||||
if path == filepath.Join(localDir, "blobs") {
|
||||
|
|
@ -290,7 +290,7 @@ func findLocalImageManifests(localDir string) ([]string, error) {
|
|||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to work dir %q", localDir)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return manifests, nil
|
||||
|
|
|
|||
|
|
@ -61,7 +61,7 @@ type ExecOptions struct {
|
|||
func (o ExecOptions) getAllVariables() (map[string]any, error) {
|
||||
ha, err := o.Variable.Get(variable.GetAllVariable(o.Host))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get host %s variable", o.Host)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vd, ok := ha.(map[string]any)
|
||||
|
|
@ -119,12 +119,12 @@ func getConnector(ctx context.Context, host string, v variable.Variable) (connec
|
|||
} else {
|
||||
conn, err = connector.NewConnector(host, v)
|
||||
if err != nil {
|
||||
return conn, errors.Wrapf(err, "failed to get connector for host %q", host)
|
||||
return conn, err
|
||||
}
|
||||
}
|
||||
|
||||
if err = conn.Init(ctx); err != nil {
|
||||
return conn, errors.Wrapf(err, "failed to init connector for host %q", host)
|
||||
return conn, err
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
|
|
|
|||
|
|
@ -152,7 +152,7 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
|
|||
func (ta templateArgs) readFile(ctx context.Context, data string, mode fs.FileMode, conn connector.Connector, vars map[string]any) error {
|
||||
result, err := tmpl.Parse(vars, data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse file")
|
||||
return err
|
||||
}
|
||||
|
||||
dest := ta.dest
|
||||
|
|
@ -164,21 +164,17 @@ func (ta templateArgs) readFile(ctx context.Context, data string, mode fs.FileMo
|
|||
mode = os.FileMode(*ta.mode)
|
||||
}
|
||||
|
||||
if err := conn.PutFile(ctx, result, dest, mode); err != nil {
|
||||
return errors.Wrap(err, "failed to copy file")
|
||||
}
|
||||
|
||||
return nil
|
||||
return conn.PutFile(ctx, result, dest, mode)
|
||||
}
|
||||
|
||||
// relDir when template.src is relative dir, get all files from project, parse it, and copy to remote.
|
||||
func (ta templateArgs) relDir(ctx context.Context, pj project.Project, role string, conn connector.Connector, vars map[string]any) error {
|
||||
if err := pj.WalkDir(ta.src, project.GetFileOption{IsTemplate: true, Role: role}, func(path string, d fs.DirEntry, err error) error {
|
||||
return pj.WalkDir(ta.src, project.GetFileOption{IsTemplate: true, Role: role}, func(path string, d fs.DirEntry, err error) error {
|
||||
if d.IsDir() { // only copy file
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
info, err := d.Info()
|
||||
|
|
@ -209,16 +205,8 @@ func (ta templateArgs) relDir(ctx context.Context, pj project.Project, role stri
|
|||
dest = filepath.Join(ta.dest, rel)
|
||||
}
|
||||
|
||||
if err := conn.PutFile(ctx, result, dest, mode); err != nil {
|
||||
return errors.Wrap(err, "failed to put file")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return errors.Wrapf(err, "failed to walk dir %q", ta.src)
|
||||
}
|
||||
|
||||
return nil
|
||||
return conn.PutFile(ctx, result, dest, mode)
|
||||
})
|
||||
}
|
||||
|
||||
// absDir when template.src is absolute dir, get all files by os, parse it, and copy to remote.
|
||||
|
|
|
|||
|
|
@ -63,12 +63,12 @@ func newGitProject(ctx context.Context, playbook kkcorev1.Playbook, update bool)
|
|||
if _, err := os.Stat(p.projectDir); os.IsNotExist(err) {
|
||||
// git clone
|
||||
if err := p.gitClone(ctx); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to clone git project")
|
||||
return nil, err
|
||||
}
|
||||
} else if update {
|
||||
// git pull
|
||||
if err := p.gitPull(ctx); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to pull git project")
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -34,19 +34,19 @@ func marshalPlaybook(baseFS fs.FS, pbPath string) (*kkprojectv1.Playbook, error)
|
|||
// convert playbook to kkprojectv1.Playbook
|
||||
pb := &kkprojectv1.Playbook{}
|
||||
if err := loadPlaybook(baseFS, pbPath, pb); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to load playbook")
|
||||
return nil, err
|
||||
}
|
||||
// convertRoles.
|
||||
if err := convertRoles(baseFS, pbPath, pb); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert roles")
|
||||
return nil, err
|
||||
}
|
||||
// convertIncludeTasks
|
||||
if err := convertIncludeTasks(baseFS, pbPath, pb); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert include tasks")
|
||||
return nil, err
|
||||
}
|
||||
// validate playbook
|
||||
if err := pb.Validate(); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to validate playbook")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pb, nil
|
||||
|
|
@ -66,15 +66,15 @@ func loadPlaybook(baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook) error {
|
|||
|
||||
for _, p := range plays {
|
||||
if err := dealImportPlaybook(p, baseFS, pbPath, pb); err != nil {
|
||||
return errors.Wrapf(err, "failed to load import_playbook in playbook %q", pbPath)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := dealVarsFiles(&p, baseFS, pbPath); err != nil {
|
||||
return errors.Wrapf(err, "failed to load vars_files in playbook %q", pbPath)
|
||||
return err
|
||||
}
|
||||
// fill block in roles
|
||||
if err := dealRoles(p, baseFS, pbPath); err != nil {
|
||||
return errors.Wrapf(err, "failed to load roles in playbook %q failed: %w", pbPath)
|
||||
return err
|
||||
}
|
||||
|
||||
pb.Play = append(pb.Play, p)
|
||||
|
|
@ -91,7 +91,7 @@ func dealImportPlaybook(p kkprojectv1.Play, baseFS fs.FS, pbPath string, pb *kkp
|
|||
return errors.Errorf("import_playbook %q path is empty, it's maybe [project-dir/playbooks/import_playbook_file, playbook-dir/playbooks/import_playbook-file, playbook-dir/import_playbook-file]", p.ImportPlaybook)
|
||||
}
|
||||
if err := loadPlaybook(baseFS, importPlaybook, pb); err != nil {
|
||||
return errors.Wrapf(err, "failed to load playbook %q", importPlaybook)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -159,11 +159,11 @@ func convertRoles(baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook) error {
|
|||
|
||||
var err error
|
||||
if p.Roles[i].Block, err = convertRoleBlocks(baseFS, pbPath, roleBase); err != nil {
|
||||
return errors.Wrapf(err, "failed to convert role %q tasks in playbook %q", r.Role, pbPath)
|
||||
return err
|
||||
}
|
||||
|
||||
if err = convertRoleVars(baseFS, roleBase, &p.Roles[i]); err != nil {
|
||||
return errors.Wrapf(err, "failed to convert role %q defaults in playbook %q", r.Role, pbPath)
|
||||
return err
|
||||
}
|
||||
}
|
||||
pb.Play[i] = p
|
||||
|
|
@ -217,21 +217,21 @@ func convertIncludeTasks(baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook)
|
|||
var pbBase = filepath.Dir(filepath.Dir(pbPath))
|
||||
for _, play := range pb.Play {
|
||||
if err := fileToBlock(baseFS, pbBase, play.PreTasks); err != nil {
|
||||
return errors.Wrapf(err, "failed to convert pre_tasks file %q", pbPath)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fileToBlock(baseFS, pbBase, play.Tasks); err != nil {
|
||||
return errors.Wrapf(err, "failed to convert tasks file %q", pbPath)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fileToBlock(baseFS, pbBase, play.PostTasks); err != nil {
|
||||
return errors.Wrapf(err, "failed to convert post_tasks file %q", pbPath)
|
||||
return err
|
||||
}
|
||||
|
||||
for _, r := range play.Roles {
|
||||
roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
|
||||
if err := fileToBlock(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir), r.Block); err != nil {
|
||||
return errors.Wrapf(err, "failed to convert role %q", filepath.Join(pbPath, r.Role))
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -256,15 +256,15 @@ func fileToBlock(baseFS fs.FS, baseDir string, blocks []kkprojectv1.Block) error
|
|||
}
|
||||
|
||||
if err := fileToBlock(baseFS, baseDir, b.Block); err != nil {
|
||||
return errors.Wrapf(err, "failed to convert block file %q", filepath.Join(baseDir, b.IncludeTasks))
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fileToBlock(baseFS, baseDir, b.Rescue); err != nil {
|
||||
return errors.Wrapf(err, "failed to convert rescue file %q", filepath.Join(baseDir, b.IncludeTasks))
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fileToBlock(baseFS, baseDir, b.Always); err != nil {
|
||||
return errors.Wrapf(err, "failed to convert always file %q", filepath.Join(baseDir, b.IncludeTasks))
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -52,7 +52,7 @@ func newLocalProject(playbook kkcorev1.Playbook) (Project, error) {
|
|||
projectDir := filepath.Dir(filepath.Dir(playbook.Spec.Playbook))
|
||||
pb, err := filepath.Rel(projectDir, playbook.Spec.Playbook)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrapf(err, "failed to get rel path for playbook %q", playbook.Spec.Playbook)
|
||||
}
|
||||
|
||||
return &localProject{Playbook: playbook, projectDir: projectDir, playbook: pb}, nil
|
||||
|
|
|
|||
|
|
@ -109,7 +109,7 @@ func newAPIIResources(gv schema.GroupVersion) *apiResources {
|
|||
// AddResource add a api-resources
|
||||
func (r *apiResources) AddResource(o resourceOptions) error {
|
||||
if err := o.init(); err != nil {
|
||||
return errors.Wrap(err, "failed to initialize resourceOptions")
|
||||
return err
|
||||
}
|
||||
r.resourceOptions = append(r.resourceOptions, o)
|
||||
storageVersionProvider, isStorageVersionProvider := o.storage.(apirest.StorageVersionProvider)
|
||||
|
|
@ -120,7 +120,7 @@ func (r *apiResources) AddResource(o resourceOptions) error {
|
|||
versioner := storageVersionProvider.StorageVersion()
|
||||
gvk, err := getStorageVersionKind(versioner, o.storage, r.typer)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get storage %q version kind", reflect.TypeOf(o.storage))
|
||||
return err
|
||||
}
|
||||
apiResource.Group = gvk.Group
|
||||
apiResource.Version = gvk.Version
|
||||
|
|
@ -161,11 +161,11 @@ func getStorageVersionKind(storageVersioner runtime.GroupVersioner, storage apir
|
|||
object := storage.New()
|
||||
fqKinds, _, err := typer.ObjectKinds(object)
|
||||
if err != nil {
|
||||
return schema.GroupVersionKind{}, err
|
||||
return schema.GroupVersionKind{}, errors.Wrap(err, "failed to get object kind")
|
||||
}
|
||||
gvk, ok := storageVersioner.KindForGroupVersionKinds(fqKinds)
|
||||
if !ok {
|
||||
return schema.GroupVersionKind{}, errors.Errorf("cannot find the storage version kind for %v", reflect.TypeOf(object))
|
||||
return schema.GroupVersionKind{}, errors.Errorf("failed to find the storage version kind for %v", reflect.TypeOf(object))
|
||||
}
|
||||
|
||||
return gvk, nil
|
||||
|
|
|
|||
|
|
@ -103,7 +103,7 @@ func (s fileStorage) Create(_ context.Context, key string, obj, out runtime.Obje
|
|||
return errors.Wrapf(err, "failed to encode object %q", key)
|
||||
}
|
||||
if err := decode(s.codec, data, out); err != nil {
|
||||
return errors.Wrapf(err, "failed to decode object %q", key)
|
||||
return err
|
||||
}
|
||||
// render to file
|
||||
if err := os.WriteFile(key+yamlSuffix, data, os.ModePerm); err != nil {
|
||||
|
|
@ -119,7 +119,7 @@ func (s fileStorage) Delete(ctx context.Context, key string, out runtime.Object,
|
|||
out = cachedExistingObject
|
||||
} else {
|
||||
if err := s.Get(ctx, key, apistorage.GetOptions{}, out); err != nil {
|
||||
return errors.Wrapf(err, "failed to get object %q", key)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -128,7 +128,7 @@ func (s fileStorage) Delete(ctx context.Context, key string, out runtime.Object,
|
|||
}
|
||||
|
||||
if err := validateDeletion(ctx, out); err != nil {
|
||||
return errors.Wrapf(err, "failed to validate deletion for object %q", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// delete object: rename file to trigger watcher, it will actual delete by watcher.
|
||||
|
|
@ -150,11 +150,8 @@ func (s fileStorage) Get(_ context.Context, key string, _ apistorage.GetOptions,
|
|||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to read object file %q", key)
|
||||
}
|
||||
if err := decode(s.codec, data, out); err != nil {
|
||||
return errors.Wrapf(err, "failed to decode object %q", key)
|
||||
}
|
||||
|
||||
return nil
|
||||
return decode(s.codec, data, out)
|
||||
}
|
||||
|
||||
// GetList local resource files.
|
||||
|
|
@ -171,13 +168,13 @@ func (s fileStorage) GetList(_ context.Context, key string, opts apistorage.List
|
|||
// Build matching rules for resource version and continue key.
|
||||
resourceVersionMatchRule, continueKeyMatchRule, err := s.buildMatchRules(key, opts, &sync.Once{})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to build matchRules %q", key)
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the root entries in the directory corresponding to 'key'.
|
||||
rootEntries, isAllNamespace, err := s.getRootEntries(key)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get root entries %q", key)
|
||||
return err
|
||||
}
|
||||
|
||||
var lastKey string
|
||||
|
|
@ -286,7 +283,7 @@ func (s fileStorage) processNamespaceDirectory(key string, ns os.DirEntry, v ref
|
|||
for _, entry := range entries {
|
||||
err := s.processResourceFile(nsDir, entry, v, continueKeyMatchRule, resourceVersionMatchRule, lastKey, opts, listObj)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
// Check if we have reached the limit of results requested by the client.
|
||||
if opts.Predicate.Limit != 0 && int64(v.Len()) >= opts.Predicate.Limit {
|
||||
|
|
@ -372,7 +369,7 @@ func (s fileStorage) GuaranteedUpdate(ctx context.Context, key string, destinati
|
|||
} else {
|
||||
oldObj = s.newFunc()
|
||||
if err := s.Get(ctx, key, apistorage.GetOptions{IgnoreNotFound: ignoreNotFound}, oldObj); err != nil {
|
||||
return errors.Wrapf(err, "failed to get object %q", key)
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := preconditions.Check(key, oldObj); err != nil {
|
||||
|
|
@ -389,7 +386,7 @@ func (s fileStorage) GuaranteedUpdate(ctx context.Context, key string, destinati
|
|||
}
|
||||
out, _, err := tryUpdate(oldObj, apistorage.ResponseMeta{ResourceVersion: oldVersion + 1})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to try update %q", key)
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := runtime.Encode(s.codec, out)
|
||||
|
|
@ -400,7 +397,7 @@ func (s fileStorage) GuaranteedUpdate(ctx context.Context, key string, destinati
|
|||
if destination != nil {
|
||||
err = decode(s.codec, data, destination)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to decode resource file %q", key)
|
||||
return err
|
||||
}
|
||||
}
|
||||
// render to file
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ import (
|
|||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer/json"
|
||||
|
|
@ -98,7 +99,7 @@ func (f *fileRESTOptionsGetter) GetRESTOptions(resource schema.GroupResource, ex
|
|||
}
|
||||
cacher, err := cacherstorage.NewCacherFromConfig(cacherConfig)
|
||||
if err != nil {
|
||||
return nil, func() {}, err
|
||||
return nil, func() {}, errors.Wrap(err, "failed to new cache")
|
||||
}
|
||||
var once sync.Once
|
||||
destroyFunc := func() {
|
||||
|
|
|
|||
|
|
@ -18,7 +18,6 @@ package proxy
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
|
@ -54,14 +53,14 @@ import (
|
|||
|
||||
// RestConfig replace the restconfig transport to proxy transport
|
||||
func RestConfig(runtimedir string, restconfig *rest.Config) error {
|
||||
var err error
|
||||
restconfig.Transport, err = newProxyTransport(runtimedir, restconfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create proxy transport error: %w", err)
|
||||
}
|
||||
restconfig.TLSClientConfig = rest.TLSClientConfig{}
|
||||
transport, err := newProxyTransport(runtimedir, restconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
restconfig.Transport = transport
|
||||
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewProxyTransport return a new http.RoundTripper use in ctrl.client.
|
||||
|
|
@ -92,22 +91,22 @@ func newProxyTransport(runtimedir string, restConfig *rest.Config) (http.RoundTr
|
|||
kkv1alpha1 := newAPIIResources(kkcorev1alpha1.SchemeGroupVersion)
|
||||
storage, err := task.NewStorage(internal.NewFileRESTOptionsGetter(runtimedir, kkcorev1alpha1.SchemeGroupVersion))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create task storage")
|
||||
return nil, err
|
||||
}
|
||||
if err := kkv1alpha1.AddResource(resourceOptions{
|
||||
path: "tasks",
|
||||
storage: storage.Task,
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to add tasks resource")
|
||||
return nil, err
|
||||
}
|
||||
if err := kkv1alpha1.AddResource(resourceOptions{
|
||||
path: "tasks/status",
|
||||
storage: storage.TaskStatus,
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to add tasks/status resource")
|
||||
return nil, err
|
||||
}
|
||||
if err := lt.registerResources(kkv1alpha1); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to register v1alpha1 resources")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// when restConfig is null. should store all resource local
|
||||
|
|
@ -117,34 +116,34 @@ func newProxyTransport(runtimedir string, restConfig *rest.Config) (http.RoundTr
|
|||
// add inventory
|
||||
inventoryStorage, err := inventory.NewStorage(internal.NewFileRESTOptionsGetter(runtimedir, kkcorev1.SchemeGroupVersion))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create inventory storage")
|
||||
return nil, err
|
||||
}
|
||||
if err := kkv1.AddResource(resourceOptions{
|
||||
path: "inventories",
|
||||
storage: inventoryStorage.Inventory,
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to add inventories resource")
|
||||
return nil, err
|
||||
}
|
||||
// add playbook
|
||||
playbookStorage, err := playbook.NewStorage(internal.NewFileRESTOptionsGetter(runtimedir, kkcorev1.SchemeGroupVersion))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create playbook storage")
|
||||
return nil, err
|
||||
}
|
||||
if err := kkv1.AddResource(resourceOptions{
|
||||
path: "playbooks",
|
||||
storage: playbookStorage.Playbook,
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to add playbooks resource")
|
||||
return nil, err
|
||||
}
|
||||
if err := kkv1.AddResource(resourceOptions{
|
||||
path: "playbooks/status",
|
||||
storage: playbookStorage.PlaybookStatus,
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to add playbooks/status resource")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := lt.registerResources(kkv1); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to register v1 resources")
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -197,7 +196,7 @@ func (l *transport) RoundTrip(request *http.Request) (*http.Response, error) {
|
|||
// dispatch request
|
||||
handler, err := l.detectDispatcher(request)
|
||||
if err != nil {
|
||||
return response, errors.Wrapf(err, "no router for request. url: %s, method: %s", request.URL.Path, request.Method)
|
||||
return response, err
|
||||
}
|
||||
// call handler
|
||||
l.handlerChainFunc(handler).ServeHTTP(&responseWriter{response}, request)
|
||||
|
|
|
|||
|
|
@ -160,11 +160,11 @@ func parseVariable(v any, parseTmplFunc func(string) (string, error)) error {
|
|||
switch reflect.ValueOf(v).Kind() {
|
||||
case reflect.Map:
|
||||
if err := parseVariableFromMap(v, parseTmplFunc); err != nil {
|
||||
return errors.Wrap(err, "failed to parseVariableFromMap")
|
||||
return err
|
||||
}
|
||||
case reflect.Slice, reflect.Array:
|
||||
if err := parseVariableFromArray(v, parseTmplFunc); err != nil {
|
||||
return errors.Wrap(err, "failed to parseVariableFromArray")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -182,7 +182,7 @@ func parseVariableFromMap(v any, parseTmplFunc func(string) (string, error)) err
|
|||
|
||||
newValue, err := parseTmplFunc(vv)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parseTmplFunc of %q", vv)
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
|
|
@ -195,7 +195,7 @@ func parseVariableFromMap(v any, parseTmplFunc func(string) (string, error)) err
|
|||
}
|
||||
} else {
|
||||
if err := parseVariable(val.Interface(), parseTmplFunc); err != nil {
|
||||
return errors.Wrap(err, "failed to parseVariable")
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -214,7 +214,7 @@ func parseVariableFromArray(v any, parseTmplFunc func(string) (string, error)) e
|
|||
|
||||
newValue, err := parseTmplFunc(vv)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parseTmplFunc of %q", vv)
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
|
|
@ -227,7 +227,7 @@ func parseVariableFromArray(v any, parseTmplFunc func(string) (string, error)) e
|
|||
}
|
||||
} else {
|
||||
if err := parseVariable(val.Interface(), parseTmplFunc); err != nil {
|
||||
return errors.Wrap(err, "failed to parseVariable")
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -305,7 +305,7 @@ func StringSliceVar(d map[string]any, vars map[string]any, key string) ([]string
|
|||
case string:
|
||||
as, err := tmpl.Parse(d, valv)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse variable %q of key %q", valv, key)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ss []string
|
||||
|
|
@ -342,7 +342,7 @@ func IntVar(d map[string]any, vars map[string]any, key string) (*int, error) {
|
|||
case reflect.String:
|
||||
vs, err := tmpl.ParseFunc(d, v.String(), func(b []byte) string { return string(b) })
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse string variable %q of key %q", v.String(), key)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
atoi, err := strconv.Atoi(vs)
|
||||
|
|
@ -370,7 +370,7 @@ func BoolVar(d map[string]any, args map[string]any, key string) (*bool, error) {
|
|||
case reflect.String:
|
||||
vs, err := tmpl.ParseBool(d, v.String())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse string variable %q to bool of key %q", v.String(), key)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ptr.To(vs), nil
|
||||
|
|
@ -383,7 +383,7 @@ func BoolVar(d map[string]any, args map[string]any, key string) (*bool, error) {
|
|||
func DurationVar(d map[string]any, args map[string]any, key string) (time.Duration, error) {
|
||||
stringVar, err := StringVar(d, args, key)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "failed to get string variable of key %q", key)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return time.ParseDuration(stringVar)
|
||||
|
|
@ -443,7 +443,7 @@ func Extension2String(d map[string]any, ext runtime.RawExtension) (string, error
|
|||
|
||||
result, err := tmpl.Parse(d, input)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "failed to parse %q", input)
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(result), nil
|
||||
|
|
|
|||
|
|
@ -35,6 +35,9 @@ var _ Source = &fileSource{}
|
|||
// NewFileSource returns a new fileSource.
|
||||
func NewFileSource(path string) (Source, error) {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, errors.Wrapf(err, "failed to stat source path %q", path)
|
||||
}
|
||||
if err := os.MkdirAll(path, os.ModePerm); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create source path %q", path)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ func New(ctx context.Context, client ctrlclient.Client, playbook kkcorev1.Playbo
|
|||
path := filepath.Join(_const.GetWorkdirFromConfig(playbook.Spec.Config), _const.RuntimeDir, kkcorev1.SchemeGroupVersion.String(), _const.RuntimePlaybookDir, playbook.Namespace, playbook.Name, _const.RuntimePlaybookVariableDir)
|
||||
s, err = source.NewFileSource(path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create file source %q", path)
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("unsupported source type: %v", st)
|
||||
|
|
|
|||
|
|
@ -49,7 +49,7 @@ var MergeRuntimeVariable = func(data map[string]any, hosts ...string) MergeFunc
|
|||
}
|
||||
// parse variable
|
||||
if err := parseVariable(data, runtimeVarParser(v, hostname, data)); err != nil {
|
||||
return errors.Wrap(err, "failed to parseVariable")
|
||||
return err
|
||||
}
|
||||
hv := vv.value.Hosts[hostname]
|
||||
hv.RuntimeVars = CombineVariables(hv.RuntimeVars, data)
|
||||
|
|
@ -69,7 +69,7 @@ var MergeAllRuntimeVariable = func(data map[string]any, hostname string) MergeFu
|
|||
}
|
||||
// parse variable
|
||||
if err := parseVariable(data, runtimeVarParser(v, hostname, data)); err != nil {
|
||||
return errors.Wrap(err, "failed to parseVariable")
|
||||
return err
|
||||
}
|
||||
for h := range vv.value.Hosts {
|
||||
if _, ok := v.(*variable); !ok {
|
||||
|
|
@ -88,7 +88,7 @@ func runtimeVarParser(v Variable, hostname string, data map[string]any) func(str
|
|||
return func(s string) (string, error) {
|
||||
curVariable, err := v.Get(GetAllVariable(hostname))
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "get host %s variables", hostname)
|
||||
return "", err
|
||||
}
|
||||
cv, ok := curVariable.(map[string]any)
|
||||
if !ok {
|
||||
|
|
|
|||
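Taken together, these hunks also change what callers ultimately observe in error strings: with the intermediate wraps gone, only the innermost context survives, which is why the connector test expectation earlier in this diff drops the extra "failed to init connector for host" prefix. A small sketch of how github.com/cockroachdb/errors composes wrapped messages; the message texts are chosen to mirror that test and are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
)

func main() {
	base := errors.New("host is not set")

	// A single wrap yields one "context: cause" message.
	once := errors.Wrap(base, "failed to init connector")
	fmt.Println(once) // failed to init connector: host is not set

	// Wrapping again at every intermediate layer stacks prefixes; this is
	// the duplication the commit removes from the middle of the call chain.
	twice := errors.Wrap(once, "failed to get connector")
	fmt.Println(twice) // failed to get connector: failed to init connector: host is not set
}
```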