Merge pull request #2275 from ImitationImmortal/feature

feat: add sonobuoy plugin.
KubeSphere CI Bot 2024-06-06 16:44:11 +08:00 committed by GitHub
commit 4700da3cc6
30 changed files with 494 additions and 116 deletions


@ -25,3 +25,9 @@ import (
//go:embed playbooks roles
var BuiltinPipeline embed.FS
//go:embed inventory/inventory.yaml
var DefaultInventory []byte
//go:embed inventory/config.yaml
var DefaultConfig []byte


@ -1,7 +1,7 @@
apiVersion: kubekey.kubesphere.io/v1
kind: Config
metadata:
name: example
name: default
spec:
# zone for kk. how to download files
# kkzone: cn


@ -1,7 +1,7 @@
apiVersion: kubekey.kubesphere.io/v1
kind: Inventory
metadata:
name: example
name: default
spec:
hosts: # you can set all nodes here, or set nodes in specific groups.
# localhost: {} localhost is the default host.


@ -4,42 +4,42 @@
command: runc --version
register: runc_install_version
- name: Sync Runc Binary to remote
- name: Sync runc binary to remote
copy:
src: "{{ work_dir }}/kubekey/runc/{{ runc_version }}/{{ binary_type.stdout }}/runc.{{ binary_type.stdout }}"
dest: "/usr/local/bin/runc"
mode: 0755
when: runc_install_version.stderr != ""
- name: Check if Containerd is installed
- name: Check if containerd is installed
ignore_errors: true
command: containerd --version
register: containerd_install_version
- name: Sync Containerd Binary to remote
- name: Sync containerd binary to remote
copy:
src: "{{ work_dir }}/kubekey/containerd/{{ containerd_version }}/{{ binary_type.stdout }}/containerd-{{ containerd_version|slice:'1:' }}-linux-{{ binary_type.stdout }}.tar.gz"
dest: "/tmp/kubekey/containerd-{{ containerd_version|slice:'1:' }}-linux-{{ binary_type.stdout }}.tar.gz"
when: containerd_install_version.stderr != ""
- name: Unpackage Containerd binary
- name: Unpackage containerd binary
command: |
tar -xvf /tmp/kubekey/containerd-{{ containerd_version|slice:'1:' }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin/
when: containerd_install_version.stderr != ""
- name: Generate Containerd config file
- name: Generate containerd config file
template:
src: containerd.config
dest: /etc/containerd/config.toml
when: containerd_install_version.stderr != ""
- name: Generate Containerd Service file
- name: Generate containerd Service file
copy:
src: containerd.service
dest: /etc/systemd/system/containerd.service
when: containerd_install_version.stderr != ""
- name: Start Containerd
- name: Start containerd
command: |
systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
when: containerd_install_version.stderr != ""


@ -1,3 +1,6 @@
//go:build builtin
// +build builtin
/*
Copyright 2024 The KubeSphere Authors.
@ -5,7 +8,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@ -14,14 +17,20 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// this file contains the two default files: inventory.yaml and config.yaml
package options
package builtin
import (
"gopkg.in/yaml.v3"
import _ "embed"
"github.com/kubesphere/kubekey/v4/builtin"
)
//go:embed inventory/inventory.yaml
var DefaultInventory []byte
func init() {
if err := yaml.Unmarshal(builtin.DefaultConfig, defaultConfig); err != nil {
panic(err)
}
//go:embed inventory/config.yaml
var DefaultConfig []byte
if err := yaml.Unmarshal(builtin.DefaultInventory, defaultInventory); err != nil {
panic(err)
}
}
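A note on the pattern above: under the builtin build tag, inventory/config.yaml are compiled into the binary with go:embed and decoded once in init(), so the defaults exist before any command runs. Below is a minimal standalone illustration of go:embed (it embeds its own source file, saved as main.go, since the real YAML files are not reproduced here); it is a sketch, not the project's code.

package main

import (
	_ "embed" // enables the //go:embed directive for []byte / string vars
	"fmt"
)

// The embedded file is resolved at compile time; a missing file is a
// build error, not a runtime one.
//go:embed main.go
var self []byte

func main() {
	// By the time main runs, the embedded bytes (and anything an init
	// function decoded from them) are already populated.
	fmt.Printf("embedded %d bytes of main.go\n", len(self))
}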


@ -29,10 +29,22 @@ import (
"k8s.io/klog/v2"
"sigs.k8s.io/yaml"
"github.com/kubesphere/kubekey/v4/builtin"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
)
var defaultConfig = &kubekeyv1.Config{
TypeMeta: metav1.TypeMeta{
APIVersion: kubekeyv1.SchemeGroupVersion.String(),
Kind: "Config",
},
ObjectMeta: metav1.ObjectMeta{Name: "default"}}
var defaultInventory = &kubekeyv1.Inventory{
TypeMeta: metav1.TypeMeta{
APIVersion: kubekeyv1.SchemeGroupVersion.String(),
Kind: "Inventory",
},
ObjectMeta: metav1.ObjectMeta{Name: "default"}}
type CommonOptions struct {
// Playbook to execute.
Playbook string
@ -69,7 +81,7 @@ func (o *CommonOptions) Flags() cliflag.NamedFlagSets {
gfs := fss.FlagSet("generic")
gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey. Default current dir. ")
gfs.StringVarP(&o.ConfigFile, "config", "c", o.ConfigFile, "the config file path. support *.yaml ")
gfs.StringSliceVar(&o.Set, "set", o.Set, "set value in config. format --set key=val")
gfs.StringArrayVar(&o.Set, "set", o.Set, "set value in config. format --set key=val")
gfs.StringVarP(&o.InventoryFile, "inventory", "i", o.InventoryFile, "the host list file path. support *.ini")
gfs.BoolVarP(&o.Debug, "debug", "d", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.")
gfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "the namespace which pipeline will be executed, all reference resources(pipeline, config, inventory, task) should in the same namespace")
@ -132,45 +144,34 @@ func (o *CommonOptions) completeRef(pipeline *kubekeyv1.Pipeline) (*kubekeyv1.Co
}
func genConfig(configFile string) (*kubekeyv1.Config, error) {
var (
config = &kubekeyv1.Config{}
cdata []byte
err error
)
if configFile != "" {
cdata, err = os.ReadFile(configFile)
} else {
cdata = builtin.DefaultConfig
cdata, err := os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("read config file error: %w", err)
}
if err := yaml.Unmarshal(cdata, defaultConfig); err != nil {
return nil, fmt.Errorf("unmarshal config file error: %w", err)
}
}
if err != nil {
return nil, fmt.Errorf("read config file error: %w", err)
}
if err := yaml.Unmarshal(cdata, config); err != nil {
return nil, fmt.Errorf("unmarshal config file error: %w", err)
}
return config, nil
return defaultConfig, nil
}
func genInventory(inventoryFile string) (*kubekeyv1.Inventory, error) {
var (
inventory = &kubekeyv1.Inventory{}
cdata []byte
err error
)
if inventoryFile != "" {
cdata, err = os.ReadFile(inventoryFile)
} else {
cdata = builtin.DefaultInventory
cdata, err := os.ReadFile(inventoryFile)
if err != nil {
klog.V(4).ErrorS(err, "read config file error")
return nil, err
}
if err := yaml.Unmarshal(cdata, defaultInventory); err != nil {
klog.V(4).ErrorS(err, "unmarshal config file error")
return nil, err
}
}
if err != nil {
klog.V(4).ErrorS(err, "read config file error")
return nil, err
}
if err := yaml.Unmarshal(cdata, inventory); err != nil {
klog.V(4).ErrorS(err, "unmarshal config file error")
return nil, err
}
return inventory, nil
return defaultInventory, nil
}
func setValue(config *kubekeyv1.Config, key, val string) error {
@ -189,6 +190,10 @@ func setValue(config *kubekeyv1.Config, key, val string) error {
return err
}
return config.SetValue(key, value)
case strings.ToUpper(val) == "TRUE" || strings.ToUpper(val) == "YES" || strings.ToUpper(val) == "Y":
return config.SetValue(key, true)
case strings.ToUpper(val) == "FALSE" || strings.ToUpper(val) == "NO" || strings.ToUpper(val) == "N":
return config.SetValue(key, false)
default:
return config.SetValue(key, val)
}
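For context on the new cases: values passed via --set were previously stored as numbers or raw strings; true/yes/y and false/no/n (case-insensitive) now become booleans, which is what lets the Makefile's --set sonobuoy.plugins.kube_bench.enabled=true later in this diff toggle a boolean field. A rough standalone sketch of that coercion follows; the numeric branch here is an assumption for illustration, not the real kubekey helper.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// coerce sketches the kind of coercion setValue now performs on --set values:
// numbers stay numbers, yes/no style words become booleans, and everything
// else stays a plain string.
func coerce(val string) any {
	if n, err := strconv.ParseFloat(val, 64); err == nil {
		return n
	}
	switch strings.ToUpper(val) {
	case "TRUE", "YES", "Y":
		return true
	case "FALSE", "NO", "N":
		return false
	default:
		return val
	}
}

func main() {
	fmt.Printf("%v (%T)\n", coerce("true"), coerce("true")) // true (bool)
	fmt.Printf("%v (%T)\n", coerce("8099"), coerce("8099")) // 8099 (float64)
	fmt.Printf("%v (%T)\n", coerce("cn"), coerce("cn"))     // cn (string)
}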


@ -17,11 +17,19 @@ run-playbook: build
.PHONY: precheck
precheck: build
$(BaseDir)/example/kk-alpha precheck --work-dir=$(BaseDir)/test \
--inventory=$(BaseDir)/inventory.yaml --debug
$(BaseDir)/kk-alpha precheck --work-dir=$(BaseDir)/example/test \
--inventory=$(BaseDir)/example/inventory.yaml --debug
.PHONY: create-cluster
create-cluster: build
$(BaseDir)/example/kk-alpha create cluster --work-dir=$(BaseDir)/test \
--inventory=$(BaseDir)/inventory.yaml \
$(BaseDir)/kk-alpha create cluster --work-dir=$(BaseDir)/example/test \
--inventory=$(BaseDir)/example/inventory.yaml \
--debug
.PHONY: run-local
run-local: build
@$(BaseDir)/kk-alpha run --work-dir=$(BaseDir)/example/test \
--project-addr=$(BaseDir)/plugins \
--inventory=$(BaseDir)/example/inventory.yaml \
--set sonobuoy.plugins.kube_bench.enabled=true \
playbooks/sonobuoy.yaml


@ -50,8 +50,10 @@ func init() {
func (c *Config) SetValue(key string, value any) error {
configMap := make(map[string]any)
if err := json.Unmarshal(c.Spec.Raw, &configMap); err != nil {
return err
if c.Spec.Raw != nil {
if err := json.Unmarshal(c.Spec.Raw, &configMap); err != nil {
return err
}
}
// set value
var f func(input map[string]any, key []string, value any) any
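The nil check added above matters because a freshly constructed Config (such as the package-level default earlier in this diff) has an empty Spec.Raw, and feeding that straight into json.Unmarshal fails. A small standalone demonstration of that behavior:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	m := make(map[string]any)

	// Unmarshalling a nil/empty raw spec returns an error, which is why
	// SetValue now only decodes Spec.Raw when it is non-nil.
	err := json.Unmarshal(nil, &m)
	fmt.Println(err) // unexpected end of JSON input

	// With a non-empty raw spec the decode succeeds as before.
	err = json.Unmarshal([]byte(`{"kkzone":"cn"}`), &m)
	fmt.Println(err, m) // <nil> map[kkzone:cn]
}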


@ -35,8 +35,8 @@ type Connector interface {
Init(ctx context.Context) error
// Close closes the connection
Close(ctx context.Context) error
// CopyFile copies a file from local to remote
CopyFile(ctx context.Context, local []byte, remoteFile string, mode fs.FileMode) error
// PutFile copies a file from local to remote
PutFile(ctx context.Context, local []byte, remoteFile string, mode fs.FileMode) error
// FetchFile copies a file from remote to local
FetchFile(ctx context.Context, remoteFile string, local io.Writer) error
// ExecuteCommand executes a command on the remote host


@ -39,7 +39,7 @@ func (c *localConnector) Close(ctx context.Context) error {
return nil
}
func (c *localConnector) CopyFile(ctx context.Context, local []byte, remoteFile string, mode fs.FileMode) error {
func (c *localConnector) PutFile(ctx context.Context, local []byte, remoteFile string, mode fs.FileMode) error {
// create remote file
if _, err := os.Stat(filepath.Dir(remoteFile)); err != nil && os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(remoteFile), mode); err != nil {


@ -67,7 +67,7 @@ func (c *sshConnector) Close(ctx context.Context) error {
return c.client.Close()
}
func (c *sshConnector) CopyFile(ctx context.Context, src []byte, remoteFile string, mode fs.FileMode) error {
func (c *sshConnector) PutFile(ctx context.Context, src []byte, remoteFile string, mode fs.FileMode) error {
// create sftp client
sftpClient, err := sftp.NewClient(c.client)
if err != nil {


@ -39,6 +39,8 @@ import (
)
const (
// jobLabel is set on the Job or CronJob; its value is the name of the Pipeline it belongs to.
jobLabel = "kubekey.kubesphere.io/pipeline"
defaultExecutorImage = "hub.kubesphere.com.cn/kubekey/executor:latest"
defaultPullPolicy = "IfNotPresent"
defaultServiceAccount = "kk-executor"
@ -67,7 +69,6 @@ func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
if pipeline.DeletionTimestamp != nil {
klog.V(5).InfoS("pipeline is deleting", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
return ctrl.Result{}, nil
}
@ -105,19 +106,13 @@ func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *
switch pipeline.Spec.JobSpec.Schedule {
case "": // pipeline will create job
jobs := &batchv1.JobList{}
if err := r.Client.List(ctx, jobs, ctrlclient.InNamespace(pipeline.Namespace)); err != nil {
if !errors.IsNotFound(err) {
return ctrl.Result{}, err
}
} else {
for _, job := range jobs.Items {
for _, ownerReference := range job.OwnerReferences {
if ownerReference.APIVersion == kubekeyv1.SchemeGroupVersion.String() && ownerReference.Kind == "Pipeline" &&
ownerReference.UID == pipeline.UID && ownerReference.Name == pipeline.Name {
return ctrl.Result{}, nil
}
}
}
if err := r.Client.List(ctx, jobs, ctrlclient.InNamespace(pipeline.Namespace), ctrlclient.MatchingLabels{
jobLabel: pipeline.Name,
}); err != nil && !errors.IsNotFound(err) {
return ctrl.Result{}, err
} else if len(jobs.Items) != 0 {
// an existing job was found; nothing to do
return ctrl.Result{}, nil
}
// create job
@ -125,6 +120,9 @@ func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *
ObjectMeta: metav1.ObjectMeta{
GenerateName: pipeline.Name + "-",
Namespace: pipeline.Namespace,
Labels: map[string]string{
jobLabel: pipeline.Name,
},
},
Spec: r.GenerateJobSpec(*pipeline),
}
@ -137,29 +135,26 @@ func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *
}
default: // pipeline will create cronJob
jobs := &batchv1.CronJobList{}
if err := r.Client.List(ctx, jobs, ctrlclient.InNamespace(pipeline.Namespace)); err != nil {
if !errors.IsNotFound(err) {
return ctrl.Result{}, err
}
} else {
if err := r.Client.List(ctx, jobs, ctrlclient.InNamespace(pipeline.Namespace), ctrlclient.MatchingLabels{
jobLabel: pipeline.Name,
}); err != nil && !errors.IsNotFound(err) {
return ctrl.Result{}, err
} else if len(jobs.Items) != 0 {
// an existing cronJob was found
for _, job := range jobs.Items {
for _, ownerReference := range job.OwnerReferences {
if ownerReference.APIVersion == kubekeyv1.SchemeGroupVersion.String() && ownerReference.Kind == "Pipeline" &&
ownerReference.UID == pipeline.UID && ownerReference.Name == pipeline.Name {
// update cronJob from pipeline, the pipeline status should always be running.
if pipeline.Spec.JobSpec.Suspend != job.Spec.Suspend {
cp := job.DeepCopy()
job.Spec.Suspend = pipeline.Spec.JobSpec.Suspend
// update pipeline status
if err := r.Client.Status().Patch(ctx, &job, ctrlclient.MergeFrom(cp)); err != nil {
klog.V(5).ErrorS(err, "update corn job error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline),
"cronJob", ctrlclient.ObjectKeyFromObject(&job))
}
}
return ctrl.Result{}, nil
// update cronJob from pipeline, the pipeline status should always be running.
if pipeline.Spec.JobSpec.Suspend != job.Spec.Suspend {
cp := job.DeepCopy()
job.Spec.Suspend = pipeline.Spec.JobSpec.Suspend
// update the cronJob's suspend state from the pipeline
if err := r.Client.Status().Patch(ctx, &job, ctrlclient.MergeFrom(cp)); err != nil {
klog.V(5).ErrorS(err, "update cron job error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline),
"cronJob", ctrlclient.ObjectKeyFromObject(&job))
}
}
}
return ctrl.Result{}, nil
}
// create cronJob
@ -167,6 +162,9 @@ func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *
ObjectMeta: metav1.ObjectMeta{
GenerateName: pipeline.Name + "-",
Namespace: pipeline.Namespace,
Labels: map[string]string{
jobLabel: pipeline.Name,
},
},
Spec: batchv1.CronJobSpec{
Schedule: pipeline.Spec.JobSpec.Schedule,


@ -22,7 +22,6 @@ import (
"k8s.io/klog/v2"
"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
"github.com/kubesphere/kubekey/v4/pkg/variable"
)
@ -33,19 +32,14 @@ func ModuleCommand(ctx context.Context, options ExecOptions) (string, string) {
klog.V(4).ErrorS(err, "failed to get host variable", "hostname", options.Host)
return "", err.Error()
}
// args
commandParam, err := variable.Extension2String(ha.(map[string]any), options.Args)
if err != nil {
return "", err.Error()
}
// get connector
conn, err := getConnector(ctx, options.Host, ha.(map[string]any))
if err != nil {
return "", err.Error()
}
defer conn.Close(ctx)
// execute command
command, err := tmpl.ParseString(ha.(map[string]any), commandParam)
// command string
command, err := variable.Extension2String(ha.(map[string]any), options.Args)
if err != nil {
return "", err.Error()
}


@ -44,7 +44,7 @@ func TestCommand(t *testing.T) {
},
{
name: "exec command success",
ctx: context.WithValue(context.Background(), "connector", &testConnector{
ctx: context.WithValue(context.Background(), ConnKey, &testConnector{
output: []byte("success"),
}),
opt: ExecOptions{
@ -56,7 +56,7 @@ func TestCommand(t *testing.T) {
},
{
name: "exec command failed",
ctx: context.WithValue(context.Background(), "connector", &testConnector{
ctx: context.WithValue(context.Background(), ConnKey, &testConnector{
commandErr: fmt.Errorf("failed"),
}),
opt: ExecOptions{


@ -91,7 +91,15 @@ func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) {
return fmt.Errorf("read file error: %w", err)
}
// copy file to remote
if err := conn.CopyFile(ctx, data, path, mode); err != nil {
var destFilename = destParam
if strings.HasSuffix(destParam, "/") {
rel, err := filepath.Rel(srcParam, path)
if err != nil {
return fmt.Errorf("get relative file path error: %w", err)
}
destFilename = filepath.Join(destParam, rel)
}
if err := conn.PutFile(ctx, data, destFilename, mode); err != nil {
return fmt.Errorf("copy file error: %w", err)
}
return nil
@ -110,7 +118,7 @@ func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) {
if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
mode = os.FileMode(modeParam)
}
if err := conn.CopyFile(ctx, data, destParam, mode); err != nil {
if err := conn.PutFile(ctx, data, destParam, mode); err != nil {
return "", fmt.Sprintf("copy file error: %v", err)
}
}
@ -145,7 +153,15 @@ func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) {
if err != nil {
return fmt.Errorf("read file error: %v", err)
}
if err := conn.CopyFile(ctx, data, path, mode); err != nil {
var destFilename = destParam
if strings.HasSuffix(destParam, "/") {
rel, err := pj.Rel(srcParam, path, project.GetFileOption{Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]})
if err != nil {
return fmt.Errorf("get relative file path error: %w", err)
}
destFilename = filepath.Join(destParam, rel)
}
if err := conn.PutFile(ctx, data, destFilename, mode); err != nil {
return fmt.Errorf("copy file error: %v", err)
}
return nil
@ -164,7 +180,7 @@ func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) {
if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
mode = os.FileMode(modeParam)
}
if err := conn.CopyFile(ctx, data, destParam, mode); err != nil {
if err := conn.PutFile(ctx, data, destParam, mode); err != nil {
return "", fmt.Sprintf("copy file error: %v", err)
}
}
@ -179,7 +195,7 @@ func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) {
mode = os.FileMode(modeParam)
}
if err := conn.CopyFile(ctx, []byte(contentParam), destParam, mode); err != nil {
if err := conn.PutFile(ctx, []byte(contentParam), destParam, mode); err != nil {
return "", err.Error()
}
}


@ -70,6 +70,7 @@ func init() {
RegisterModule("gen_cert", ModuleGenCert)
}
// ConnKey is the context key under which a connector is stored
var ConnKey = struct{}{}
func getConnector(ctx context.Context, host string, data map[string]any) (connector.Connector, error) {
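ConnKey replaces the bare string key "connector" that the tests earlier in this diff used with context.WithValue. A minimal sketch of the pattern, using a stand-in type (fakeConnector is hypothetical, not the real connector.Connector interface):

package main

import (
	"context"
	"fmt"
)

// connKey mirrors the exported ConnKey sentinel: a package-level value used
// as a context key, so callers cannot collide with plain string keys.
var connKey = struct{}{}

type fakeConnector struct{ name string }

func main() {
	// Store a connector in the context under the sentinel key...
	ctx := context.WithValue(context.Background(), connKey, &fakeConnector{name: "local"})

	// ...and retrieve it later, e.g. inside getConnector, before falling
	// back to building a real local/SSH connector from host variables.
	if c, ok := ctx.Value(connKey).(*fakeConnector); ok {
		fmt.Println("reusing connector:", c.name)
	}
}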


@ -66,7 +66,7 @@ func (t testConnector) Close(ctx context.Context) error {
return t.closeErr
}
func (t testConnector) CopyFile(ctx context.Context, local []byte, remoteFile string, mode fs.FileMode) error {
func (t testConnector) PutFile(ctx context.Context, local []byte, remoteFile string, mode fs.FileMode) error {
return t.copyErr
}


@ -91,7 +91,15 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
return fmt.Errorf("parse file error: %w", err)
}
// copy file to remote
if err := conn.CopyFile(ctx, []byte(result), path, mode); err != nil {
var destFilename = destParam
if strings.HasSuffix(destParam, "/") {
rel, err := filepath.Rel(srcParam, path)
if err != nil {
return fmt.Errorf("get relative file path error: %w", err)
}
destFilename = filepath.Join(destParam, rel)
}
if err := conn.PutFile(ctx, []byte(result), destFilename, mode); err != nil {
return fmt.Errorf("copy file error: %w", err)
}
return nil
@ -114,7 +122,7 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
mode = os.FileMode(modeParam)
}
if err := conn.CopyFile(ctx, []byte(result), destParam, mode); err != nil {
if err := conn.PutFile(ctx, []byte(result), destParam, mode); err != nil {
return "", fmt.Sprintf("copy file error: %v", err)
}
}
@ -145,7 +153,7 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
mode = os.FileMode(modeParam)
}
data, err := pj.ReadFile(path, project.GetFileOption{Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]})
data, err := pj.ReadFile(path, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]})
if err != nil {
return fmt.Errorf("read file error: %v", err)
}
@ -153,12 +161,20 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
if err != nil {
return fmt.Errorf("parse file error: %v", err)
}
if err := conn.CopyFile(ctx, []byte(result), path, mode); err != nil {
var destFilename = destParam
if strings.HasSuffix(destParam, "/") {
rel, err := pj.Rel(srcParam, path, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]})
if err != nil {
return fmt.Errorf("get relative file path error: %w", err)
}
destFilename = filepath.Join(destParam, rel)
}
if err := conn.PutFile(ctx, []byte(result), destFilename, mode); err != nil {
return fmt.Errorf("copy file error: %v", err)
}
return nil
}); err != nil {
return "", fmt.Sprintf("")
return "", fmt.Sprintf("copy file error: %v", err)
}
} else {
data, err := pj.ReadFile(srcParam, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kubekeyv1alpha1.TaskAnnotationRole]})
@ -176,7 +192,7 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil {
mode = os.FileMode(modeParam)
}
if err := conn.CopyFile(ctx, []byte(result), destParam, mode); err != nil {
if err := conn.PutFile(ctx, []byte(result), destParam, mode); err != nil {
return "", fmt.Sprintf("copy file error: %v", err)
}
}


@ -96,3 +96,7 @@ func (p builtinProject) ReadFile(path string, option GetFileOption) ([]byte, err
func (p builtinProject) MarshalPlaybook() (*kkcorev1.Playbook, error) {
return marshalPlaybook(p.FS, p.playbook)
}
func (p builtinProject) Rel(root string, path string, option GetFileOption) (string, error) {
return filepath.Rel(p.getFilePath(root, option), path)
}


@ -159,3 +159,7 @@ func (p gitProject) gitPull(ctx context.Context) error {
return nil
}
func (p gitProject) Rel(root string, path string, option GetFileOption) (string, error) {
return filepath.Rel(p.getFilePath(root, option), path)
}


@ -63,6 +63,9 @@ type localProject struct {
}
func (p localProject) getFilePath(path string, o GetFileOption) string {
if filepath.IsAbs(path) {
return path
}
var find []string
switch {
case o.IsFile:
@ -107,3 +110,7 @@ func (p localProject) ReadFile(path string, option GetFileOption) ([]byte, error
func (p localProject) MarshalPlaybook() (*kkcorev1.Playbook, error) {
return marshalPlaybook(os.DirFS(p.projectDir), p.playbook)
}
func (p localProject) Rel(root string, path string, option GetFileOption) (string, error) {
return filepath.Rel(p.getFilePath(root, option), path)
}
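Together with the destFilename changes in the copy and template modules above, Rel is what lets a dest ending in "/" preserve the source layout: each walked file's path is taken relative to the resolved source root and joined onto dest. A standalone sketch of that path arithmetic (the paths are illustrative, not taken from the repo):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Resolved source root of a role's template dir and one file found while walking it.
	srcRoot := "/project/roles/sonobuoy/templates/plugins"
	walked := "/project/roles/sonobuoy/templates/plugins/kube-bench.yaml"

	// dest ends with "/", so keep the file's position relative to srcRoot.
	dest := "/opt/sonobuoy/plugins/cluster-a/"

	rel, err := filepath.Rel(srcRoot, walked) // "kube-bench.yaml"
	if err != nil {
		panic(err)
	}
	fmt.Println(filepath.Join(dest, rel)) // /opt/sonobuoy/plugins/cluster-a/kube-bench.yaml
}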


@ -34,6 +34,7 @@ type Project interface {
Stat(path string, option GetFileOption) (os.FileInfo, error)
WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error
ReadFile(path string, option GetFileOption) ([]byte, error)
Rel(root string, path string, option GetFileOption) (string, error)
}
type GetFileOption struct {


@ -0,0 +1,6 @@
---
- hosts:
- localhost
gather_facts: true
roles:
- sonobuoy


@ -1,3 +1,4 @@
work_dir: /kubekey
etcd:
env:
data_dir: /var/lib/etcd


@ -1,3 +1,4 @@
work_dir: /kubekey
etcd:
env:
data_dir: /var/lib/etcd


@ -0,0 +1,19 @@
sonobuoy_version: v0.57.1
work_dir: /kubekey
sonobuoy:
amd64: |
{% if (kkzone == "cn") %}https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version|slice:'1:' }}_linux_amd64.tar.gz{% else %}https://github.com/vmware-tanzu/sonobuoy/releases/download/{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version|slice:'1:' }}_linux_amd64.tar.gz{% endif %}
arm64: |
{% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version|slice:'1:' }}_linux_arm64.tar.gz{% else %}https://github.com/vmware-tanzu/sonobuoy/releases/download/{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version|slice:'1:' }}_linux_arm64.tar.gz{% endif %}
plugins:
systemd_logs:
enabled: false
e2e:
enabled: false
e2e_ks:
enabled: false
image_registry: registry.cn-beijing.aliyuncs.com/kubesphereio
kube_bench:
enabled: false
image: sonobuoy/kube-bench:v0.6.17
clusters: []


@ -0,0 +1,61 @@
---
- name: Check if sonobuoy exists
ignore_errors: true
command: sonobuoy version
register: sonobuoy_install_version
- name: Get os binary
vars:
supported_architectures:
amd64:
- amd64
- x86_64
arm64:
- arm64
- aarch64
debug:
msg: "{% if (os.architecture in supported_architectures.amd64) %}amd64{% else %}arm64{% endif %}"
register: binary_type
- name: Check binaries for sonobuoy
command: |
artifact_name={{ sonobuoy[binary_type.stdout]|split:"/"|last }}
artifact_path={{ work_dir }}/kubekey/sonobuoy/{{ sonobuoy_version }}/{{ binary_type.stdout }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ sonobuoy[binary_type.stdout] }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
curl -L -o $artifact_path/$artifact_name {{ sonobuoy[binary_type.stdout] }}
fi
when:
- sonobuoy_version | defined && sonobuoy_version != ""
- name: Unpackage binary
command: |
tar -zxvf {{ work_dir }}/kubekey/sonobuoy/{{ sonobuoy_version }}/{{ binary_type.stdout }}/sonobuoy_{{ sonobuoy_version|slice:'1:' }}_linux_{{ binary_type.stdout }}.tar.gz -C /usr/local/bin/ sonobuoy
when: sonobuoy_install_version.stderr != ""
- name: Generate sonobuoy plugins
template:
src: plugins/
dest: /opt/sonobuoy/plugins/{{ item.name }}/
loop: "{{ clusters }}"
- name: Run sonobuoy
command: |
# run and waiting
sonobuoy run --kubeconfig {{ item.kubeconfig }} --wait \
{% if (plugins.systemd_logs.enabled) %}-p systemd-logs {% endif %}\
{% if (plugins.e2e.enabled) %}-p e2e {% endif %}\
{% if (plugins.e2e_ks.enabled) %}-p /opt/sonobuoy/plugins/{{ item.name }}/e2e-ks.yaml {% endif %}\
{% if (plugins.kube_bench.enabled) %}-p /opt/sonobuoy/plugins/{{ item.name }}/kube-bench.yaml -p /opt/sonobuoy/plugins/{{ item.name }}/kube-bench-master.yaml {% endif %}\
# get result
sonobuoy retrieve -f {{ work_dir }}/results/sonobuoy/{{ item.name }}_result.tar.gz --kubeconfig {{ item.kubeconfig }}
# clear
sonobuoy delete --kubeconfig {{ item.kubeconfig }}
loop: "{{ clusters }}"


@ -0,0 +1,47 @@
podSpec:
containers: [ ]
nodeSelector:
kubernetes.io/os: linux
restartPolicy: Never
serviceAccountName: sonobuoy-serviceaccount
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
- key: kubernetes.io/e2e-evict-taint-key
operator: Exists
sonobuoy-config:
driver: Job
plugin-name: e2e-ks
result-format: junit
spec:
command:
- /run_e2e.sh
env:
- name: E2E_EXTRA_ARGS
value: --progress-report-url=http://localhost:8099/progress
- name: E2E_FOCUS
value: \[Conformance\]
- name: E2E_PARALLEL
value: "false"
- name: E2E_USE_GO_RUNNER
value: "true"
- name: RESULTS_DIR
value: /tmp/sonobuoy/results
- name: SONOBUOY_K8S_VERSION
value: {{ item.version }}
- name: SONOBUOY_PROGRESS_PORT
value: "8099"
- name: SONOBUOY
value: "true"
- name: SONOBUOY_CONFIG_DIR
value: /tmp/sonobuoy/config
- name: SONOBUOY_RESULTS_DIR
value: /tmp/sonobuoy/results
image: {{ plugins.e2e_ks.image_registry }}/conformance:{{ item.version }}
name: e2e-ks
volumeMounts:
- mountPath: /tmp/sonobuoy/results
name: results


@ -0,0 +1,86 @@
podSpec:
containers: []
dnsPolicy: ClusterFirstWithHostNet
hostIPC: true
hostNetwork: true
hostPID: true
serviceAccountName: sonobuoy-serviceaccount
tolerations:
- operator: Exists
volumes:
- name: var-lib-etcd
hostPath:
path: "/var/lib/etcd"
- name: var-lib-kubelet
hostPath:
path: "/var/lib/kubelet"
- name: lib-systemd
hostPath:
path: "/lib/systemd"
- name: etc-systemd
hostPath:
path: "/etc/systemd"
- name: etc-kubernetes
hostPath:
path: "/etc/kubernetes"
# Uncomment this volume definition if you wish to use Kubernetes version auto-detection in kube-bench.
# - name: usr-bin
# hostPath:
# path: "/usr/bin"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
sonobuoy-config:
driver: DaemonSet
plugin-name: kube-bench-master
result-format: junit
spec:
command:
- /bin/sh
args:
- -c
- /run-kube-bench.sh; while true; do echo "Sleeping for 1h to avoid daemonset restart"; sleep 3600; done
env:
- name: KUBERNETES_VERSION
value: {{ item.version }}
- name: TARGET_MASTER
value: "true"
- name: TARGET_NODE
value: "false"
- name: TARGET_CONTROLPLANE
value: "false"
- name: TARGET_ETCD
value: "false"
- name: TARGET_POLICIES
value: "false"
image: {{ plugins.kube_bench.image }}
name: plugin
resources: {}
volumeMounts:
- mountPath: /tmp/sonobuoy/results
name: results
- name: var-lib-etcd
mountPath: /var/lib/etcd
readOnly: true
- name: var-lib-kubelet
mountPath: /var/lib/kubelet
readOnly: true
- name: etc-systemd
mountPath: /etc/systemd
readOnly: true
- name: lib-systemd
mountPath: /lib/systemd
readOnly: true
- name: etc-kubernetes
mountPath: /etc/kubernetes
readOnly: true
# /usr/bin from the host is mounted to access kubectl / kubelet, used by kube-bench for auto-detecting the Kubernetes version.
# It is mounted at the path /usr/local/mount-from-host/bin to avoid overwriting /usr/bin within the container.
# You can omit this mount if you provide the version using the KUBERNETES_VERSION environment variable.
# - name: usr-bin
# mountPath: /usr/local/mount-from-host/bin
# readOnly: true


@ -0,0 +1,86 @@
podSpec:
containers: []
dnsPolicy: ClusterFirstWithHostNet
hostIPC: true
hostNetwork: true
hostPID: true
serviceAccountName: sonobuoy-serviceaccount
tolerations:
- operator: Exists
volumes:
- name: var-lib-etcd
hostPath:
path: "/var/lib/etcd"
- name: var-lib-kubelet
hostPath:
path: "/var/lib/kubelet"
- name: lib-systemd
hostPath:
path: "/lib/systemd"
- name: etc-systemd
hostPath:
path: "/etc/systemd"
- name: etc-kubernetes
hostPath:
path: "/etc/kubernetes"
# Uncomment this volume definition if you wish to use Kubernetes version auto-detection in kube-bench.
# - name: usr-bin
# hostPath:
# path: "/usr/bin"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: DoesNotExist
sonobuoy-config:
driver: DaemonSet
plugin-name: kube-bench-node
result-format: junit
spec:
command:
- /bin/sh
args:
- -c
- /run-kube-bench.sh; while true; do echo "Sleeping for 1h to avoid daemonset restart"; /bin/sleep 3600; done
env:
- name: KUBERNETES_VERSION
value: {{ item.version }}
- name: TARGET_MASTER
value: "false"
- name: TARGET_NODE
value: "true"
- name: TARGET_CONTROLPLANE
value: "false"
- name: TARGET_ETCD
value: "false"
- name: TARGET_POLICIES
value: "false"
image: {{ plugins.kube_bench.image }}
name: plugin
resources: {}
volumeMounts:
- mountPath: /tmp/sonobuoy/results
name: results
- name: var-lib-etcd
mountPath: /var/lib/etcd
readOnly: true
- name: var-lib-kubelet
mountPath: /var/lib/kubelet
readOnly: true
- name: lib-systemd
mountPath: /lib/systemd
readOnly: true
- name: etc-systemd
mountPath: /etc/systemd
readOnly: true
- name: etc-kubernetes
mountPath: /etc/kubernetes
readOnly: true
# /usr/bin is mounted to access kubectl / kubelet, used by kube-bench for auto-detecting the Kubernetes version.
# It is mounted at the path /usr/local/mount-from-host/bin to avoid overwriting /usr/bin within the container.
# You can omit this mount if you provide the version using the KUBERNETES_VERSION environment variable.
# - name: usr-bin
# mountPath: /usr/local/mount-from-host/bin
# readOnly: true