From 865913fea9bc322caa37e983994f5c77f3292581 Mon Sep 17 00:00:00 2001 From: II <54946465+ImitationImmortal@users.noreply.github.com> Date: Wed, 21 Aug 2024 16:33:36 +0800 Subject: [PATCH] feat: add more golangci lint rule. (#2366) Signed-off-by: joyceliu Co-authored-by: joyceliu --- .golangci.yaml | 776 +++++++++- Makefile | 5 +- builtin/playbooks/artifact_export.yaml | 2 +- .../roles/init/init-artifact/tasks/main.yaml | 5 + .../init/init-os/tasks/init_repository.yaml | 14 +- .../roles/init/init-os/templates/init-os.sh | 12 +- builtin/roles/install/cri/defaults/main.yaml | 2 +- .../install/cri/tasks/install_docker.yaml | 4 +- .../install/image-registry/defaults/main.yaml | 3 +- .../image-registry/tasks/load_images.yaml | 23 +- .../install/kubernetes/defaults/main.yaml | 2 +- .../kubernetes/templates/kubeadm/kubelet.env | 1 - .../precheck/env_check/defaults/main.yaml | 7 +- .../roles/precheck/env_check/tasks/os.yaml | 3 +- cmd/controller-manager/app/options/common.go | 24 +- .../app/options/controller_manager.go | 10 +- cmd/controller-manager/app/server.go | 13 +- cmd/controller-manager/app/version.go | 2 +- cmd/kk/app/artifact.go | 16 +- cmd/kk/app/certs.go | 9 +- cmd/kk/app/create.go | 9 +- cmd/kk/app/init.go | 16 +- cmd/kk/app/options/artifact.go | 38 +- cmd/kk/app/options/certs.go | 17 +- cmd/kk/app/options/common.go | 23 +- cmd/kk/app/options/create.go | 18 +- cmd/kk/app/options/init.go | 37 +- cmd/kk/app/options/option.go | 42 +- cmd/kk/app/options/pipeline.go | 6 +- cmd/kk/app/options/precheck.go | 10 +- cmd/kk/app/options/run.go | 28 +- cmd/kk/app/pipeline.go | 15 +- cmd/kk/app/precheck.go | 8 +- cmd/kk/app/root.go | 12 +- cmd/kk/app/run.go | 12 +- cmd/kk/app/version.go | 2 +- config/{helm => kubekey}/Chart.yaml | 0 .../crds/kubekey.kubesphere.io_configs.yaml | 1 + .../kubekey.kubesphere.io_inventories.yaml | 3 + .../crds/kubekey.kubesphere.io_pipelines.yaml | 7 + .../{helm => kubekey}/templates/_helpers.tpl | 0 .../templates/_tplvalues.tpl | 0 
.../templates/deployment.yaml | 0 config/{helm => kubekey}/templates/role.yaml | 0 .../templates/serviceaccount.yaml | 0 config/{helm => kubekey}/values.yaml | 0 docs/zh/005-module.md | 2 + go.mod | 2 +- pkg/apis/core/v1/config_types.go | 35 +- pkg/apis/core/v1/config_types_test.go | 9 +- pkg/apis/core/v1/inventory_types.go | 5 + pkg/apis/core/v1/pipeline_types.go | 15 +- pkg/apis/core/v1alpha1/register.go | 2 +- pkg/apis/core/v1alpha1/task_types.go | 24 +- .../core/v1alpha1/zz_generated.deepcopy.go | 74 +- pkg/apis/project/v1/base.go | 1 + pkg/apis/project/v1/block.go | 125 +- pkg/apis/project/v1/collectionsearch.go | 1 + pkg/apis/project/v1/conditional.go | 13 +- pkg/apis/project/v1/delegatable.go | 1 + pkg/apis/project/v1/handler.go | 1 + pkg/apis/project/v1/loop.go | 1 + pkg/apis/project/v1/notifiable.go | 1 + pkg/apis/project/v1/play.go | 25 +- pkg/apis/project/v1/play_test.go | 8 +- pkg/apis/project/v1/playbook.go | 18 +- pkg/apis/project/v1/playbook_test.go | 3 +- pkg/apis/project/v1/role.go | 8 +- pkg/apis/project/v1/taggable.go | 40 +- pkg/connector/connector.go | 20 +- pkg/connector/helper.go | 1 + ..._ connector.go => kubernetes_connector.go} | 47 +- pkg/connector/local_connector.go | 26 +- pkg/connector/local_connector_test.go | 8 +- pkg/connector/ssh_connector.go | 50 +- pkg/const/common.go | 3 +- pkg/const/helper.go | 1 + pkg/const/scheme.go | 2 +- pkg/const/workdir.go | 10 +- pkg/controllers/pipeline_controller.go | 16 + pkg/converter/converter.go | 13 +- pkg/converter/converter_test.go | 13 +- pkg/converter/internal/functions.go | 9 +- pkg/converter/internal/helper.go | 32 +- pkg/converter/internal/helper_test.go | 12 +- pkg/converter/tmpl/template.go | 14 +- pkg/converter/tmpl/template_test.go | 16 +- pkg/executor/block_executor.go | 209 +++ pkg/executor/block_executor_test.go | 133 ++ pkg/executor/executor.go | 586 +------- pkg/executor/executor_test.go | 73 + pkg/executor/pipeline_executor.go | 276 ++++ pkg/executor/pipeline_executor_test.go | 38 
+ pkg/executor/task_executor.go | 342 +++++ pkg/executor/task_executor_test.go | 71 + pkg/manager/command_manager.go | 7 +- pkg/manager/controller_manager.go | 3 + pkg/manager/manager.go | 4 + pkg/modules/assert.go | 95 +- pkg/modules/assert_test.go | 7 +- pkg/modules/command.go | 12 +- pkg/modules/command_test.go | 1 + pkg/modules/copy.go | 389 +++-- pkg/modules/copy_test.go | 3 +- pkg/modules/debug.go | 14 +- pkg/modules/debug_test.go | 1 + pkg/modules/fetch.go | 17 +- pkg/modules/fetch_test.go | 1 + pkg/modules/gen_cert.go | 243 ++-- pkg/modules/image.go | 563 +++++--- pkg/modules/image_test.go | 75 + pkg/modules/module.go | 44 +- pkg/modules/module_test.go | 23 +- pkg/modules/set_fact.go | 8 +- pkg/modules/set_fact_test.go | 1 + pkg/modules/template.go | 348 +++-- pkg/modules/template_test.go | 2 + pkg/project/builtin.go | 24 +- pkg/project/git.go | 62 +- pkg/project/helper.go | 269 ++-- pkg/project/helper_test.go | 64 +- pkg/project/local.go | 22 +- pkg/project/project.go | 14 +- pkg/proxy/admit.go | 13 +- pkg/proxy/api_resources.go | 63 +- pkg/proxy/internal/file_storage.go | 488 ++++--- pkg/proxy/internal/rest_option.go | 10 +- pkg/proxy/internal/watcher.go | 163 ++- pkg/proxy/path_expression.go | 30 +- pkg/proxy/resources/config/storage.go | 5 + pkg/proxy/resources/config/strategy.go | 25 +- pkg/proxy/resources/inventory/storage.go | 3 + pkg/proxy/resources/inventory/strategy.go | 41 +- pkg/proxy/resources/pipeline/storage.go | 9 +- pkg/proxy/resources/pipeline/strategy.go | 41 +- pkg/proxy/resources/task/storage.go | 15 +- pkg/proxy/resources/task/strategy.go | 89 +- pkg/proxy/router.go | 2 + pkg/proxy/transport.go | 363 ++--- pkg/variable/helper.go | 263 +++- pkg/variable/helper_test.go | 1 + pkg/variable/internal.go | 320 +++-- pkg/variable/internal_test.go | 18 +- pkg/variable/source/file.go | 24 + pkg/variable/source/memory.go | 24 + pkg/variable/source/source.go | 28 +- pkg/variable/variable.go | 76 +- plugins/playbooks/upgrade_kernel.yaml | 16 + 
plugins/playbooks/vars/upgrade_kernel.yaml | 2 + .../roles/os/init-kernel/defaults/main.yaml | 5 + .../roles/os/init-kernel/tasks/centos.yaml | 30 + plugins/roles/os/init-kernel/tasks/main.yaml | 3 + .../roles/os/upgrade-kernel/tasks/centos.yaml | 31 + .../roles/os/upgrade-kernel/tasks/main.yaml | 3 + vendor/k8s.io/client-go/testing/actions.go | 698 +++++++++ vendor/k8s.io/client-go/testing/fake.go | 220 +++ vendor/k8s.io/client-go/testing/fixture.go | 581 ++++++++ vendor/k8s.io/client-go/testing/interface.go | 66 + vendor/modules.txt | 4 + .../pkg/client/fake/client.go | 1269 +++++++++++++++++ .../controller-runtime/pkg/client/fake/doc.go | 38 + .../pkg/client/interceptor/intercept.go | 166 +++ .../pkg/internal/objectutil/objectutil.go | 42 + version/version.go | 1 + 164 files changed, 8583 insertions(+), 2694 deletions(-) rename config/{helm => kubekey}/Chart.yaml (100%) rename config/{helm => kubekey}/crds/kubekey.kubesphere.io_configs.yaml (96%) rename config/{helm => kubekey}/crds/kubekey.kubesphere.io_inventories.yaml (93%) rename config/{helm => kubekey}/crds/kubekey.kubesphere.io_pipelines.yaml (99%) rename config/{helm => kubekey}/templates/_helpers.tpl (100%) rename config/{helm => kubekey}/templates/_tplvalues.tpl (100%) rename config/{helm => kubekey}/templates/deployment.yaml (100%) rename config/{helm => kubekey}/templates/role.yaml (100%) rename config/{helm => kubekey}/templates/serviceaccount.yaml (100%) rename config/{helm => kubekey}/values.yaml (100%) rename pkg/connector/{kubernetes_ connector.go => kubernetes_connector.go} (72%) create mode 100644 pkg/executor/block_executor.go create mode 100644 pkg/executor/block_executor_test.go create mode 100644 pkg/executor/executor_test.go create mode 100644 pkg/executor/pipeline_executor.go create mode 100644 pkg/executor/pipeline_executor_test.go create mode 100644 pkg/executor/task_executor.go create mode 100644 pkg/executor/task_executor_test.go create mode 100644 pkg/modules/image_test.go create 
mode 100644 pkg/variable/source/memory.go create mode 100644 plugins/playbooks/upgrade_kernel.yaml create mode 100644 plugins/playbooks/vars/upgrade_kernel.yaml create mode 100644 plugins/roles/os/init-kernel/defaults/main.yaml create mode 100644 plugins/roles/os/init-kernel/tasks/centos.yaml create mode 100644 plugins/roles/os/init-kernel/tasks/main.yaml create mode 100644 plugins/roles/os/upgrade-kernel/tasks/centos.yaml create mode 100644 plugins/roles/os/upgrade-kernel/tasks/main.yaml create mode 100644 vendor/k8s.io/client-go/testing/actions.go create mode 100644 vendor/k8s.io/client-go/testing/fake.go create mode 100644 vendor/k8s.io/client-go/testing/fixture.go create mode 100644 vendor/k8s.io/client-go/testing/interface.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go diff --git a/.golangci.yaml b/.golangci.yaml index cef2f8a5..ee4a7a27 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,46 +1,215 @@ linters: disable-all: true enable: + - asasalint - asciicheck + - bidichk - bodyclose + - canonicalheader - containedctx -# - deadcode -# - depguard + - contextcheck + - copyloopvar + - cyclop + - decorder + - depguard - dogsled +# - dupl + - dupword + - durationcheck +# - err113 - errcheck + - errchkjson + - errname + - errorlint + - exhaustive +# - exhaustruct - exportloopref + - fatcontext + - forbidigo + - forcetypeassert + - funlen # - gci + - ginkgolinter + - gocheckcompilerdirectives +# - gochecknoglobals +# - gochecknoinits + - gochecksumtype + - gocognit - goconst - gocritic + - gocyclo + - godot + - godox - gofmt +# - gofumpt + - goheader - goimports + - gomoddirectives + - gomodguard - goprintffuncname - gosec - gosimple + - gosmopolitan - govet + 
- grouper - importas + - inamedparam - ineffassign + - interfacebloat + - intrange +# - ireturn +# - lll + - loggercheck + - maintidx + - makezero + - mirror # - misspell +# - mnd +# - musttag - nakedret + - nestif - nilerr + - nilnil + - nlreturn - noctx - nolintlint + - nonamedreturns + - nosprintfhostport +# - paralleltest + - perfsprint - prealloc - predeclared -# - revive + - promlinter + - protogetter + - reassign + - revive - rowserrcheck + - sloglint + - spancheck + - sqlclosecheck - staticcheck -# - structcheck - stylecheck + - tagalign +# - tagliatelle + - tenv + - testableexamples + - testifylint +# - testpackage - thelper + - tparallel - typecheck - unconvert -# - unparam + - unparam - unused -# - varcheck + - usestdlibvars +# - varnamelen + - wastedassign - whitespace +# - wrapcheck +# - wsl + - zerologlint linters-settings: + cyclop: + # The maximal code complexity to report. + # Default: 10 + max-complexity: 20 + # Should ignore tests. + # Default: false + skip-tests: true + depguard: + # Rules to apply. + # + # Variables: + # - File Variables + # you can still use and exclamation mark ! in front of a variable to say not to use it. + # Example !$test will match any file that is not a go test file. + # + # `$all` - matches all go files + # `$test` - matches all go test files + # + # - Package Variables + # + # `$gostd` - matches all of go's standard library (Pulled from `GOROOT`) + # + # Default: Only allow $gostd in all files. + rules: + # Name of a rule. + main: + # List of allowed packages. 
+ allow: + - $gostd + - github.com/spf13 + - github.com/pkg/sftp + - github.com/google/gops + - github.com/go-git/go-git + - github.com/fsnotify/fsnotify + - github.com/schollz/progressbar + - github.com/stretchr/testify + - github.com/Masterminds/sprig + - github.com/opencontainers/image-spec + - oras.land/oras-go + - k8s.io + - sigs.k8s.io + - github.com/kubesphere/kubekey + exhaustive: + # Enum types matching the supplied regex do not have to be listed in + # switch statements to satisfy exhaustiveness. + # Default: "" + ignore-enum-types: "fsnotify.Op|v1alpha1.TaskPhase|reflect.Kind" + forbidigo: + # Forbid the following identifiers (list of regexp). + # Default: ["^(fmt\\.Print(|f|ln)|print|println)$"] + forbid: + # Builtin function: + - ^print.*$ + # Optional message that gets included in error reports. +# - p: ^fmt\.Print.*$ +# msg: Do not commit print statements. +# # Alternatively, put messages at the end of the regex, surrounded by `(# )?` + # Escape any special characters. Those messages get included in error reports. +# - 'fmt\.Print.*(# Do not commit print statements\.)?' + # Forbid spew Dump, whether it is called as function or method. + # Depends on analyze-types below. + - ^spew\.(ConfigState\.)?Dump$ + # The package name might be ambiguous. + # The full import path can be used as additional criteria. + # Depends on analyze-types below. + - p: ^v1.Dump$ + pkg: ^example.com/pkg/api/v1$ + funlen: + # Checks the number of lines in a function. + # If lower than 0, disable the check. + # Default: 60 + lines: -1 + # Checks the number of statements in a function. + # If lower than 0, disable the check. + # Default: 40 + statements: -1 + # Ignore comments when counting lines. 
+ # Default false + ignore-comments: true + gocritic: + enabled-tags: + - experimental + disabled-checks: + - appendAssign + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - evalOrder + - ifElseChain + - octalLiteral + - regexpSimplify + - sloppyReassign + - truncateCmp + - typeDefFirst + - unnamedResult + - unnecessaryDefer + - whyNoLint + - wrapperFunc + - commentFormatting + - filepathJoin + # - rangeValCopy + # - hugeParam godot: # declarations - for top level declaration comments (default); # toplevel - for top level comments; @@ -49,8 +218,24 @@ linters-settings: exclude: - '^ \+.*' - '^ ANCHOR.*' + # Check that each sentence ends with a period. + # Default: true + period: false + # Check that each sentence starts with a capital letter. + # Default: false + capital: false + gosec: + excludes: + - G106 # Deferring unsafe method "InsecureIgnoreHostKey" on type "\*ssh" + - G301 # Deferring unsafe method "MkdirAll" on type "\*os.File" + - G304 # Deferring unsafe method "Create" or "Open" on type "\*os.File" + - G306 # Deferring unsafe method "WriteFile" on type "\*os.File" + - G307 # Deferring unsafe method "Close" on type "\*os.File" + - G108 # Profiling endpoint is automatically exposed on /debug/pprof + - G402 # Look for bad TLS connection settings importas: no-unaliased: true +# no-extra-aliases: true alias: # oci - pkg: github.com/opencontainers/image-spec/specs-go/v1 @@ -90,79 +275,536 @@ linters-settings: # kubekey - pkg: "github.com/kubesphere/kubekey/v4/pkg/const" alias: _const - - pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1" - alias: kubekeyv1 - - pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1" - alias: kubekeyv1alpha1 - pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" alias: kkcorev1 + - pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1" + alias: kkcorev1alpha1 + - pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" + alias: kkprojectv1 + nestif: + # Minimal complexity of 
if statements to report. + # Default: 5 + min-complexity: 20 nolintlint: allow-unused: false require-specific: true revive: - rules: - # The following rules are recommended https://github.com/mgechev/revive#recommended-configuration + # Maximum number of open files at the same time. + # See https://github.com/mgechev/revive#command-line-flags + # Defaults to unlimited. +# max-open-files: 2048 + # When set to false, ignores files with "GENERATED" header, similar to golint. + # See https://github.com/mgechev/revive#available-rules for details. + # Default: false + ignore-generated-header: true + # Sets the default severity. + # See https://github.com/mgechev/revive#configuration + # Default: warning + severity: error + # Enable all available rules. + # Default: false + enable-all-rules: false + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. + # Default: 0.8 + confidence: 0.1 + rules: # v1.3.7 + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#add-constant + - name: add-constant + severity: warning + disabled: true + exclude: [""] + arguments: + - maxLitCount: "3" + allowStrs: '""' + allowInts: "0,1,2" + allowFloats: "0.0,0.,1.0,1.,2.0,2." 
+ # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#argument-limit + - name: argument-limit + severity: warning + disabled: true + exclude: [""] + arguments: [4] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#atomic + - name: atomic + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#banned-characters + - name: banned-characters + severity: warning + disabled: true + exclude: [""] + arguments: ["Ω", "Σ", "σ", "7"] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bare-return + - name: bare-return + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports - name: blank-imports - - name: context-as-argument - - name: context-keys-type - - name: dot-imports - - name: error-return - - name: error-strings - - name: error-naming - - name: exported - #- name: if-return # TODO This is a recommended rule with many findings which may require it's own pr. - - name: increment-decrement - - name: var-naming - - name: var-declaration - - name: package-comments - - name: range - - name: receiver-naming - - name: time-naming - - name: unexported-return - - name: indent-error-flow - - name: errorf - - name: empty-block - - name: superfluous-else - #- name: unused-parameter # TODO This is a recommended rule with many findings which may require it's own pr. - - name: unreachable-code - - name: redefines-builtin-id - # - # Rules in addition to the recommended configuration above. 
- # + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr - name: bool-literal-in-expr + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#call-to-gc + - name: call-to-gc + severity: warning + disabled: true + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#cognitive-complexity + - name: cognitive-complexity + severity: warning + disabled: true + exclude: [""] + arguments: [7] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#comment-spacings + - name: comment-spacings + severity: warning + disabled: true + exclude: [""] + arguments: + - mypragma + - otherpragma +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#confusing-naming + - name: confusing-naming + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#confusing-results + - name: confusing-results + severity: warning + disabled: true + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr - name: constant-logical-expr - gosec: - excludes: - - G106 # Deferring unsafe method "InsecureIgnoreHostKey" on type "\*ssh" - - G301 # Deferring unsafe method "MkdirAll" on type "\*os.File" - - G304 # Deferring unsafe method "Create" or "Open" on type "\*os.File" - - G306 # Deferring unsafe method "WriteFile" on type "\*os.File" - - G307 # Deferring unsafe method "Close" on type "\*os.File" - - G108 # Profiling endpoint is automatically exposed on /debug/pprof - gocritic: - enabled-tags: - - experimental - disabled-checks: - - appendAssign - - dupImport # https://github.com/go-critic/go-critic/issues/845 - - evalOrder - - ifElseChain - - octalLiteral - - regexpSimplify - - sloppyReassign - - truncateCmp - - typeDefFirst - - unnamedResult - - 
unnecessaryDefer - - whyNoLint - - wrapperFunc - - commentFormatting - - filepathJoin -# - rangeValCopy -# - hugeParam + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + - name: context-as-argument + severity: warning + disabled: false + exclude: [""] +# arguments: +# - allowTypesBefore: "*testing.T,*github.com/user/repo/testing.Harness" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#cyclomatic + - name: cyclomatic + severity: warning + disabled: true + exclude: [""] + arguments: [3] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#datarace + - name: datarace + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + severity: warning + disabled: false + exclude: [""] + arguments: + - ["call-chain", "loop"] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + severity: warning + disabled: false + exclude: [""] + arguments: [] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + severity: warning + disabled: false + exclude: [""] + arguments: + - "preserveScope" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + severity: warning + disabled: false + exclude: [""] +# 
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#enforce-map-style + - name: enforce-map-style + severity: warning + disabled: false + exclude: [""] + arguments: + - "make" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#enforce-repeated-arg-type-style + - name: enforce-repeated-arg-type-style + severity: warning + disabled: true + exclude: [""] + arguments: + - "short" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#enforce-slice-style + - name: enforce-slice-style + severity: warning + disabled: false + exclude: [""] + arguments: + - "make" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + severity: warning + disabled: false + exclude: [""] + arguments: + - "checkPrivateReceivers" + - "disableStutteringCheck" + - "sayRepetitiveInsteadOfStutters" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#file-header + - name: file-header + severity: warning + disabled: true + exclude: [""] + arguments: + - This is the text that must appear at the top of source files. 
+# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + severity: warning + disabled: true + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#function-length + - name: function-length + severity: warning + disabled: true + exclude: [""] + arguments: [10, 0] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#function-result-limit + - name: function-result-limit + severity: warning + disabled: true + exclude: [""] + arguments: [3] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#get-return + - name: get-return + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-alias-naming + - name: import-alias-naming + severity: warning + disabled: true + exclude: [""] + arguments: + - "^[a-z][a-z0-9]{0,}$" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#imports-blocklist + - name: imports-blocklist + severity: warning + disabled: false + exclude: [""] + arguments: + - "crypto/md5" + - "crypto/sha1" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + severity: warning + disabled: false + exclude: [""] 
+ arguments: + - "preserveScope" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#line-length-limit + - name: line-length-limit + severity: warning + disabled: true + exclude: [""] + arguments: [80] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#max-control-nesting + - name: max-control-nesting + severity: warning + disabled: true + exclude: [""] + arguments: [3] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#max-public-structs + - name: max-public-structs + severity: warning + disabled: true + exclude: [""] + arguments: [3] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#modifies-parameter + - name: modifies-parameter + severity: warning + disabled: true + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#modifies-value-receiver + - name: modifies-value-receiver + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#nested-structs + - name: nested-structs + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#optimize-operands-order + - name: optimize-operands-order + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + severity: warning + disabled: true + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + severity: warning + disabled: false + exclude: [""] +# # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#receiver-naming + - name: receiver-naming + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redundant-import-alias + - name: redundant-import-alias + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + severity: warning + disabled: false + exclude: [""] + arguments: + - - 'core.WriteError[1].Message' + - '/^([^A-Z]|$)/' + - must not start with a capital letter + - - 'fmt.Errorf[0]' + - '/(^|[^\.!?])$/' + - must not end in punctuation + - - panic + - '/^[^\n]*$/' + - must not contain line breaks +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-of-int + - name: string-of-int + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + severity: warning + disabled: true + exclude: [""] + arguments: + - "json,inline" + - "bson,outline,gnu" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + severity: warning + disabled: false + exclude: [""] + arguments: + - "preserveScope" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-naming + - name: time-naming + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unchecked-type-assertion + - name: unchecked-type-assertion + severity: warning + 
disabled: false + exclude: [""] + arguments: + - acceptIgnoredAssertionResult: true +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-naming + - name: unexported-naming + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + severity: warning + disabled: false + exclude: [""] + arguments: + - "fmt.Printf" + - "fmt.Fprintf" + - "fmt.Fprint" + - "fmt.Println" + - "bytes.Buffer.WriteString" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unreachable-code + - name: unreachable-code + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter + - name: unused-parameter + severity: warning + disabled: false + exclude: [""] + arguments: + - allowRegex: "^_" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-receiver + - name: unused-receiver + severity: warning + disabled: true + exclude: [""] + arguments: + - allowRegex: "^_" +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#use-any + - name: use-any + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + severity: warning + disabled: false + exclude: [""] +# # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + severity: warning + disabled: false + exclude: [""] +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + severity: warning + disabled: false + exclude: [""] + arguments: + - ["ID"] # AllowList + - ["VM"] # DenyList + - - upperCaseConst: true # Extra parameter (upperCaseConst|skipPackageNameChecks) + skipPackageNameChecks: true +# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + severity: warning + disabled: false + exclude: [""] stylecheck: checks: - -ST1000 # ignore package comment + wrapcheck: + # An array of strings that specify substrings of signatures to ignore. + # If this set, it will override the default set of ignored signatures. + # See https://github.com/tomarrell/wrapcheck#configuration for more information. + # Default: [".Errorf(", "errors.New(", "errors.Unwrap(", "errors.Join(", ".Wrap(", ".Wrapf(", ".WithMessage(", ".WithMessagef(", ".WithStack("] + ignoreSigs: + - .Errorf( + - errors.New( + - errors.Unwrap( + - errors.Join( + - .Wrap( + - .Wrapf( + - .WithMessage( + - .WithMessagef( + - .WithStack( + # An array of strings that specify regular expressions of signatures to ignore. + # Default: [] + ignoreSigRegexps: + - \.New.*Error\( + # An array of strings that specify globs of packages to ignore. + # Default: [] + ignorePackageGlobs: + - encoding/* + - github.com/pkg/* + # An array of strings that specify regular expressions of interfaces to ignore. 
+ # Default: [] + ignoreInterfaceRegexps: + - ^(?i)c(?-i)ach(ing|e) issues: max-same-issues: 0 max-issues-per-linter: 0 diff --git a/Makefile b/Makefile index 649be961..38718abc 100644 --- a/Makefile +++ b/Makefile @@ -171,9 +171,9 @@ generate-go-deepcopy-kubekey: $(CONTROLLER_GEN) ## Generate deepcopy object .PHONY: generate-manifests-kubekey generate-manifests-kubekey: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. $(CONTROLLER_GEN) \ - paths=./pkg/apis/... \ + paths=./pkg/apis/core/... \ crd \ - output:crd:dir=./config/helm/crds/ + output:crd:dir=./config/kubekey/crds/ .PHONY: generate-modules generate-modules: ## Run go mod tidy to ensure modules are up to date @@ -193,7 +193,6 @@ generate-goimports: ## Format all import, `goimports` is required. lint: $(GOLANGCI_LINT) ## Lint the codebase $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) cd $(TEST_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) - cd $(TOOLS_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) .PHONY: verify-dockerfiles verify-dockerfiles: diff --git a/builtin/playbooks/artifact_export.yaml b/builtin/playbooks/artifact_export.yaml index b800287e..31ee8c7d 100644 --- a/builtin/playbooks/artifact_export.yaml +++ b/builtin/playbooks/artifact_export.yaml @@ -5,7 +5,7 @@ tasks: - name: Package image image: - pull: "{{ .image_manifests }}" + pull: "{{ .image_manifests | toJson }}" when: .image_manifests | default list | len | lt 0 - name: Export artifact command: | diff --git a/builtin/roles/init/init-artifact/tasks/main.yaml b/builtin/roles/init/init-artifact/tasks/main.yaml index f01a8a24..d8fa482d 100644 --- a/builtin/roles/init/init-artifact/tasks/main.yaml +++ b/builtin/roles/init/init-artifact/tasks/main.yaml @@ -20,6 +20,11 @@ - include_tasks: download_by_curl.yaml # the binaries which download by helm - include_tasks: download_by_helm.yaml + # download remote images to local + - name: Download images + image: + pull: "{{ .image_manifests | toJson }}" + when: 
.image_manifests | default list | len | lt 0 - include_tasks: pki.yaml tags: ["certs"] diff --git a/builtin/roles/init/init-os/tasks/init_repository.yaml b/builtin/roles/init/init-os/tasks/init_repository.yaml index af829977..ea80cd0f 100644 --- a/builtin/roles/init/init-os/tasks/init_repository.yaml +++ b/builtin/roles/init/init-os/tasks/init_repository.yaml @@ -54,16 +54,16 @@ # add repository rm -rf /etc/yum.repos.d/* cat << EOF > /etc/yum.repos.d/CentOS-local.repo - [base-local] - name=rpms-local + [base-local] + name=rpms-local - baseurl=file://%s + baseurl=file:///tmp/kubekey/repository.iso - enabled=1 + enabled=1 - gpgcheck=0 + gpgcheck=0 - EOF + EOF # update repository yum clean all && yum makecache # install @@ -75,4 +75,4 @@ # install yum install -y openssl socat conntrack ipset ebtables chrony ipvsadm fi - when: .os.release.ID_LIKE | eq "rhel fedora" + when: .os.release.ID_LIKE | eq "\"rhel fedora\"" diff --git a/builtin/roles/init/init-os/templates/init-os.sh b/builtin/roles/init/init-os/templates/init-os.sh index 04c15997..e863aad8 100644 --- a/builtin/roles/init/init-os/templates/init-os.sh +++ b/builtin/roles/init/init-os/templates/init-os.sh @@ -177,7 +177,7 @@ cat >>/etc/hosts<>/etc/hosts</dev/null 2>&1 || update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy >/dev/null 2>&1 || true update-alternatives --set arptables /usr/sbin/arptables-legacy >/dev/null 2>&1 || true update-alternatives --set ebtables /usr/sbin/ebtables-legacy >/dev/null 2>&1 || true - - diff --git a/builtin/roles/install/cri/defaults/main.yaml b/builtin/roles/install/cri/defaults/main.yaml index 55c3a2e9..c116584e 100644 --- a/builtin/roles/install/cri/defaults/main.yaml +++ b/builtin/roles/install/cri/defaults/main.yaml @@ -26,7 +26,7 @@ image_registry: {{- if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }} {{ .image_registry.ha_vip }} {{- else }} - {{ .groups.image_registry | default list | first }} + {{ index .inventory_hosts (.groups.image_registry | 
default list | first) "internal_ipv4" }} {{- end }} username: admin password: Harbor12345 diff --git a/builtin/roles/install/cri/tasks/install_docker.yaml b/builtin/roles/install/cri/tasks/install_docker.yaml index 205f3e8f..760ef3c6 100644 --- a/builtin/roles/install/cri/tasks/install_docker.yaml +++ b/builtin/roles/install/cri/tasks/install_docker.yaml @@ -47,10 +47,10 @@ src: | {{ .work_dir }}/kubekey/pki/image_registry.crt dest: | - /etc/docker/certs.d/{{ .image_registry.auth.registry }}/server.crt + /etc/docker/certs.d/{{ .image_registry.auth.registry }}/client.cert - name: Sync image registry key file to remote copy: src: | {{ .work_dir }}/kubekey/pki/image_registry.key dest: | - /etc/docker/certs.d/{{ .image_registry.auth.registry }}/server.key + /etc/docker/certs.d/{{ .image_registry.auth.registry }}/client.key diff --git a/builtin/roles/install/image-registry/defaults/main.yaml b/builtin/roles/install/image-registry/defaults/main.yaml index 51f644c1..1b5c0e82 100644 --- a/builtin/roles/install/image-registry/defaults/main.yaml +++ b/builtin/roles/install/image-registry/defaults/main.yaml @@ -6,7 +6,7 @@ image_registry: {{- if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }} {{ .image_registry.ha_vip }} {{- else }} - {{ .groups.image_registry | default list | first }} + {{ index .inventory_hosts (.groups.image_registry | default list | first) "internal_ipv4" }} {{- end }} username: admin password: Harbor12345 @@ -49,4 +49,3 @@ image_registry: # bucket: bucketname # keyid: mykeyid # rootdirectory: /s3/object/name/prefix - diff --git a/builtin/roles/install/image-registry/tasks/load_images.yaml b/builtin/roles/install/image-registry/tasks/load_images.yaml index a68fe1ef..dee13e14 100644 --- a/builtin/roles/install/image-registry/tasks/load_images.yaml +++ b/builtin/roles/install/image-registry/tasks/load_images.yaml @@ -1,4 +1,11 @@ --- +- name: Sync images to remote + tags: ["only_image"] + copy: + src: | + {{ .work_dir }}/kubekey/images/ + dest: 
/tmp/kubekey/images/ + - name: Create harbor project for each image tags: ["only_image"] command: | @@ -9,18 +16,11 @@ continue fi - dir_name=${dir##*/} - IFS='=' set -- $dir_name - image_array="$@" - array_length=$# + project=${dir##*/} - if [ "$array_length" -gt 3 ]; then - project=$2 - dest_image=$(shift 2 && echo "$*" | tr ' ' '/') - tag=$(echo "$@" | awk '{print $NF}') - else - echo "unsupported image: $dir_name" - exit 1 + if [ "$project" == "blobs" ]; then + # skip blobs dir + continue fi # if project is not exist, create if @@ -44,6 +44,7 @@ tags: ["only_image"] image: push: + images_dir: /tmp/kubekey/images/ registry: | {{ .image_registry.auth.registry }} namespace_override: | diff --git a/builtin/roles/install/kubernetes/defaults/main.yaml b/builtin/roles/install/kubernetes/defaults/main.yaml index 45e2b375..4796709f 100644 --- a/builtin/roles/install/kubernetes/defaults/main.yaml +++ b/builtin/roles/install/kubernetes/defaults/main.yaml @@ -18,7 +18,7 @@ kubernetes: pod_cidr: 10.233.64.0/18 service_cidr: 10.233.0.0/18 dns_image: | - {{ .k8s_registry }}/coredns/coredns:v1.8.6 + {{ .k8s_registry }}/coredns/coredns:1.8.6 dns_cache_image: | {{ .dockerio_registry }}/kubesphere/k8s-dns-node-cache:1.22.20 dns_service_ip: | diff --git a/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env b/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env index 1d3fd5ed..01ff57a5 100644 --- a/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env +++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env @@ -10,4 +10,3 @@ EnvironmentFile=-/etc/default/kubelet Environment="KUBELET_EXTRA_ARGS=--node-ip={{ .internal_ipv4 }} --hostname-override={{ .hostname }} {{ range $k,$v := .kubernetes.kubelet.extra_args }}--{{ $k }} {{ $v }} {{ end }}" ExecStart= ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS - diff --git a/builtin/roles/precheck/env_check/defaults/main.yaml 
b/builtin/roles/precheck/env_check/defaults/main.yaml index 965e42d8..50438854 100644 --- a/builtin/roles/precheck/env_check/defaults/main.yaml +++ b/builtin/roles/precheck/env_check/defaults/main.yaml @@ -3,7 +3,11 @@ cluster_require: etcd_disk_wal_fysnc_duration_seconds: 10000000 allow_unsupported_distribution_setup: false # support ubuntu, centos. - supported_os_distributions: ['ubuntu', 'centos'] + supported_os_distributions: + - ubuntu + - '"ubuntu"' + - centos + - '"centos"' require_network_plugin: ['calico', 'flannel', 'cilium', 'hybridnet', 'kube-ovn'] # the minimal version of kubernetes to be installed. kube_version_min_required: v1.19.10 @@ -24,3 +28,4 @@ cluster_require: arm64: - arm64 - aarch64 + min_kernel_version: 4.9.17 diff --git a/builtin/roles/precheck/env_check/tasks/os.yaml b/builtin/roles/precheck/env_check/tasks/os.yaml index d5282b04..bfe134ed 100644 --- a/builtin/roles/precheck/env_check/tasks/os.yaml +++ b/builtin/roles/precheck/env_check/tasks/os.yaml @@ -34,4 +34,5 @@ - name: Stop if kernel version is too low assert: - that: .os.kernel_version | splitList "-" | first | semverCompare ">=4.9.17" + that: .os.kernel_version | splitList "-" | first | semverCompare (printf ">=%s" .cluster_require.min_kernel_version) + fail_msg: "kernel version: {{ .os.kernel_version }} is too low, required at least: {{ .cluster_require.min_kernel_version }} " diff --git a/cmd/controller-manager/app/options/common.go b/cmd/controller-manager/app/options/common.go index 2f14c67c..875c2009 100644 --- a/cmd/controller-manager/app/options/common.go +++ b/cmd/controller-manager/app/options/common.go @@ -17,10 +17,10 @@ limitations under the License. 
package options import ( + "context" "flag" "fmt" "os" - "os/signal" "runtime" "runtime/pprof" "strings" @@ -39,16 +39,19 @@ var ( profileOutput string ) +// AddProfilingFlags to NewControllerManagerCommand func AddProfilingFlags(flags *pflag.FlagSet) { flags.StringVar(&profileName, "profile", "none", "Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex)") flags.StringVar(&profileOutput, "profile-output", "profile.pprof", "Name of the file to write the profile to") } -func InitProfiling() error { +// InitProfiling for profileName +func InitProfiling(ctx context.Context) error { var ( f *os.File err error ) + switch profileName { case "none": return nil @@ -57,6 +60,7 @@ func InitProfiling() error { if err != nil { return err } + err = pprof.StartCPUProfile(f) if err != nil { return err @@ -76,22 +80,20 @@ func InitProfiling() error { // If the command is interrupted before the end (ctrl-c), flush the // profiling files - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) go func() { - <-c + <-ctx.Done() if err := f.Close(); err != nil { fmt.Printf("failed to close file. file: %v. error: %v \n", profileOutput, err) } if err := FlushProfiling(); err != nil { fmt.Printf("failed to FlushProfiling. file: %v. error: %v \n", profileOutput, err) } - os.Exit(0) }() return nil } +// FlushProfiling to local file func FlushProfiling() error { switch profileName { case "none": @@ -100,17 +102,20 @@ func FlushProfiling() error { pprof.StopCPUProfile() case "heap": runtime.GC() + fallthrough default: profile := pprof.Lookup(profileName) if profile == nil { return nil } + f, err := os.Create(profileOutput) if err != nil { return err } defer f.Close() + if err := profile.WriteTo(f, 0); err != nil { return err } @@ -125,11 +130,12 @@ func FlushProfiling() error { var gops bool +// AddGOPSFlags to NewControllerManagerCommand func AddGOPSFlags(flags *pflag.FlagSet) { - flags.BoolVar(&gops, "gops", false, "Whether to enable gops or not. 
When enabled this option, "+ - "controller-manager will listen on a random port on 127.0.0.1, then you can use the gops tool to list and diagnose the controller-manager currently running.") + flags.BoolVar(&gops, "gops", false, "Whether to enable gops or not. When enabled this option, controller-manager will listen on a random port on 127.0.0.1, then you can use the gops tool to list and diagnose the controller-manager currently running.") } +// InitGOPS if gops is true func InitGOPS() error { if gops { // Add agent to report additional information such as the current stack trace, Go version, memory stats, etc. @@ -138,6 +144,7 @@ func InitGOPS() error { return err } } + return nil } @@ -145,6 +152,7 @@ func InitGOPS() error { // KLOG // ====================================================================================== +// AddKlogFlags to NewControllerManagerCommand func AddKlogFlags(fs *pflag.FlagSet) { local := flag.NewFlagSet("klog", flag.ExitOnError) klog.InitFlags(local) diff --git a/cmd/controller-manager/app/options/controller_manager.go b/cmd/controller-manager/app/options/controller_manager.go index 33e07d73..bcd680d8 100644 --- a/cmd/controller-manager/app/options/controller_manager.go +++ b/cmd/controller-manager/app/options/controller_manager.go @@ -17,10 +17,10 @@ limitations under the License. package options import ( - "github.com/spf13/cobra" cliflag "k8s.io/component-base/cli/flag" ) +// ControllerManagerServerOptions for NewControllerManagerServerOptions type ControllerManagerServerOptions struct { // WorkDir is the baseDir which command find any resource (project etc.) 
WorkDir string @@ -31,6 +31,7 @@ type ControllerManagerServerOptions struct { LeaderElection bool } +// NewControllerManagerServerOptions for NewControllerManagerCommand func NewControllerManagerServerOptions() *ControllerManagerServerOptions { return &ControllerManagerServerOptions{ WorkDir: "/kubekey", @@ -38,18 +39,21 @@ func NewControllerManagerServerOptions() *ControllerManagerServerOptions { } } +// Flags add to NewControllerManagerCommand func (o *ControllerManagerServerOptions) Flags() cliflag.NamedFlagSets { fss := cliflag.NamedFlagSets{} gfs := fss.FlagSet("generic") gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey. Default current dir. ") - gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.") + gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, "+"will retain runtime data, which includes task execution status and parameters.") cfs := fss.FlagSet("controller-manager") cfs.IntVar(&o.MaxConcurrentReconciles, "max-concurrent-reconciles", o.MaxConcurrentReconciles, "The number of maximum concurrent reconciles for controller.") cfs.BoolVar(&o.LeaderElection, "leader-election", o.LeaderElection, "Whether to enable leader election for controller-manager.") + return fss } -func (o *ControllerManagerServerOptions) Complete(cmd *cobra.Command, args []string) { +// Complete for ControllerManagerServerOptions +func (o *ControllerManagerServerOptions) Complete() { // do nothing if o.MaxConcurrentReconciles == 0 { o.MaxConcurrentReconciles = 1 diff --git a/cmd/controller-manager/app/server.go b/cmd/controller-manager/app/server.go index d7e92ca5..614439c0 100644 --- a/cmd/controller-manager/app/server.go +++ b/cmd/controller-manager/app/server.go @@ -28,8 +28,10 @@ import ( "github.com/kubesphere/kubekey/v4/pkg/manager" ) +// NewControllerManagerCommand operator command. 
func NewControllerManagerCommand() *cobra.Command { o := options.NewControllerManagerServerOptions() + ctx := signals.SetupSignalHandler() cmd := &cobra.Command{ Use: "controller-manager", @@ -38,13 +40,14 @@ func NewControllerManagerCommand() *cobra.Command { if err := options.InitGOPS(); err != nil { return err } - return options.InitProfiling() + + return options.InitProfiling(ctx) }, PersistentPostRunE: func(*cobra.Command, []string) error { return options.FlushProfiling() }, - RunE: func(cmd *cobra.Command, args []string) error { - o.Complete(cmd, args) + RunE: func(*cobra.Command, []string) error { + o.Complete() // create workdir directory,if not exists _const.SetWorkDir(o.WorkDir) if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) { @@ -52,7 +55,8 @@ func NewControllerManagerCommand() *cobra.Command { return err } } - return run(signals.SetupSignalHandler(), o) + + return run(ctx, o) }, } @@ -68,6 +72,7 @@ func NewControllerManagerCommand() *cobra.Command { } cmd.AddCommand(newVersionCommand()) + return cmd } diff --git a/cmd/controller-manager/app/version.go b/cmd/controller-manager/app/version.go index d5e85cb9..84a80c45 100644 --- a/cmd/controller-manager/app/version.go +++ b/cmd/controller-manager/app/version.go @@ -26,7 +26,7 @@ func newVersionCommand() *cobra.Command { return &cobra.Command{ Use: "version", Short: "Print the version of KubeSphere controller-manager", - Run: func(cmd *cobra.Command, args []string) { + Run: func(cmd *cobra.Command, _ []string) { cmd.Println(version.Get()) }, } diff --git a/cmd/kk/app/artifact.go b/cmd/kk/app/artifact.go index e3a88421..415c9ab7 100644 --- a/cmd/kk/app/artifact.go +++ b/cmd/kk/app/artifact.go @@ -20,7 +20,6 @@ import ( "os" "github.com/spf13/cobra" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" _const "github.com/kubesphere/kubekey/v4/pkg/const" @@ -34,15 +33,17 @@ func newArtifactCommand() *cobra.Command { 
cmd.AddCommand(newArtifactExportCommand()) cmd.AddCommand(newArtifactImagesCommand()) + return cmd } func newArtifactExportCommand() *cobra.Command { o := options.NewArtifactExportOptions() + cmd := &cobra.Command{ Use: "export", Short: "Export a KubeKey offline installation package", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/artifact_export.yaml"}) if err != nil { return err @@ -55,7 +56,8 @@ func newArtifactExportCommand() *cobra.Command { return err } } - return run(signals.SetupSignalHandler(), pipeline, config, inventory) + + return run(ctx, pipeline, config, inventory) }, } @@ -63,15 +65,17 @@ func newArtifactExportCommand() *cobra.Command { for _, f := range o.Flags().FlagSets { flags.AddFlagSet(f) } + return cmd } func newArtifactImagesCommand() *cobra.Command { o := options.NewArtifactImagesOptions() + cmd := &cobra.Command{ Use: "images", Short: "push images to a registry from an artifact", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/artifact_images.yaml"}) if err != nil { return err @@ -84,7 +88,8 @@ func newArtifactImagesCommand() *cobra.Command { return err } } - return run(signals.SetupSignalHandler(), pipeline, config, inventory) + + return run(ctx, pipeline, config, inventory) }, } @@ -92,6 +97,7 @@ func newArtifactImagesCommand() *cobra.Command { for _, f := range o.Flags().FlagSets { flags.AddFlagSet(f) } + return cmd } diff --git a/cmd/kk/app/certs.go b/cmd/kk/app/certs.go index fc8bcc04..fff01d07 100644 --- a/cmd/kk/app/certs.go +++ b/cmd/kk/app/certs.go @@ -23,7 +23,6 @@ import ( "os" "github.com/spf13/cobra" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" _const "github.com/kubesphere/kubekey/v4/pkg/const" @@ 
-36,15 +35,17 @@ func newCertsCommand() *cobra.Command { } cmd.AddCommand(newCertsRenewCommand()) + return cmd } func newCertsRenewCommand() *cobra.Command { o := options.NewCertsRenewOptions() + cmd := &cobra.Command{ Use: "renew", Short: "renew a cluster certs", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/certs_renew.yaml"}) if err != nil { return err @@ -57,7 +58,8 @@ func newCertsRenewCommand() *cobra.Command { return err } } - return run(signals.SetupSignalHandler(), pipeline, config, inventory) + + return run(ctx, pipeline, config, inventory) }, } @@ -65,6 +67,7 @@ func newCertsRenewCommand() *cobra.Command { for _, f := range o.Flags().FlagSets { flags.AddFlagSet(f) } + return cmd } diff --git a/cmd/kk/app/create.go b/cmd/kk/app/create.go index 348424f7..0a17466c 100644 --- a/cmd/kk/app/create.go +++ b/cmd/kk/app/create.go @@ -23,7 +23,6 @@ import ( "os" "github.com/spf13/cobra" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" _const "github.com/kubesphere/kubekey/v4/pkg/const" @@ -36,15 +35,17 @@ func newCreateCommand() *cobra.Command { } cmd.AddCommand(newCreateClusterCommand()) + return cmd } func newCreateClusterCommand() *cobra.Command { o := options.NewCreateClusterOptions() + cmd := &cobra.Command{ Use: "cluster", Short: "Create a Kubernetes or KubeSphere cluster", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/create_cluster.yaml"}) if err != nil { return err @@ -57,7 +58,8 @@ func newCreateClusterCommand() *cobra.Command { return err } } - return run(signals.SetupSignalHandler(), pipeline, config, inventory) + + return run(ctx, pipeline, config, inventory) }, } @@ -65,6 +67,7 @@ func newCreateClusterCommand() *cobra.Command 
{ for _, f := range o.Flags().FlagSets { flags.AddFlagSet(f) } + return cmd } diff --git a/cmd/kk/app/init.go b/cmd/kk/app/init.go index 4fc9d94b..b37714c8 100644 --- a/cmd/kk/app/init.go +++ b/cmd/kk/app/init.go @@ -20,7 +20,6 @@ import ( "os" "github.com/spf13/cobra" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" _const "github.com/kubesphere/kubekey/v4/pkg/const" @@ -34,15 +33,17 @@ func newInitCommand() *cobra.Command { cmd.AddCommand(newInitOSCommand()) cmd.AddCommand(newInitRegistryCommand()) + return cmd } func newInitOSCommand() *cobra.Command { o := options.NewInitOSOptions() + cmd := &cobra.Command{ Use: "os", Short: "Init operating system", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/init_os.yaml"}) if err != nil { return err @@ -55,7 +56,8 @@ func newInitOSCommand() *cobra.Command { return err } } - return run(signals.SetupSignalHandler(), pipeline, config, inventory) + + return run(ctx, pipeline, config, inventory) }, } @@ -63,15 +65,17 @@ func newInitOSCommand() *cobra.Command { for _, f := range o.Flags().FlagSets { flags.AddFlagSet(f) } + return cmd } func newInitRegistryCommand() *cobra.Command { o := options.NewInitRegistryOptions() + cmd := &cobra.Command{ Use: "registry", Short: "Init a local image registry", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/init_registry.yaml"}) if err != nil { return err @@ -84,7 +88,8 @@ func newInitRegistryCommand() *cobra.Command { return err } } - return run(signals.SetupSignalHandler(), pipeline, config, inventory) + + return run(ctx, pipeline, config, inventory) }, } @@ -92,6 +97,7 @@ func newInitRegistryCommand() *cobra.Command { for _, f := range o.Flags().FlagSets { 
flags.AddFlagSet(f) } + return cmd } diff --git a/cmd/kk/app/options/artifact.go b/cmd/kk/app/options/artifact.go index ad6a49b2..0bc74c8a 100644 --- a/cmd/kk/app/options/artifact.go +++ b/cmd/kk/app/options/artifact.go @@ -30,16 +30,20 @@ import ( // artifact export // ====================================================================================== +// ArtifactExportOptions for NewArtifactExportOptions type ArtifactExportOptions struct { - CommonOptions + commonOptions } +// Flags add to newArtifactExportCommand func (o *ArtifactExportOptions) Flags() cliflag.NamedFlagSets { - fss := o.CommonOptions.Flags() + fss := o.commonOptions.flags() + return fss } -func (o ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { +// Complete options. create Pipeline, Config and Inventory +func (o *ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { pipeline := &kkcorev1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "artifact-export-", @@ -51,16 +55,16 @@ func (o ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kkc } // complete playbook. 
now only support one playbook - if len(args) == 1 { - o.Playbook = args[0] - } else { + if len(args) != 1 { return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath()) } + o.Playbook = args[0] pipeline.Spec = kkcorev1.PipelineSpec{ Playbook: o.Playbook, Debug: o.Debug, } + config, inventory, err := o.completeRef(pipeline) if err != nil { return nil, nil, nil, err @@ -69,25 +73,30 @@ func (o ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kkc return pipeline, config, inventory, nil } +// NewArtifactExportOptions for newArtifactExportCommand func NewArtifactExportOptions() *ArtifactExportOptions { // set default value - return &ArtifactExportOptions{CommonOptions: newCommonOptions()} + return &ArtifactExportOptions{commonOptions: newCommonOptions()} } // ====================================================================================== // artifact image // ====================================================================================== +// ArtifactImagesOptions for NewArtifactImagesOptions type ArtifactImagesOptions struct { - CommonOptions + commonOptions } +// Flags add to newArtifactImagesCommand func (o *ArtifactImagesOptions) Flags() cliflag.NamedFlagSets { - fss := o.CommonOptions.Flags() + fss := o.commonOptions.flags() + return fss } -func (o ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { +// Complete options. create Pipeline, Config and Inventory +func (o *ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { pipeline := &kkcorev1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "artifact-images-", @@ -99,17 +108,17 @@ func (o ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kkc } // complete playbook. 
now only support one playbook - if len(args) == 1 { - o.Playbook = args[0] - } else { + if len(args) != 1 { return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath()) } + o.Playbook = args[0] pipeline.Spec = kkcorev1.PipelineSpec{ Playbook: o.Playbook, Debug: o.Debug, Tags: []string{"only_image"}, } + config, inventory, err := o.completeRef(pipeline) if err != nil { return nil, nil, nil, err @@ -118,7 +127,8 @@ func (o ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kkc return pipeline, config, inventory, nil } +// NewArtifactImagesOptions for newArtifactImagesCommand func NewArtifactImagesOptions() *ArtifactImagesOptions { // set default value - return &ArtifactImagesOptions{CommonOptions: newCommonOptions()} + return &ArtifactImagesOptions{commonOptions: newCommonOptions()} } diff --git a/cmd/kk/app/options/certs.go b/cmd/kk/app/options/certs.go index 7c47cced..da9e1294 100644 --- a/cmd/kk/app/options/certs.go +++ b/cmd/kk/app/options/certs.go @@ -26,20 +26,25 @@ import ( kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" ) +// NewCertsRenewOptions for newCertsRenewCommand func NewCertsRenewOptions() *CertsRenewOptions { // set default value - return &CertsRenewOptions{CommonOptions: newCommonOptions()} + return &CertsRenewOptions{commonOptions: newCommonOptions()} } +// CertsRenewOptions for NewCertsRenewOptions type CertsRenewOptions struct { - CommonOptions + commonOptions } +// Flags add to newCertsRenewCommand func (o *CertsRenewOptions) Flags() cliflag.NamedFlagSets { - fss := o.CommonOptions.Flags() + fss := o.commonOptions.flags() + return fss } +// Complete options. 
create Pipeline, Config and Inventory func (o *CertsRenewOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { pipeline := &kkcorev1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ @@ -52,17 +57,17 @@ func (o *CertsRenewOptions) Complete(cmd *cobra.Command, args []string) (*kkcore } // complete playbook. now only support one playbook - if len(args) == 1 { - o.Playbook = args[0] - } else { + if len(args) != 1 { return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath()) } + o.Playbook = args[0] pipeline.Spec = kkcorev1.PipelineSpec{ Playbook: o.Playbook, Debug: o.Debug, Tags: []string{"certs"}, } + config, inventory, err := o.completeRef(pipeline) if err != nil { return nil, nil, nil, err diff --git a/cmd/kk/app/options/common.go b/cmd/kk/app/options/common.go index 2f14c67c..3c5571ca 100644 --- a/cmd/kk/app/options/common.go +++ b/cmd/kk/app/options/common.go @@ -17,10 +17,10 @@ limitations under the License. package options import ( + "context" "flag" "fmt" "os" - "os/signal" "runtime" "runtime/pprof" "strings" @@ -39,16 +39,19 @@ var ( profileOutput string ) +// AddProfilingFlags to NewRootCommand func AddProfilingFlags(flags *pflag.FlagSet) { flags.StringVar(&profileName, "profile", "none", "Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex)") flags.StringVar(&profileOutput, "profile-output", "profile.pprof", "Name of the file to write the profile to") } -func InitProfiling() error { +// InitProfiling for profileName +func InitProfiling(ctx context.Context) error { var ( f *os.File err error ) + switch profileName { case "none": return nil @@ -57,6 +60,7 @@ func InitProfiling() error { if err != nil { return err } + err = pprof.StartCPUProfile(f) if err != nil { return err @@ -76,22 +80,22 @@ func InitProfiling() error { // If the command is interrupted before the end (ctrl-c), flush the // profiling files - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) + go func() { - <-c + <-ctx.Done() if err := f.Close(); err != nil { fmt.Printf("failed to close file. file: %v. error: %v \n", profileOutput, err) } + if err := FlushProfiling(); err != nil { fmt.Printf("failed to FlushProfiling. file: %v. error: %v \n", profileOutput, err) } - os.Exit(0) }() return nil } +// FlushProfiling to local file func FlushProfiling() error { switch profileName { case "none": @@ -100,17 +104,20 @@ func FlushProfiling() error { pprof.StopCPUProfile() case "heap": runtime.GC() + fallthrough default: profile := pprof.Lookup(profileName) if profile == nil { return nil } + f, err := os.Create(profileOutput) if err != nil { return err } defer f.Close() + if err := profile.WriteTo(f, 0); err != nil { return err } @@ -125,11 +132,13 @@ func FlushProfiling() error { var gops bool +// AddGOPSFlags to NewRootCommand func AddGOPSFlags(flags *pflag.FlagSet) { flags.BoolVar(&gops, "gops", false, "Whether to enable gops or not. 
When enabled this option, "+ "controller-manager will listen on a random port on 127.0.0.1, then you can use the gops tool to list and diagnose the controller-manager currently running.") } +// InitGOPS if gops is true func InitGOPS() error { if gops { // Add agent to report additional information such as the current stack trace, Go version, memory stats, etc. @@ -138,6 +147,7 @@ func InitGOPS() error { return err } } + return nil } @@ -145,6 +155,7 @@ func InitGOPS() error { // KLOG // ====================================================================================== +// AddKlogFlags to NewRootCommand func AddKlogFlags(fs *pflag.FlagSet) { local := flag.NewFlagSet("klog", flag.ExitOnError) klog.InitFlags(local) diff --git a/cmd/kk/app/options/create.go b/cmd/kk/app/options/create.go index 65db0147..a9c8f810 100644 --- a/cmd/kk/app/options/create.go +++ b/cmd/kk/app/options/create.go @@ -26,27 +26,32 @@ import ( kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" ) +// NewCreateClusterOptions for newCreateClusterCommand func NewCreateClusterOptions() *CreateClusterOptions { // set default value - return &CreateClusterOptions{CommonOptions: newCommonOptions()} + return &CreateClusterOptions{commonOptions: newCommonOptions()} } +// CreateClusterOptions for NewCreateClusterOptions type CreateClusterOptions struct { - CommonOptions + commonOptions // kubernetes version which the cluster will install. Kubernetes string // ContainerRuntime for kubernetes. Such as docker, containerd etc. ContainerManager string } +// Flags add to newCreateClusterCommand func (o *CreateClusterOptions) Flags() cliflag.NamedFlagSets { - fss := o.CommonOptions.Flags() + fss := o.commonOptions.flags() kfs := fss.FlagSet("config") kfs.StringVar(&o.Kubernetes, "with-kubernetes", "", "Specify a supported version of kubernetes") kfs.StringVar(&o.ContainerManager, "container-manager", "", "Container runtime: docker, crio, containerd and isula.") + return fss } +// Complete options. 
create Pipeline, Config and Inventory func (o *CreateClusterOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { pipeline := &kkcorev1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ @@ -59,20 +64,21 @@ func (o *CreateClusterOptions) Complete(cmd *cobra.Command, args []string) (*kkc } // complete playbook. now only support one playbook - if len(args) == 1 { - o.Playbook = args[0] - } else { + if len(args) != 1 { return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath()) } + o.Playbook = args[0] pipeline.Spec = kkcorev1.PipelineSpec{ Playbook: o.Playbook, Debug: o.Debug, } + config, inventory, err := o.completeRef(pipeline) if err != nil { return nil, nil, nil, err } + if o.Kubernetes != "" { // override kube_version in config if err := config.SetValue("kube_version", o.Kubernetes); err != nil { diff --git a/cmd/kk/app/options/init.go b/cmd/kk/app/options/init.go index a910da47..1183aeea 100644 --- a/cmd/kk/app/options/init.go +++ b/cmd/kk/app/options/init.go @@ -30,16 +30,20 @@ import ( // init os // ====================================================================================== +// InitOSOptions for NewInitOSOptions type InitOSOptions struct { - CommonOptions + commonOptions } +// Flags add to newInitOSCommand func (o *InitOSOptions) Flags() cliflag.NamedFlagSets { - fss := o.CommonOptions.Flags() + fss := o.commonOptions.flags() + return fss } -func (o InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { +// Complete options. 
create Pipeline, Config and Inventory +func (o *InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { pipeline := &kkcorev1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "init-os-", @@ -51,16 +55,16 @@ func (o InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pi } // complete playbook. now only support one playbook - if len(args) == 1 { - o.Playbook = args[0] - } else { + if len(args) != 1 { return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath()) } + o.Playbook = args[0] pipeline.Spec = kkcorev1.PipelineSpec{ Playbook: o.Playbook, Debug: o.Debug, } + config, inventory, err := o.completeRef(pipeline) if err != nil { return nil, nil, nil, err @@ -69,25 +73,30 @@ func (o InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pi return pipeline, config, inventory, nil } +// NewInitOSOptions for newInitOSCommand func NewInitOSOptions() *InitOSOptions { // set default value - return &InitOSOptions{CommonOptions: newCommonOptions()} + return &InitOSOptions{commonOptions: newCommonOptions()} } // ====================================================================================== // init registry // ====================================================================================== +// InitRegistryOptions for NewInitRegistryOptions type InitRegistryOptions struct { - CommonOptions + commonOptions } +// Flags add to newInitRegistryCommand func (o *InitRegistryOptions) Flags() cliflag.NamedFlagSets { - fss := o.CommonOptions.Flags() + fss := o.commonOptions.flags() + return fss } -func (o InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { +// Complete options. 
create Pipeline, Config and Inventory +func (o *InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { pipeline := &kkcorev1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "init-registry-", @@ -99,11 +108,10 @@ func (o InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkcor } // complete playbook. now only support one playbook - if len(args) == 1 { - o.Playbook = args[0] - } else { + if len(args) != 1 { return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath()) } + o.Playbook = args[0] pipeline.Spec = kkcorev1.PipelineSpec{ Playbook: o.Playbook, @@ -117,7 +125,8 @@ func (o InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkcor return pipeline, config, inventory, nil } +// NewInitRegistryOptions for newInitRegistryCommand func NewInitRegistryOptions() *InitRegistryOptions { // set default value - return &InitRegistryOptions{CommonOptions: newCommonOptions()} + return &InitRegistryOptions{commonOptions: newCommonOptions()} } diff --git a/cmd/kk/app/options/option.go b/cmd/kk/app/options/option.go index ff755d12..4cae24f5 100644 --- a/cmd/kk/app/options/option.go +++ b/cmd/kk/app/options/option.go @@ -18,6 +18,7 @@ package options import ( "encoding/json" + "errors" "fmt" "os" "path/filepath" @@ -45,7 +46,7 @@ var defaultInventory = &kkcorev1.Inventory{ }, ObjectMeta: metav1.ObjectMeta{Name: "default"}} -type CommonOptions struct { +type commonOptions struct { // Playbook which to execute. 
Playbook string // HostFile is the path of host file @@ -64,10 +65,11 @@ type CommonOptions struct { Namespace string } -func newCommonOptions() CommonOptions { - o := CommonOptions{ +func newCommonOptions() commonOptions { + o := commonOptions{ Namespace: metav1.NamespaceDefault, } + wd, err := os.Getwd() if err != nil { klog.ErrorS(err, "get current dir error") @@ -75,10 +77,11 @@ func newCommonOptions() CommonOptions { } else { o.WorkDir = filepath.Join(wd, "kubekey") } + return o } -func (o *CommonOptions) Flags() cliflag.NamedFlagSets { +func (o *commonOptions) flags() cliflag.NamedFlagSets { fss := cliflag.NamedFlagSets{} gfs := fss.FlagSet("generic") gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey. Default current dir. ") @@ -88,10 +91,11 @@ func (o *CommonOptions) Flags() cliflag.NamedFlagSets { gfs.StringVarP(&o.InventoryFile, "inventory", "i", o.InventoryFile, "the host list file path. support *.ini") gfs.BoolVarP(&o.Debug, "debug", "d", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.") gfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "the namespace which pipeline will be executed, all reference resources(pipeline, config, inventory, task) should in the same namespace") + return fss } -func (o *CommonOptions) completeRef(pipeline *kkcorev1.Pipeline) (*kkcorev1.Config, *kkcorev1.Inventory, error) { +func (o *commonOptions) completeRef(pipeline *kkcorev1.Pipeline) (*kkcorev1.Config, *kkcorev1.Inventory, error) { if !filepath.IsAbs(o.WorkDir) { wd, err := os.Getwd() if err != nil { @@ -99,7 +103,7 @@ func (o *CommonOptions) completeRef(pipeline *kkcorev1.Pipeline) (*kkcorev1.Conf } o.WorkDir = filepath.Join(wd, o.WorkDir) } - + // complete config config, err := o.genConfig() if err != nil { return nil, nil, fmt.Errorf("generate config error: %w", err) @@ -112,7 +116,7 @@ func (o *CommonOptions) completeRef(pipeline 
*kkcorev1.Pipeline) (*kkcorev1.Conf APIVersion: config.APIVersion, ResourceVersion: config.ResourceVersion, } - + // complete inventory inventory, err := o.genInventory() if err != nil { return nil, nil, fmt.Errorf("generate inventory error: %w", err) @@ -130,7 +134,7 @@ func (o *CommonOptions) completeRef(pipeline *kkcorev1.Pipeline) (*kkcorev1.Conf } // genConfig generate config by ConfigFile and set value by command args. -func (o *CommonOptions) genConfig() (*kkcorev1.Config, error) { +func (o *commonOptions) genConfig() (*kkcorev1.Config, error) { config := defaultConfig.DeepCopy() if o.ConfigFile != "" { cdata, err := os.ReadFile(o.ConfigFile) @@ -142,13 +146,15 @@ func (o *CommonOptions) genConfig() (*kkcorev1.Config, error) { return nil, fmt.Errorf("unmarshal config file error: %w", err) } } - // set by command args + // set value by command args if o.Namespace != "" { config.Namespace = o.Namespace } if wd, err := config.GetValue("work_dir"); err == nil && wd != nil { // if work_dir is defined in config, use it. otherwise use current dir. - o.WorkDir = wd.(string) + if workDir, ok := wd.(string); ok { + o.WorkDir = workDir + } } else if err := config.SetValue("work_dir", o.WorkDir); err != nil { return nil, fmt.Errorf("work_dir to config error: %w", err) } @@ -162,7 +168,7 @@ func (o *CommonOptions) genConfig() (*kkcorev1.Config, error) { for _, setVal := range strings.Split(unescapeString(s), ",") { i := strings.Index(setVal, "=") if i == 0 || i == -1 { - return nil, fmt.Errorf("--set value should be k=v") + return nil, errors.New("--set value should be k=v") } if err := setValue(config, setVal[:i], setVal[i+1:]); err != nil { return nil, fmt.Errorf("--set value to config error: %w", err) @@ -174,21 +180,23 @@ func (o *CommonOptions) genConfig() (*kkcorev1.Config, error) { } // genConfig generate config by ConfigFile and set value by command args. 
-func (o *CommonOptions) genInventory() (*kkcorev1.Inventory, error) { +func (o *commonOptions) genInventory() (*kkcorev1.Inventory, error) { inventory := defaultInventory.DeepCopy() if o.InventoryFile != "" { cdata, err := os.ReadFile(o.InventoryFile) if err != nil { klog.V(4).ErrorS(err, "read config file error") + return nil, err } inventory = &kkcorev1.Inventory{} if err := yaml.Unmarshal(cdata, inventory); err != nil { klog.V(4).ErrorS(err, "unmarshal config file error") + return nil, err } } - // set by command args + // set value by command args if o.Namespace != "" { inventory.Namespace = o.Namespace } @@ -197,9 +205,9 @@ func (o *CommonOptions) genInventory() (*kkcorev1.Inventory, error) { } // setValue set key: val in config. -// if val is json string. convert to map or slice -// if val is TRUE,YES,Y. convert to bool type true. -// if val is FALSE,NO,N. convert to bool type false. +// If val is json string. convert to map or slice +// If val is TRUE,YES,Y. convert to bool type true. +// If val is FALSE,NO,N. convert to bool type false. 
func setValue(config *kkcorev1.Config, key, val string) error { switch { case strings.HasPrefix(val, "{") && strings.HasSuffix(val, "{"): @@ -208,6 +216,7 @@ func setValue(config *kkcorev1.Config, key, val string) error { if err != nil { return err } + return config.SetValue(key, value) case strings.HasPrefix(val, "[") && strings.HasSuffix(val, "]"): var value []any @@ -215,6 +224,7 @@ func setValue(config *kkcorev1.Config, key, val string) error { if err != nil { return err } + return config.SetValue(key, value) case strings.EqualFold(val, "TRUE") || strings.EqualFold(val, "YES") || strings.EqualFold(val, "Y"): return config.SetValue(key, true) diff --git a/cmd/kk/app/options/pipeline.go b/cmd/kk/app/options/pipeline.go index 455649c9..0624fb8a 100644 --- a/cmd/kk/app/options/pipeline.go +++ b/cmd/kk/app/options/pipeline.go @@ -5,24 +5,28 @@ import ( cliflag "k8s.io/component-base/cli/flag" ) +// PipelineOptions for NewPipelineOptions type PipelineOptions struct { Name string Namespace string WorkDir string } -func NewPipelineOption() *PipelineOptions { +// NewPipelineOptions for newPipelineCommand +func NewPipelineOptions() *PipelineOptions { return &PipelineOptions{ Namespace: metav1.NamespaceDefault, WorkDir: "/kubekey", } } +// Flags add to newPipelineCommand func (o *PipelineOptions) Flags() cliflag.NamedFlagSets { fss := cliflag.NamedFlagSets{} pfs := fss.FlagSet("pipeline flags") pfs.StringVar(&o.Name, "name", o.Name, "name of pipeline") pfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "namespace of pipeline") pfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey. Default current dir. 
") + return fss } diff --git a/cmd/kk/app/options/precheck.go b/cmd/kk/app/options/precheck.go index a24922c7..49dacc86 100644 --- a/cmd/kk/app/options/precheck.go +++ b/cmd/kk/app/options/precheck.go @@ -26,19 +26,23 @@ import ( kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" ) +// NewPreCheckOptions for newPreCheckCommand func NewPreCheckOptions() *PreCheckOptions { // set default value - return &PreCheckOptions{CommonOptions: newCommonOptions()} + return &PreCheckOptions{commonOptions: newCommonOptions()} } +// PreCheckOptions for NewPreCheckOptions type PreCheckOptions struct { - CommonOptions + commonOptions } +// Flags add to newPreCheckCommand func (o *PreCheckOptions) Flags() cliflag.NamedFlagSets { - return o.CommonOptions.Flags() + return o.commonOptions.flags() } +// Complete options. create Pipeline, Config and Inventory func (o *PreCheckOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { pipeline := &kkcorev1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ diff --git a/cmd/kk/app/options/run.go b/cmd/kk/app/options/run.go index dc3547fe..814d8b73 100644 --- a/cmd/kk/app/options/run.go +++ b/cmd/kk/app/options/run.go @@ -26,8 +26,9 @@ import ( kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" ) -type KubekeyRunOptions struct { - CommonOptions +// KubeKeyRunOptions for NewKubeKeyRunOptions +type KubeKeyRunOptions struct { + commonOptions // ProjectAddr is the storage for executable packages (in Ansible format). // When starting with http or https, it will be obtained from a Git repository. // When starting with file path, it will be obtained from the local path. 
@@ -49,16 +50,19 @@ type KubekeyRunOptions struct { SkipTags []string } -func NewKubeKeyRunOptions() *KubekeyRunOptions { +// NewKubeKeyRunOptions for newRunCommand +func NewKubeKeyRunOptions() *KubeKeyRunOptions { // add default values - o := &KubekeyRunOptions{ - CommonOptions: newCommonOptions(), + o := &KubeKeyRunOptions{ + commonOptions: newCommonOptions(), } + return o } -func (o *KubekeyRunOptions) Flags() cliflag.NamedFlagSets { - fss := o.CommonOptions.Flags() +// Flags add to newRunCommand +func (o *KubeKeyRunOptions) Flags() cliflag.NamedFlagSets { + fss := o.commonOptions.flags() gitfs := fss.FlagSet("project") gitfs.StringVar(&o.ProjectAddr, "project-addr", o.ProjectAddr, "the storage for executable packages (in Ansible format)."+ " When starting with http or https, it will be obtained from a Git repository."+ @@ -75,20 +79,20 @@ func (o *KubekeyRunOptions) Flags() cliflag.NamedFlagSets { return fss } -func (o *KubekeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { +// Complete options. create Pipeline, Config and Inventory +func (o *KubeKeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) { pipeline := &kkcorev1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "run-", Namespace: o.Namespace, - Annotations: map[string]string{}, + Annotations: make(map[string]string), }, } // complete playbook. 
now only support one playbook - if len(args) == 1 { - o.Playbook = args[0] - } else { + if len(args) != 1 { return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath()) } + o.Playbook = args[0] pipeline.Spec = kkcorev1.PipelineSpec{ Project: kkcorev1.PipelineProject{ diff --git a/cmd/kk/app/pipeline.go b/cmd/kk/app/pipeline.go index 11a1ddcd..0ab3af60 100644 --- a/cmd/kk/app/pipeline.go +++ b/cmd/kk/app/pipeline.go @@ -8,7 +8,6 @@ import ( "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" @@ -18,12 +17,12 @@ import ( ) func newPipelineCommand() *cobra.Command { - o := options.NewPipelineOption() + o := options.NewPipelineOptions() cmd := &cobra.Command{ Use: "pipeline", Short: "Executor a pipeline in kubernetes", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(*cobra.Command, []string) error { _const.SetWorkDir(o.WorkDir) restconfig, err := ctrl.GetConfig() if err != nil { @@ -34,28 +33,31 @@ func newPipelineCommand() *cobra.Command { if err != nil { return fmt.Errorf("could not get rest config: %w", err) } + client, err := ctrlclient.New(restconfig, ctrlclient.Options{ Scheme: _const.Scheme, }) if err != nil { return fmt.Errorf("could not create client: %w", err) } - ctx := signals.SetupSignalHandler() + // get pipeline var pipeline = new(kkcorev1.Pipeline) - var config = new(kkcorev1.Config) - var inventory = new(kkcorev1.Inventory) if err := client.Get(ctx, ctrlclient.ObjectKey{ Name: o.Name, Namespace: o.Namespace, }, pipeline); err != nil { return err } + // get config + var config = new(kkcorev1.Config) if err := client.Get(ctx, ctrlclient.ObjectKey{ Name: pipeline.Spec.ConfigRef.Name, Namespace: pipeline.Spec.ConfigRef.Namespace, }, config); err != nil { return err } + // 
get inventory + var inventory = new(kkcorev1.Inventory) if err := client.Get(ctx, ctrlclient.ObjectKey{ Name: pipeline.Spec.InventoryRef.Name, Namespace: pipeline.Spec.InventoryRef.Namespace, @@ -76,5 +78,6 @@ func newPipelineCommand() *cobra.Command { for _, f := range o.Flags().FlagSets { fs.AddFlagSet(f) } + return cmd } diff --git a/cmd/kk/app/precheck.go b/cmd/kk/app/precheck.go index 243af594..fdaf84ea 100644 --- a/cmd/kk/app/precheck.go +++ b/cmd/kk/app/precheck.go @@ -23,7 +23,6 @@ import ( "os" "github.com/spf13/cobra" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" _const "github.com/kubesphere/kubekey/v4/pkg/const" @@ -37,8 +36,7 @@ func newPreCheckCommand() *cobra.Command { Short: "Check if the nodes is eligible for cluster deployment.", Long: "the tags can specify check items. support: etcd, os, network, cri, nfs.", RunE: func(cmd *cobra.Command, args []string) error { - args = append(args, "playbooks/precheck.yaml") - pipeline, config, inventory, err := o.Complete(cmd, args) + pipeline, config, inventory, err := o.Complete(cmd, append(args, "playbooks/precheck.yaml")) if err != nil { return err } @@ -50,7 +48,8 @@ func newPreCheckCommand() *cobra.Command { return err } } - return run(signals.SetupSignalHandler(), pipeline, config, inventory) + + return run(ctx, pipeline, config, inventory) }, } @@ -58,6 +57,7 @@ func newPreCheckCommand() *cobra.Command { for _, f := range o.Flags().FlagSets { flags.AddFlagSet(f) } + return cmd } diff --git a/cmd/kk/app/root.go b/cmd/kk/app/root.go index b3f4f950..a4225731 100644 --- a/cmd/kk/app/root.go +++ b/cmd/kk/app/root.go @@ -18,11 +18,15 @@ package app import ( "github.com/spf13/cobra" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" ) -var internalCommand = []*cobra.Command{} +// ctx cancel by shutdown signal +var ctx = signals.SetupSignalHandler() + +var internalCommand = 
make([]*cobra.Command, 0) func registerInternalCommand(command *cobra.Command) { for _, c := range internalCommand { @@ -34,6 +38,7 @@ func registerInternalCommand(command *cobra.Command) { internalCommand = append(internalCommand, command) } +// NewRootCommand console command. func NewRootCommand() *cobra.Command { cmd := &cobra.Command{ Use: "kk", @@ -42,12 +47,14 @@ func NewRootCommand() *cobra.Command { if err := options.InitGOPS(); err != nil { return err } - return options.InitProfiling() + + return options.InitProfiling(ctx) }, PersistentPostRunE: func(*cobra.Command, []string) error { return options.FlushProfiling() }, } + cmd.SetContext(ctx) // add common flag flags := cmd.PersistentFlags() @@ -60,5 +67,6 @@ func NewRootCommand() *cobra.Command { cmd.AddCommand(newVersionCommand()) // internal command cmd.AddCommand(internalCommand...) + return cmd } diff --git a/cmd/kk/app/run.go b/cmd/kk/app/run.go index c27ec134..2f6d0975 100644 --- a/cmd/kk/app/run.go +++ b/cmd/kk/app/run.go @@ -25,7 +25,6 @@ import ( "k8s.io/client-go/rest" "k8s.io/klog/v2" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" "github.com/kubesphere/kubekey/v4/cmd/kk/app/options" kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" @@ -53,13 +52,15 @@ func newRunCommand() *cobra.Command { return err } } - return run(signals.SetupSignalHandler(), kk, config, inventory) + + return run(ctx, kk, config, inventory) }, } for _, f := range o.Flags().FlagSets { cmd.Flags().AddFlagSet(f) } + return cmd } @@ -75,18 +76,23 @@ func run(ctx context.Context, pipeline *kkcorev1.Pipeline, config *kkcorev1.Conf return fmt.Errorf("could not get runtime-client: %w", err) } - // create config, inventory and pipeline + // create config if err := client.Create(ctx, config); err != nil { klog.ErrorS(err, "Create config error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) + return err } + // create inventory if err := client.Create(ctx, 
inventory); err != nil { klog.ErrorS(err, "Create inventory error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) + return err } + // create pipeline pipeline.Status.Phase = kkcorev1.PipelinePhaseRunning if err := client.Create(ctx, pipeline); err != nil { klog.ErrorS(err, "Create pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) + return err } diff --git a/cmd/kk/app/version.go b/cmd/kk/app/version.go index d5e85cb9..84a80c45 100644 --- a/cmd/kk/app/version.go +++ b/cmd/kk/app/version.go @@ -26,7 +26,7 @@ func newVersionCommand() *cobra.Command { return &cobra.Command{ Use: "version", Short: "Print the version of KubeSphere controller-manager", - Run: func(cmd *cobra.Command, args []string) { + Run: func(cmd *cobra.Command, _ []string) { cmd.Println(version.Get()) }, } diff --git a/config/helm/Chart.yaml b/config/kubekey/Chart.yaml similarity index 100% rename from config/helm/Chart.yaml rename to config/kubekey/Chart.yaml diff --git a/config/helm/crds/kubekey.kubesphere.io_configs.yaml b/config/kubekey/crds/kubekey.kubesphere.io_configs.yaml similarity index 96% rename from config/helm/crds/kubekey.kubesphere.io_configs.yaml rename to config/kubekey/crds/kubekey.kubesphere.io_configs.yaml index 07c0b439..ebee9237 100644 --- a/config/helm/crds/kubekey.kubesphere.io_configs.yaml +++ b/config/kubekey/crds/kubekey.kubesphere.io_configs.yaml @@ -17,6 +17,7 @@ spec: - name: v1 schema: openAPIV3Schema: + description: Config store global vars for playbook. 
properties: apiVersion: description: |- diff --git a/config/helm/crds/kubekey.kubesphere.io_inventories.yaml b/config/kubekey/crds/kubekey.kubesphere.io_inventories.yaml similarity index 93% rename from config/helm/crds/kubekey.kubesphere.io_inventories.yaml rename to config/kubekey/crds/kubekey.kubesphere.io_inventories.yaml index 86dc7db9..dc0ecaf8 100644 --- a/config/helm/crds/kubekey.kubesphere.io_inventories.yaml +++ b/config/kubekey/crds/kubekey.kubesphere.io_inventories.yaml @@ -17,6 +17,7 @@ spec: - name: v1 schema: openAPIV3Schema: + description: Inventory store hosts vars for playbook. properties: apiVersion: description: |- @@ -36,9 +37,11 @@ spec: metadata: type: object spec: + description: InventorySpec of Inventory properties: groups: additionalProperties: + description: InventoryGroup of Inventory properties: groups: items: diff --git a/config/helm/crds/kubekey.kubesphere.io_pipelines.yaml b/config/kubekey/crds/kubekey.kubesphere.io_pipelines.yaml similarity index 99% rename from config/helm/crds/kubekey.kubesphere.io_pipelines.yaml rename to config/kubekey/crds/kubekey.kubesphere.io_pipelines.yaml index 0d205b68..775f6659 100644 --- a/config/helm/crds/kubekey.kubesphere.io_pipelines.yaml +++ b/config/kubekey/crds/kubekey.kubesphere.io_pipelines.yaml @@ -30,6 +30,7 @@ spec: name: v1 schema: openAPIV3Schema: + description: Pipeline resource executor a playbook. properties: apiVersion: description: |- @@ -49,6 +50,7 @@ spec: metadata: type: object spec: + description: PipelineSpec of pipeline. properties: configRef: description: ConfigRef is the global variable configuration for playbook @@ -1972,14 +1974,19 @@ spec: - playbook type: object status: + description: PipelineStatus of Pipeline properties: failedDetail: description: FailedDetail will record the failed tasks. items: + description: PipelineFailedDetail store failed message when pipeline + run failed. properties: hosts: description: failed Hosts Result of failed task. 
items: + description: PipelineFailedDetailHost detail failed message + for each host. properties: host: description: Host name of failed task. diff --git a/config/helm/templates/_helpers.tpl b/config/kubekey/templates/_helpers.tpl similarity index 100% rename from config/helm/templates/_helpers.tpl rename to config/kubekey/templates/_helpers.tpl diff --git a/config/helm/templates/_tplvalues.tpl b/config/kubekey/templates/_tplvalues.tpl similarity index 100% rename from config/helm/templates/_tplvalues.tpl rename to config/kubekey/templates/_tplvalues.tpl diff --git a/config/helm/templates/deployment.yaml b/config/kubekey/templates/deployment.yaml similarity index 100% rename from config/helm/templates/deployment.yaml rename to config/kubekey/templates/deployment.yaml diff --git a/config/helm/templates/role.yaml b/config/kubekey/templates/role.yaml similarity index 100% rename from config/helm/templates/role.yaml rename to config/kubekey/templates/role.yaml diff --git a/config/helm/templates/serviceaccount.yaml b/config/kubekey/templates/serviceaccount.yaml similarity index 100% rename from config/helm/templates/serviceaccount.yaml rename to config/kubekey/templates/serviceaccount.yaml diff --git a/config/helm/values.yaml b/config/kubekey/values.yaml similarity index 100% rename from config/helm/values.yaml rename to config/kubekey/values.yaml diff --git a/docs/zh/005-module.md b/docs/zh/005-module.md index cd86157f..264ba262 100644 --- a/docs/zh/005-module.md +++ b/docs/zh/005-module.md @@ -99,6 +99,7 @@ gen_cert: 拉取镜像到本地目录, 或推送镜像到远程服务器 ```yaml image: + skip_tls_verify: true pull: ["image1", "image2"] push: registry: local.kubekey @@ -106,6 +107,7 @@ image: password: password namespace_override: new_namespace ``` +**skip_tls_verify**: 跳过证书认证. 默认true. **pull**: 拉取镜像到本地工作目录, 非必填. 值采用[模板语法](101-syntax.md)编写, 对每个的host单独计算值. **push**: 推送工作目录中的镜像到远程仓库, 非必填. 值采用[模板语法](101-syntax.md)编写, 对每个的host单独计算值. **registry**: 远程仓库地址, 必填. 值采用[模板语法](101-syntax.md)编写, 对每个的host单独计算值. 
diff --git a/go.mod b/go.mod index 9b454569..84ada696 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( github.com/go-git/go-git/v5 v5.11.0 github.com/google/gops v0.3.28 github.com/opencontainers/image-spec v1.1.0 - github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 github.com/schollz/progressbar/v3 v3.14.5 github.com/spf13/cobra v1.8.0 @@ -84,6 +83,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.18.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect diff --git a/pkg/apis/core/v1/config_types.go b/pkg/apis/core/v1/config_types.go index 277494b2..3d3a1a61 100644 --- a/pkg/apis/core/v1/config_types.go +++ b/pkg/apis/core/v1/config_types.go @@ -17,6 +17,7 @@ limitations under the License. package v1 import ( + "fmt" "reflect" "strings" @@ -30,6 +31,7 @@ import ( // +k8s:openapi-gen=true // +kubebuilder:resource:scope=Namespaced +// Config store global vars for playbook. 
type Config struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -38,6 +40,7 @@ type Config struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// ConfigList of Config type ConfigList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` @@ -60,15 +63,26 @@ func (c *Config) SetValue(key string, value any) error { // set value var f func(input map[string]any, key []string, value any) any f = func(input map[string]any, key []string, value any) any { - if len(key) == 1 { - input[key[0]] = value - } else if len(key) > 1 { - if v, ok := input[key[0]]; ok && reflect.TypeOf(v).Kind() == reflect.Map { - input[key[0]] = f(v.(map[string]any), key[1:], value) - } else { - input[key[0]] = f(make(map[string]any), key[1:], value) - } + if len(key) == 0 { + return input } + + firstKey := key[0] + if len(key) == 1 { + input[firstKey] = value + + return input + } + + // Handle nested maps + if v, ok := input[firstKey]; ok && reflect.TypeOf(v).Kind() == reflect.Map { + if vd, ok := v.(map[string]any); ok { + input[firstKey] = f(vd, key[1:], value) + } + } else { + input[firstKey] = f(make(map[string]any), key[1:], value) + } + return input } data, err := json.Marshal(f(configMap, strings.Split(key, "."), value)) @@ -76,6 +90,7 @@ func (c *Config) SetValue(key string, value any) error { return err } c.Spec.Raw = data + return nil } @@ -86,6 +101,7 @@ func (c *Config) GetValue(key string) (any, error) { if err := json.Unmarshal(c.Spec.Raw, &configMap); err != nil { return nil, err } + // get all value if key == "" { return configMap, nil } @@ -95,9 +111,10 @@ func (c *Config) GetValue(key string) (any, error) { r, ok := result.(map[string]any) if !ok { // cannot find value - return nil, nil + return nil, fmt.Errorf("cannot find key: %s", key) } result = r[k] } + return result, nil } diff --git a/pkg/apis/core/v1/config_types_test.go b/pkg/apis/core/v1/config_types_test.go index 
95202208..52a4ed44 100644 --- a/pkg/apis/core/v1/config_types_test.go +++ b/pkg/apis/core/v1/config_types_test.go @@ -54,7 +54,9 @@ func TestSetValue(t *testing.T) { in := Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1}`)}} t.Run(tc.name, func(t *testing.T) { err := in.SetValue(tc.key, tc.val) - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } assert.Equal(t, tc.except, in) }) } @@ -71,7 +73,7 @@ func TestGetValue(t *testing.T) { name: "all value", key: "", config: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1}`)}}, - except: map[string]interface{}{ + except: map[string]any{ "a": int64(1), }, }, @@ -103,8 +105,7 @@ func TestGetValue(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - value, err := tc.config.GetValue(tc.key) - assert.NoError(t, err) + value, _ := tc.config.GetValue(tc.key) assert.Equal(t, tc.except, value) }) } diff --git a/pkg/apis/core/v1/inventory_types.go b/pkg/apis/core/v1/inventory_types.go index 27365889..a2d5879a 100644 --- a/pkg/apis/core/v1/inventory_types.go +++ b/pkg/apis/core/v1/inventory_types.go @@ -21,14 +21,17 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// InventoryHost of Inventory type InventoryHost map[string]runtime.RawExtension +// InventoryGroup of Inventory type InventoryGroup struct { Groups []string `json:"groups,omitempty"` Hosts []string `json:"hosts,omitempty"` Vars runtime.RawExtension `json:"vars,omitempty"` } +// InventorySpec of Inventory type InventorySpec struct { // Hosts is all nodes Hosts InventoryHost `json:"hosts,omitempty"` @@ -46,6 +49,7 @@ type InventorySpec struct { // +k8s:openapi-gen=true // +kubebuilder:resource:scope=Namespaced +// Inventory store hosts vars for playbook. 
type Inventory struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -55,6 +59,7 @@ type Inventory struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// InventoryList of Inventory type InventoryList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` diff --git a/pkg/apis/core/v1/pipeline_types.go b/pkg/apis/core/v1/pipeline_types.go index e56e9eb6..fa2d93e4 100644 --- a/pkg/apis/core/v1/pipeline_types.go +++ b/pkg/apis/core/v1/pipeline_types.go @@ -21,12 +21,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// PipelinePhase of Pipeline type PipelinePhase string const ( + // PipelinePhasePending of Pipeline. Pipeline has created but not deal PipelinePhasePending PipelinePhase = "Pending" + // PipelinePhaseRunning of Pipeline. deal Pipeline. PipelinePhaseRunning PipelinePhase = "Running" - PipelinePhaseFailed PipelinePhase = "Failed" + // PipelinePhaseFailed of Pipeline. once Task run failed. + PipelinePhaseFailed PipelinePhase = "Failed" + // PipelinePhaseSucceed of Pipeline. all Tasks run success. PipelinePhaseSucceed PipelinePhase = "Succeed" ) @@ -35,6 +40,7 @@ const ( BuiltinsProjectAnnotation = "kubekey.kubesphere.io/builtins-project" ) +// PipelineSpec of pipeline. type PipelineSpec struct { // Project is storage for executable packages // +optional @@ -118,6 +124,7 @@ type PipelineJobSpec struct { VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` } +// PipelineProject respect which playbook store. type PipelineProject struct { // Addr is the storage for executable packages (in Ansible file format). // When starting with http or https, it will be obtained from a Git repository. @@ -141,6 +148,7 @@ type PipelineProject struct { Token string `json:"token,omitempty"` } +// PipelineStatus of Pipeline type PipelineStatus struct { // TaskResult total related tasks execute result. 
TaskResult PipelineTaskResult `json:"taskResult,omitempty"` @@ -152,6 +160,7 @@ type PipelineStatus struct { FailedDetail []PipelineFailedDetail `json:"failedDetail,omitempty"` } +// PipelineTaskResult of Pipeline type PipelineTaskResult struct { // Total number of tasks. Total int `json:"total,omitempty"` @@ -163,6 +172,7 @@ type PipelineTaskResult struct { Ignored int `json:"ignored,omitempty"` } +// PipelineFailedDetail store failed message when pipeline run failed. type PipelineFailedDetail struct { // Task name of failed task. Task string `json:"task,omitempty"` @@ -170,6 +180,7 @@ type PipelineFailedDetail struct { Hosts []PipelineFailedDetailHost `json:"hosts,omitempty"` } +// PipelineFailedDetailHost detail failed message for each host. type PipelineFailedDetailHost struct { // Host name of failed task. Host string `json:"host,omitempty"` @@ -189,6 +200,7 @@ type PipelineFailedDetailHost struct { // +kubebuilder:printcolumn:name="Total",type="integer",JSONPath=".status.taskResult.total" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// Pipeline resource executor a playbook. type Pipeline struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -199,6 +211,7 @@ type Pipeline struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// PipelineList of Pipeline type PipelineList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` diff --git a/pkg/apis/core/v1alpha1/register.go b/pkg/apis/core/v1alpha1/register.go index 0469f018..986c483a 100644 --- a/pkg/apis/core/v1alpha1/register.go +++ b/pkg/apis/core/v1alpha1/register.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package v1alpha1 is the internal version. 
should not register in kubernetes +// Package v1alpha1 is the internal version, should not register in kubernetes // +k8s:deepcopy-gen=package,register // +groupName=kubekey.kubesphere.io // +kubebuilder:skip diff --git a/pkg/apis/core/v1alpha1/task_types.go b/pkg/apis/core/v1alpha1/task_types.go index 78ea1be7..9fa1dd1d 100644 --- a/pkg/apis/core/v1alpha1/task_types.go +++ b/pkg/apis/core/v1alpha1/task_types.go @@ -21,13 +21,19 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// TaskPhase of Task type TaskPhase string const ( + // TaskPhasePending of Task. Task has created but not deal TaskPhasePending TaskPhase = "Pending" + // TaskPhaseRunning of Task. deal Task TaskPhaseRunning TaskPhase = "Running" + // TaskPhaseSuccess of Task. Module of Task run success in each hosts. TaskPhaseSuccess TaskPhase = "Success" - TaskPhaseFailed TaskPhase = "Failed" + // TaskPhaseFailed of Task. once host run failed. + TaskPhaseFailed TaskPhase = "Failed" + // TaskPhaseIgnored of Task. once host run failed and set ignore_errors. 
TaskPhaseIgnored TaskPhase = "Ignored" ) @@ -36,7 +42,8 @@ const ( TaskAnnotationRole = "kubesphere.io/role" ) -type KubeKeyTaskSpec struct { +// TaskSpec of Task +type TaskSpec struct { Name string `json:"name,omitempty"` Hosts []string `json:"hosts,omitempty"` IgnoreError *bool `json:"ignoreError,omitempty"` @@ -50,17 +57,20 @@ type KubeKeyTaskSpec struct { Register string `json:"register,omitempty"` } +// Module of Task type Module struct { Name string `json:"name,omitempty"` Args runtime.RawExtension `json:"args,omitempty"` } +// TaskStatus of Task type TaskStatus struct { RestartCount int `json:"restartCount,omitempty"` Phase TaskPhase `json:"phase,omitempty"` HostResults []TaskHostResult `json:"hostResults,omitempty"` } +// TaskHostResult each host result for task type TaskHostResult struct { Host string `json:"host,omitempty"` Stdout string `json:"stdout,omitempty"` @@ -71,29 +81,35 @@ type TaskHostResult struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:resource:scope=Namespaced +// Task of pipeline type Task struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec KubeKeyTaskSpec `json:"spec,omitempty"` - Status TaskStatus `json:"status,omitempty"` + Spec TaskSpec `json:"spec,omitempty"` + Status TaskStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// TaskList for Task type TaskList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []Task `json:"items"` } +// IsComplete if Task IsSucceed or IsFailed func (t Task) IsComplete() bool { return t.IsSucceed() || t.IsFailed() } +// IsSucceed if Task.Status.Phase TaskPhaseSuccess or TaskPhaseIgnored func (t Task) IsSucceed() bool { return t.Status.Phase == TaskPhaseSuccess || t.Status.Phase == TaskPhaseIgnored } + +// IsFailed Task.Status.Phase is failed when reach the retries func (t Task) IsFailed() bool { return t.Status.Phase == 
TaskPhaseFailed && t.Spec.Retries <= t.Status.RestartCount } diff --git a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go index 51e44433..b00b302a 100644 --- a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -24,43 +24,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeKeyTaskSpec) DeepCopyInto(out *KubeKeyTaskSpec) { - *out = *in - if in.Hosts != nil { - in, out := &in.Hosts, &out.Hosts - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IgnoreError != nil { - in, out := &in.IgnoreError, &out.IgnoreError - *out = new(bool) - **out = **in - } - if in.When != nil { - in, out := &in.When, &out.When - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.FailedWhen != nil { - in, out := &in.FailedWhen, &out.FailedWhen - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.Loop.DeepCopyInto(&out.Loop) - in.Module.DeepCopyInto(&out.Module) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeKeyTaskSpec. -func (in *KubeKeyTaskSpec) DeepCopy() *KubeKeyTaskSpec { - if in == nil { - return nil - } - out := new(KubeKeyTaskSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Module) DeepCopyInto(out *Module) { *out = *in @@ -151,6 +114,43 @@ func (in *TaskList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IgnoreError != nil { + in, out := &in.IgnoreError, &out.IgnoreError + *out = new(bool) + **out = **in + } + if in.When != nil { + in, out := &in.When, &out.When + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FailedWhen != nil { + in, out := &in.FailedWhen, &out.FailedWhen + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Loop.DeepCopyInto(&out.Loop) + in.Module.DeepCopyInto(&out.Module) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskSpec. +func (in *TaskSpec) DeepCopy() *TaskSpec { + if in == nil { + return nil + } + out := new(TaskSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TaskStatus) DeepCopyInto(out *TaskStatus) { *out = *in diff --git a/pkg/apis/project/v1/base.go b/pkg/apis/project/v1/base.go index a1a833b1..275670bd 100644 --- a/pkg/apis/project/v1/base.go +++ b/pkg/apis/project/v1/base.go @@ -16,6 +16,7 @@ limitations under the License. package v1 +// Base defined in project. type Base struct { Name string `yaml:"name,omitempty"` diff --git a/pkg/apis/project/v1/block.go b/pkg/apis/project/v1/block.go index 7ab1b84a..24659d7f 100644 --- a/pkg/apis/project/v1/block.go +++ b/pkg/apis/project/v1/block.go @@ -23,15 +23,17 @@ import ( "k8s.io/klog/v2" ) +// Block defined in project. type Block struct { BlockBase - // If has Block, Task should be empty + // If it has Block, Task should be empty Task IncludeTasks string `yaml:"include_tasks,omitempty"` BlockInfo } +// BlockBase defined in project. type BlockBase struct { Base `yaml:",inline"` Conditional `yaml:",inline"` @@ -41,12 +43,14 @@ type BlockBase struct { Delegatable `yaml:",inline"` } +// BlockInfo defined in project. 
type BlockInfo struct { Block []Block `yaml:"block,omitempty"` Rescue []Block `yaml:"rescue,omitempty"` Always []Block `yaml:"always,omitempty"` } +// Task defined in project. type Task struct { AsyncVal int `yaml:"async,omitempty"` ChangedWhen When `yaml:"changed_when,omitempty"` @@ -62,11 +66,12 @@ type Task struct { // deprecated, used to be loop and loop_args but loop has been repurposed //LoopWith string `yaml:"loop_with"` - // - UnknownFiled map[string]any `yaml:"-"` + // UnknownField store undefined filed + UnknownField map[string]any `yaml:"-"` } -func (b *Block) UnmarshalYAML(unmarshal func(interface{}) error) error { +// UnmarshalYAML yaml string to block. +func (b *Block) UnmarshalYAML(unmarshal func(any) error) error { // fill baseInfo var bb BlockBase if err := unmarshal(&bb); err == nil { @@ -76,58 +81,110 @@ func (b *Block) UnmarshalYAML(unmarshal func(interface{}) error) error { var m map[string]any if err := unmarshal(&m); err != nil { klog.Errorf("unmarshal data to map error: %v", err) + return err } - if v, ok := m["include_tasks"]; ok { - b.IncludeTasks = v.(string) - } else if _, ok := m["block"]; ok { - // render block - var bi BlockInfo - err := unmarshal(&bi) + if includeTasks, ok := handleIncludeTasks(m); ok { + // Set the IncludeTasks field if "include_tasks" exists and is valid. + b.IncludeTasks = includeTasks + + return nil + } + + switch { + case m["block"] != nil: + // If the "block" key exists, unmarshal it into BlockInfo and set the BlockInfo field. + bi, err := handleBlock(m, unmarshal) if err != nil { - klog.Errorf("unmarshal data to block error: %v", err) return err } b.BlockInfo = bi - } else { - // render task - var t Task - err := unmarshal(&t) + default: + // If neither "include_tasks" nor "block" are present, treat the data as a task. 
+ t, err := handleTask(m, unmarshal) if err != nil { - klog.Errorf("unmarshal data to task error: %v", err) return err } b.Task = t - deleteExistField(reflect.TypeOf(Block{}), m) - // set unknown flied to task.UnknownFiled - b.UnknownFiled = m + // Set any remaining unknown fields to the Task's UnknownField. + b.UnknownField = m } return nil } +// handleIncludeTasks checks if the "include_tasks" key exists in the map and is of type string. +// If so, it returns the string value and true, otherwise it returns an empty string and false. +func handleIncludeTasks(m map[string]any) (string, bool) { + if v, ok := m["include_tasks"]; ok { + if it, ok := v.(string); ok { + return it, true + } + } + + return "", false +} + +// handleBlock attempts to unmarshal the block data into a BlockInfo structure. +// If successful, it returns the BlockInfo and nil. If an error occurs, it logs the error and returns it. +func handleBlock(_ map[string]any, unmarshal func(any) error) (BlockInfo, error) { + var bi BlockInfo + if err := unmarshal(&bi); err != nil { + klog.Errorf("unmarshal data to block error: %v", err) + + return bi, err + } + + return bi, nil +} + +// handleTask attempts to unmarshal the task data into a Task structure. +// If successful, it deletes existing fields from the map, logs the error if it occurs, and returns the Task and nil. 
+func handleTask(m map[string]any, unmarshal func(any) error) (Task, error) { + var t Task + if err := unmarshal(&t); err != nil { + klog.Errorf("unmarshal data to task error: %v", err) + + return t, err + } + deleteExistField(reflect.TypeOf(Block{}), m) + + return t, nil +} + func deleteExistField(rt reflect.Type, m map[string]any) { - for i := 0; i < rt.NumField(); i++ { + for i := range rt.NumField() { field := rt.Field(i) if field.Anonymous { deleteExistField(field.Type, m) } else { - yamlTag := rt.Field(i).Tag.Get("yaml") - if yamlTag != "" { - for _, t := range strings.Split(yamlTag, ",") { - if _, ok := m[t]; ok { - delete(m, t) - break - } - } - } else { - t := strings.ToUpper(rt.Field(i).Name[:1]) + rt.Field(i).Name[1:] - if _, ok := m[t]; ok { - delete(m, t) - break - } + if isFound := deleteField(rt.Field(i), m); isFound { + break } } } } + +// deleteField find and delete the filed, return the field if found. +func deleteField(field reflect.StructField, m map[string]any) bool { + yamlTag := field.Tag.Get("yaml") + if yamlTag != "" { + for _, t := range strings.Split(yamlTag, ",") { + if _, ok := m[t]; ok { + delete(m, t) + + return true + } + } + } else { + t := strings.ToUpper(field.Name[:1]) + field.Name[1:] + if _, ok := m[t]; ok { + delete(m, t) + + return true + } + } + + return false +} diff --git a/pkg/apis/project/v1/collectionsearch.go b/pkg/apis/project/v1/collectionsearch.go index eeca5cc9..3dd2ce62 100644 --- a/pkg/apis/project/v1/collectionsearch.go +++ b/pkg/apis/project/v1/collectionsearch.go @@ -16,6 +16,7 @@ limitations under the License. package v1 +// CollectionSearch defined in project. type CollectionSearch struct { Collections []string `yaml:"collections,omitempty"` } diff --git a/pkg/apis/project/v1/conditional.go b/pkg/apis/project/v1/conditional.go index bbc1d946..e30e827c 100644 --- a/pkg/apis/project/v1/conditional.go +++ b/pkg/apis/project/v1/conditional.go @@ -17,27 +17,34 @@ limitations under the License. 
package v1 import ( - "fmt" + "errors" ) +// Conditional defined in project. type Conditional struct { When When `yaml:"when,omitempty"` } +// When defined in project. type When struct { Data []string } -func (w *When) UnmarshalYAML(unmarshal func(interface{}) error) error { +// UnmarshalYAML yaml string to when +func (w *When) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err == nil { w.Data = []string{s} + return nil } + var a []string if err := unmarshal(&a); err == nil { w.Data = a + return nil } - return fmt.Errorf("unsupported type, excepted string or array of strings") + + return errors.New("unsupported type, excepted string or array of strings") } diff --git a/pkg/apis/project/v1/delegatable.go b/pkg/apis/project/v1/delegatable.go index f6fa7c79..feacf29a 100644 --- a/pkg/apis/project/v1/delegatable.go +++ b/pkg/apis/project/v1/delegatable.go @@ -16,6 +16,7 @@ limitations under the License. package v1 +// Delegatable defined in project. type Delegatable struct { DelegateTo string `yaml:"delegate_to,omitempty"` DelegateFacts bool `yaml:"delegate_facts,omitempty"` diff --git a/pkg/apis/project/v1/handler.go b/pkg/apis/project/v1/handler.go index c91db2f8..84da4d44 100644 --- a/pkg/apis/project/v1/handler.go +++ b/pkg/apis/project/v1/handler.go @@ -16,6 +16,7 @@ limitations under the License. package v1 +// Handler defined in project. type Handler struct { //Task diff --git a/pkg/apis/project/v1/loop.go b/pkg/apis/project/v1/loop.go index ba2cff71..e180d297 100644 --- a/pkg/apis/project/v1/loop.go +++ b/pkg/apis/project/v1/loop.go @@ -16,6 +16,7 @@ limitations under the License. package v1 +// LoopControl defined in project. 
type LoopControl struct { LoopVar string `yaml:"loop_var,omitempty"` IndexVar string `yaml:"index_var,omitempty"` diff --git a/pkg/apis/project/v1/notifiable.go b/pkg/apis/project/v1/notifiable.go index 46c9ff15..ea59c064 100644 --- a/pkg/apis/project/v1/notifiable.go +++ b/pkg/apis/project/v1/notifiable.go @@ -16,6 +16,7 @@ limitations under the License. package v1 +// Notifiable defined in project. type Notifiable struct { Notify string `yaml:"notify,omitempty"` } diff --git a/pkg/apis/project/v1/play.go b/pkg/apis/project/v1/play.go index 396f02d2..5107902e 100644 --- a/pkg/apis/project/v1/play.go +++ b/pkg/apis/project/v1/play.go @@ -16,8 +16,11 @@ limitations under the License. package v1 -import "fmt" +import ( + "errors" +) +// Play defined in project. type Play struct { ImportPlaybook string `yaml:"import_playbook,omitempty"` @@ -56,38 +59,50 @@ type Play struct { Order string `yaml:"order,omitempty"` } +// PlaySerial defined in project. type PlaySerial struct { Data []any } -func (s *PlaySerial) UnmarshalYAML(unmarshal func(interface{}) error) error { +// UnmarshalYAML yaml string to serial. +func (s *PlaySerial) UnmarshalYAML(unmarshal func(any) error) error { var as []any if err := unmarshal(&as); err == nil { s.Data = as + return nil } + var a any if err := unmarshal(&a); err == nil { s.Data = []any{a} + return nil } - return fmt.Errorf("unsupported type, excepted any or array") + + return errors.New("unsupported type, excepted any or array") } +// PlayHost defined in project. 
type PlayHost struct { Hosts []string } -func (p *PlayHost) UnmarshalYAML(unmarshal func(interface{}) error) error { +// UnmarshalYAML yaml string to play +func (p *PlayHost) UnmarshalYAML(unmarshal func(any) error) error { var hs []string if err := unmarshal(&hs); err == nil { p.Hosts = hs + return nil } + var h string if err := unmarshal(&h); err == nil { p.Hosts = []string{h} + return nil } - return fmt.Errorf("unsupported type, excepted string or string array") + + return errors.New("unsupported type, excepted string or string array") } diff --git a/pkg/apis/project/v1/play_test.go b/pkg/apis/project/v1/play_test.go index 8e01226b..f5b5957c 100644 --- a/pkg/apis/project/v1/play_test.go +++ b/pkg/apis/project/v1/play_test.go @@ -173,7 +173,7 @@ func TestUnmarshalYaml(t *testing.T) { Tasks: []Block{ { BlockBase: BlockBase{Base: Base{Name: "test"}}, - Task: Task{UnknownFiled: map[string]any{"custom-module": "abc"}}, + Task: Task{UnknownField: map[string]any{"custom-module": "abc"}}, }, }, }, @@ -200,7 +200,7 @@ func TestUnmarshalYaml(t *testing.T) { BlockInfo: BlockInfo{ Block: []Block{{ BlockBase: BlockBase{Base: Base{Name: "test | test"}}, - Task: Task{UnknownFiled: map[string]any{"custom-module": "abc"}}, + Task: Task{UnknownField: map[string]any{"custom-module": "abc"}}, }}, }, }, @@ -214,7 +214,9 @@ func TestUnmarshalYaml(t *testing.T) { t.Run(tc.name, func(t *testing.T) { var pb []Play err := yaml.Unmarshal(tc.data, &pb) - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } assert.Equal(t, tc.excepted, pb) }) } diff --git a/pkg/apis/project/v1/playbook.go b/pkg/apis/project/v1/playbook.go index af41848e..8a2fcae1 100644 --- a/pkg/apis/project/v1/playbook.go +++ b/pkg/apis/project/v1/playbook.go @@ -16,24 +16,30 @@ limitations under the License. package v1 -import "fmt" +import ( + "errors" +) +// Playbook defined in project. type Playbook struct { Play []Play } +// Validate playbook. delete empty ImportPlaybook which has convert to play. 
func (p *Playbook) Validate() error { - var newPlay = make([]Play, len(p.Play)) - for i, play := range p.Play { - // delete import_playbook import_playbook is a link, should be ignored. + var newPlay = make([]Play, 0) + for _, play := range p.Play { + // import_playbook is a link, should be ignored. if play.ImportPlaybook != "" { continue } + if len(play.PlayHost.Hosts) == 0 { - return fmt.Errorf("playbook's hosts must not be empty") + return errors.New("playbook's hosts must not be empty") } - newPlay[i] = play + newPlay = append(newPlay, play) } p.Play = newPlay + return nil } diff --git a/pkg/apis/project/v1/playbook_test.go b/pkg/apis/project/v1/playbook_test.go index 58c9f984..39310071 100644 --- a/pkg/apis/project/v1/playbook_test.go +++ b/pkg/apis/project/v1/playbook_test.go @@ -41,8 +41,7 @@ func TestValidate(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - err := tc.playbook.Validate() - assert.Error(t, err) + assert.Error(t, tc.playbook.Validate()) }) } } diff --git a/pkg/apis/project/v1/role.go b/pkg/apis/project/v1/role.go index 3e33b249..86fd3bc9 100644 --- a/pkg/apis/project/v1/role.go +++ b/pkg/apis/project/v1/role.go @@ -16,10 +16,12 @@ limitations under the License. package v1 +// Role defined in project. type Role struct { RoleInfo } +// RoleInfo defined in project. type RoleInfo struct { Base `yaml:",inline"` Conditional `yaml:",inline"` @@ -32,15 +34,19 @@ type RoleInfo struct { Block []Block } -func (r *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { +// UnmarshalYAML yaml string to role. 
+func (r *Role) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err == nil { r.Role = s + return nil } + var info RoleInfo if err := unmarshal(&info); err == nil { r.RoleInfo = info + return nil } diff --git a/pkg/apis/project/v1/taggable.go b/pkg/apis/project/v1/taggable.go index 0c7d0299..7f36d84b 100644 --- a/pkg/apis/project/v1/taggable.go +++ b/pkg/apis/project/v1/taggable.go @@ -18,6 +18,18 @@ package v1 import "slices" +// the special tags +const ( + // AlwaysTag it always run + AlwaysTag = "always" + // NeverTag it never run + NeverTag = "never" + // AllTag represent all tags + AllTag = "all" + // TaggedTag represent which has tags + TaggedTag = "tagged" +) + // Taggable if it should executor type Taggable struct { Tags []string `yaml:"tags,omitempty"` @@ -28,27 +40,28 @@ func (t Taggable) IsEnabled(onlyTags []string, skipTags []string) bool { shouldRun := true if len(onlyTags) > 0 { - if slices.Contains(t.Tags, "always") { + switch { + case slices.Contains(t.Tags, AlwaysTag): shouldRun = true - } else if slices.Contains(onlyTags, "all") && !slices.Contains(t.Tags, "never") { + case slices.Contains(onlyTags, AllTag) && !slices.Contains(t.Tags, NeverTag): shouldRun = true - } else if slices.Contains(onlyTags, "tagged") && len(onlyTags) > 0 && !slices.Contains(t.Tags, "never") { + case slices.Contains(onlyTags, TaggedTag) && !slices.Contains(t.Tags, NeverTag): shouldRun = true - } else if !isdisjoint(onlyTags, t.Tags) { + case !isdisjoint(onlyTags, t.Tags): shouldRun = true - } else { + default: shouldRun = false } } if shouldRun && len(skipTags) > 0 { - if slices.Contains(skipTags, "all") { - if !slices.Contains(t.Tags, "always") || !slices.Contains(skipTags, "always") { - shouldRun = false - } - } else if !isdisjoint(skipTags, t.Tags) { + switch { + case slices.Contains(skipTags, AllTag) && + (!slices.Contains(t.Tags, AlwaysTag) || !slices.Contains(skipTags, AlwaysTag)): shouldRun = false - } else if 
slices.Contains(skipTags, "tagged") && len(skipTags) > 0 { + case !isdisjoint(skipTags, t.Tags): + shouldRun = false + case slices.Contains(skipTags, TaggedTag) && len(skipTags) > 0: shouldRun = false } } @@ -59,13 +72,15 @@ func (t Taggable) IsEnabled(onlyTags []string, skipTags []string) bool { // JoinTag the child block should inherit tag for parent block func JoinTag(child, parent Taggable) Taggable { for _, tag := range parent.Tags { - if tag == "always" { // skip inherit "always" tag + if tag == AlwaysTag { // skip inherit "always" tag continue } + if !slices.Contains(child.Tags, tag) { child.Tags = append(child.Tags, tag) } } + return child } @@ -76,5 +91,6 @@ func isdisjoint(a, b []string) bool { return false } } + return true } diff --git a/pkg/connector/connector.go b/pkg/connector/connector.go index 3baeda33..cf6f8f79 100644 --- a/pkg/connector/connector.go +++ b/pkg/connector/connector.go @@ -25,6 +25,7 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/exec" + "k8s.io/utils/ptr" _const "github.com/kubesphere/kubekey/v4/pkg/const" "github.com/kubesphere/kubekey/v4/pkg/variable" @@ -32,7 +33,6 @@ import ( // connectedType for connector const ( - connectedDefault = "" connectedSSH = "ssh" connectedLocal = "local" connectedKubernetes = "kubernetes" @@ -60,6 +60,7 @@ type Connector interface { // vars contains all inventory for host. It's best to define the connector info in inventory file. 
func NewConnector(host string, connectorVars map[string]any) (Connector, error) { connectedType, _ := variable.StringVar(nil, connectorVars, _const.VariableConnectorType) + switch connectedType { case connectedLocal: return &localConnector{Cmd: exec.New()}, nil @@ -74,7 +75,7 @@ func NewConnector(host string, connectorVars map[string]any) (Connector, error) portParam, err := variable.IntVar(nil, connectorVars, _const.VariableConnectorPort) if err != nil { klog.V(4).Infof("connector port is empty use: %v", defaultSSHPort) - portParam = defaultSSHPort + portParam = ptr.To(defaultSSHPort) } // get user in connector variable. if empty, set default user: root. userParam, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorUser) @@ -93,9 +94,10 @@ func NewConnector(host string, connectorVars map[string]any) (Connector, error) klog.V(4).Infof("ssh public key is empty, use: %s", defaultSSHPrivateKey) keyParam = defaultSSHPrivateKey } + return &sshConnector{ Host: hostParam, - Port: portParam, + Port: *portParam, User: userParam, Password: passwdParam, PrivateKey: keyParam, @@ -105,6 +107,7 @@ func NewConnector(host string, connectorVars map[string]any) (Connector, error) if err != nil && host != _const.VariableLocalHost { return nil, err } + return &kubernetesConnector{Cmd: exec.New(), clusterName: host, kubeconfig: kubeconfig}, nil default: localHost, _ := os.Hostname() @@ -121,7 +124,7 @@ func NewConnector(host string, connectorVars map[string]any) (Connector, error) portParam, err := variable.IntVar(nil, connectorVars, _const.VariableConnectorPort) if err != nil { klog.V(4).Infof("connector port is empty use: %v", defaultSSHPort) - portParam = defaultSSHPort + portParam = ptr.To(defaultSSHPort) } // get user in connector variable. if empty, set default user: root. 
userParam, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorUser) @@ -143,7 +146,7 @@ func NewConnector(host string, connectorVars map[string]any) (Connector, error) return &sshConnector{ Host: hostParam, - Port: portParam, + Port: *portParam, User: userParam, Password: passwdParam, PrivateKey: keyParam, @@ -153,7 +156,7 @@ func NewConnector(host string, connectorVars map[string]any) (Connector, error) // GatherFacts get host info. type GatherFacts interface { - Info(ctx context.Context) (map[string]any, error) + HostInfo(ctx context.Context) (map[string]any, error) } // isLocalIP check if given ipAddr is local network ip @@ -161,8 +164,10 @@ func isLocalIP(ipAddr string) bool { addrs, err := net.InterfaceAddrs() if err != nil { klog.ErrorS(err, "get network address error") + return false } + for _, addr := range addrs { var ip net.IP switch v := addr.(type) { @@ -172,11 +177,14 @@ func isLocalIP(ipAddr string) bool { ip = v.IP default: klog.V(4).InfoS("unknown address type", "address", addr.String()) + continue } + if ip.String() == ipAddr { return true } } + return false } diff --git a/pkg/connector/helper.go b/pkg/connector/helper.go index 848a7da4..60f4f383 100644 --- a/pkg/connector/helper.go +++ b/pkg/connector/helper.go @@ -35,6 +35,7 @@ func convertBytesToMap(bs []byte, split string) map[string]string { config[key] = value } } + return config } diff --git a/pkg/connector/kubernetes_ connector.go b/pkg/connector/kubernetes_connector.go similarity index 72% rename from pkg/connector/kubernetes_ connector.go rename to pkg/connector/kubernetes_connector.go index 421c9c0a..7213a64b 100644 --- a/pkg/connector/kubernetes_ connector.go +++ b/pkg/connector/kubernetes_connector.go @@ -36,73 +36,86 @@ var _ Connector = &kubernetesConnector{} type kubernetesConnector struct { clusterName string kubeconfig string - rootDir string + homeDir string Cmd exec.Interface } -func (c *kubernetesConnector) Init(ctx context.Context) error { +// Init 
connector, create home dir in local for each kubernetes. +func (c *kubernetesConnector) Init(_ context.Context) error { if c.clusterName == _const.VariableLocalHost && c.kubeconfig == "" { - // use default kubeconfig. skip klog.V(4).InfoS("kubeconfig is not set, using local kubeconfig") + // use default kubeconfig. skip return nil } - // set rootDir - c.rootDir = filepath.Join(_const.GetWorkDir(), _const.KubernetesDir, c.clusterName) - if _, err := os.Stat(c.rootDir); err != nil && os.IsNotExist(err) { - if err := os.MkdirAll(c.rootDir, os.ModePerm); err != nil { + // set home dir for each kubernetes + c.homeDir = filepath.Join(_const.GetWorkDir(), _const.KubernetesDir, c.clusterName) + if _, err := os.Stat(c.homeDir); err != nil && os.IsNotExist(err) { + if err := os.MkdirAll(c.homeDir, os.ModePerm); err != nil { klog.V(4).ErrorS(err, "Failed to create local dir", "cluster", c.clusterName) + // if dir is not exist, create it. return err } } - // set kubeconfig to root dir - kubeconfigPath := filepath.Join(c.rootDir, kubeconfigRelPath) + // create kubeconfig path in home dir + kubeconfigPath := filepath.Join(c.homeDir, kubeconfigRelPath) if _, err := os.Stat(kubeconfigPath); err != nil && os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(kubeconfigPath), os.ModePerm); err != nil { klog.V(4).ErrorS(err, "Failed to create local dir", "cluster", c.clusterName) + return err } } + // write kubeconfig to home dir if err := os.WriteFile(kubeconfigPath, []byte(c.kubeconfig), os.ModePerm); err != nil { klog.V(4).ErrorS(err, "Failed to create kubeconfig file", "cluster", c.clusterName) + return err } + return nil } -func (c *kubernetesConnector) Close(ctx context.Context) error { +// Close connector, do nothing +func (c *kubernetesConnector) Close(_ context.Context) error { return nil } // PutFile copy src file to dst file. src is the local filename, dst is the local filename. 
// Typically, the configuration file for each cluster may be different, // and it may be necessary to keep them in separate directories locally. -func (c *kubernetesConnector) PutFile(ctx context.Context, src []byte, dst string, mode fs.FileMode) error { - dst = filepath.Join(c.rootDir, dst) +func (c *kubernetesConnector) PutFile(_ context.Context, src []byte, dst string, mode fs.FileMode) error { + dst = filepath.Join(c.homeDir, dst) if _, err := os.Stat(filepath.Dir(dst)); err != nil && os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(dst), mode); err != nil { klog.V(4).ErrorS(err, "Failed to create local dir", "dst_file", dst) + return err } } + return os.WriteFile(dst, src, mode) } // FetchFile copy src file to dst writer. src is the local filename, dst is the local writer. func (c *kubernetesConnector) FetchFile(ctx context.Context, src string, dst io.Writer) error { // add "--kubeconfig" to src command + klog.V(5).InfoS("exec local command", "cmd", src) command := c.Cmd.CommandContext(ctx, "/bin/sh", "-c", src) - command.SetDir(c.rootDir) - command.SetEnv([]string{"KUBECONFIG=" + filepath.Join(c.rootDir, kubeconfigRelPath)}) + command.SetDir(c.homeDir) + command.SetEnv([]string{"KUBECONFIG=" + filepath.Join(c.homeDir, kubeconfigRelPath)}) command.SetStdout(dst) _, err := command.CombinedOutput() + return err } +// ExecuteCommand in a kubernetes cluster func (c *kubernetesConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) { // add "--kubeconfig" to src command - klog.V(4).InfoS("exec local command", "cmd", cmd) + klog.V(5).InfoS("exec local command", "cmd", cmd) command := c.Cmd.CommandContext(ctx, "/bin/sh", "-c", cmd) - command.SetDir(c.rootDir) - command.SetEnv([]string{"KUBECONFIG=" + filepath.Join(c.rootDir, kubeconfigRelPath)}) + command.SetDir(c.homeDir) + command.SetEnv([]string{"KUBECONFIG=" + filepath.Join(c.homeDir, kubeconfigRelPath)}) + return command.CombinedOutput() } diff --git a/pkg/connector/local_connector.go 
b/pkg/connector/local_connector.go index e958259a..ad62185f 100644 --- a/pkg/connector/local_connector.go +++ b/pkg/connector/local_connector.go @@ -39,46 +39,57 @@ type localConnector struct { Cmd exec.Interface } -func (c *localConnector) Init(ctx context.Context) error { +// Init connector. do nothing +func (c *localConnector) Init(context.Context) error { return nil } -func (c *localConnector) Close(ctx context.Context) error { +// Close connector. do nothing +func (c *localConnector) Close(context.Context) error { return nil } // PutFile copy src file to dst file. src is the local filename, dst is the local filename. -func (c *localConnector) PutFile(ctx context.Context, src []byte, dst string, mode fs.FileMode) error { +func (c *localConnector) PutFile(_ context.Context, src []byte, dst string, mode fs.FileMode) error { if _, err := os.Stat(filepath.Dir(dst)); err != nil && os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(dst), mode); err != nil { klog.V(4).ErrorS(err, "Failed to create local dir", "dst_file", dst) + return err } } + return os.WriteFile(dst, src, mode) } // FetchFile copy src file to dst writer. src is the local filename, dst is the local writer. 
-func (c *localConnector) FetchFile(ctx context.Context, src string, dst io.Writer) error { +func (c *localConnector) FetchFile(_ context.Context, src string, dst io.Writer) error { var err error file, err := os.Open(src) if err != nil { klog.V(4).ErrorS(err, "Failed to read local file failed", "src_file", src) + return err } + if _, err := io.Copy(dst, file); err != nil { klog.V(4).ErrorS(err, "Failed to copy local file", "src_file", src) + return err } + return nil } +// ExecuteCommand in local host func (c *localConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) { - klog.V(4).InfoS("exec local command", "cmd", cmd) + klog.V(5).InfoS("exec local command", "cmd", cmd) + return c.Cmd.CommandContext(ctx, "/bin/sh", "-c", cmd).CombinedOutput() } -func (c *localConnector) Info(ctx context.Context) (map[string]any, error) { +// HostInfo for GatherFacts +func (c *localConnector) HostInfo(ctx context.Context) (map[string]any, error) { switch runtime.GOOS { case "linux": // os information @@ -123,6 +134,7 @@ func (c *localConnector) Info(ctx context.Context) (map[string]any, error) { }, nil default: klog.V(4).ErrorS(nil, "Unsupported platform", "platform", runtime.GOOS) - return nil, nil + + return make(map[string]any), nil } } diff --git a/pkg/connector/local_connector_test.go b/pkg/connector/local_connector_test.go index b0d3e5ed..2e9883b1 100644 --- a/pkg/connector/local_connector_test.go +++ b/pkg/connector/local_connector_test.go @@ -18,6 +18,7 @@ package connector import ( "context" + "errors" "fmt" "strings" "testing" @@ -32,16 +33,17 @@ func newFakeLocalConnector(runCmd string, output string) *localConnector { return &localConnector{ Cmd: &testingexec.FakeExec{CommandScript: []testingexec.FakeCommandAction{ func(cmd string, args ...string) exec.Cmd { - if strings.TrimSpace(fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))) == fmt.Sprintf("/bin/sh -c %s", runCmd) { + if strings.TrimSpace(fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))) 
== "/bin/sh -c "+runCmd { return &testingexec.FakeCmd{ CombinedOutputScript: []testingexec.FakeAction{func() ([]byte, []byte, error) { return []byte(output), nil, nil }}, } } + return &testingexec.FakeCmd{ CombinedOutputScript: []testingexec.FakeAction{func() ([]byte, []byte, error) { - return nil, nil, fmt.Errorf("error command") + return nil, nil, errors.New("error command") }}, } }, @@ -63,7 +65,7 @@ func TestSshConnector_ExecuteCommand(t *testing.T) { { name: "execute command failed", cmd: "echo 'hello1'", - exceptedErr: fmt.Errorf("error command"), + exceptedErr: errors.New("error command"), }, } diff --git a/pkg/connector/ssh_connector.go b/pkg/connector/ssh_connector.go index 40f76b68..b41b265c 100644 --- a/pkg/connector/ssh_connector.go +++ b/pkg/connector/ssh_connector.go @@ -19,10 +19,12 @@ package connector import ( "bytes" "context" + "errors" "fmt" "io" "io/fs" "os" + "os/user" "path/filepath" "time" @@ -34,11 +36,20 @@ import ( ) const ( - defaultSSHPort = 22 - defaultSSHUser = "root" - defaultSSHPrivateKey = "/root/.ssh/id_rsa" + defaultSSHPort = 22 + defaultSSHUser = "root" ) +var defaultSSHPrivateKey string + +func init() { + if currentUser, err := user.Current(); err == nil { + defaultSSHPrivateKey = filepath.Join(currentUser.HomeDir, ".ssh/id_rsa") + } else { + defaultSSHPrivateKey = filepath.Join(defaultSSHUser, ".ssh/id_rsa") + } +} + var _ Connector = &sshConnector{} var _ GatherFacts = &sshConnector{} @@ -51,10 +62,12 @@ type sshConnector struct { client *ssh.Client } -func (c *sshConnector) Init(ctx context.Context) error { +// Init connector, get ssh.Client +func (c *sshConnector) Init(context.Context) error { if c.Host == "" { - return fmt.Errorf("host is not set") + return errors.New("host is not set") } + var auth []ssh.AuthMethod if c.Password != "" { auth = append(auth, ssh.Password(c.Password)) @@ -79,6 +92,7 @@ func (c *sshConnector) Init(ctx context.Context) error { }) if err != nil { klog.V(4).ErrorS(err, "Dial ssh server failed", 
"host", c.Host, "port", c.Port) + return err } c.client = sshClient @@ -86,16 +100,18 @@ func (c *sshConnector) Init(ctx context.Context) error { return nil } -func (c *sshConnector) Close(ctx context.Context) error { +// Close connector +func (c *sshConnector) Close(context.Context) error { return c.client.Close() } // PutFile to remote node. src is the file bytes. dst is the remote filename -func (c *sshConnector) PutFile(ctx context.Context, src []byte, dst string, mode fs.FileMode) error { +func (c *sshConnector) PutFile(_ context.Context, src []byte, dst string, mode fs.FileMode) error { // create sftp client sftpClient, err := sftp.NewClient(c.client) if err != nil { klog.V(4).ErrorS(err, "Failed to create sftp client") + return err } defer sftpClient.Close() @@ -103,29 +119,35 @@ func (c *sshConnector) PutFile(ctx context.Context, src []byte, dst string, mode if _, err := sftpClient.Stat(filepath.Dir(dst)); err != nil && os.IsNotExist(err) { if err := sftpClient.MkdirAll(filepath.Dir(dst)); err != nil { klog.V(4).ErrorS(err, "Failed to create remote dir", "remote_file", dst) + return err } } + rf, err := sftpClient.Create(dst) if err != nil { klog.V(4).ErrorS(err, "Failed to create remote file", "remote_file", dst) + return err } defer rf.Close() if _, err = rf.Write(src); err != nil { klog.V(4).ErrorS(err, "Failed to write content to remote file", "remote_file", dst) + return err } + return rf.Chmod(mode) } // FetchFile from remote node. src is the remote filename, dst is the local writer. 
-func (c *sshConnector) FetchFile(ctx context.Context, src string, dst io.Writer) error { +func (c *sshConnector) FetchFile(_ context.Context, src string, dst io.Writer) error { // create sftp client sftpClient, err := sftp.NewClient(c.client) if err != nil { klog.V(4).ErrorS(err, "Failed to create sftp client", "remote_file", src) + return err } defer sftpClient.Close() @@ -133,23 +155,28 @@ func (c *sshConnector) FetchFile(ctx context.Context, src string, dst io.Writer) rf, err := sftpClient.Open(src) if err != nil { klog.V(4).ErrorS(err, "Failed to open file", "remote_file", src) + return err } defer rf.Close() if _, err := io.Copy(dst, rf); err != nil { klog.V(4).ErrorS(err, "Failed to copy file", "remote_file", src) + return err } + return nil } -func (c *sshConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) { - klog.V(4).InfoS("exec ssh command", "cmd", cmd, "host", c.Host) +// ExecuteCommand in remote host +func (c *sshConnector) ExecuteCommand(_ context.Context, cmd string) ([]byte, error) { + klog.V(5).InfoS("exec ssh command", "cmd", cmd, "host", c.Host) // create ssh session session, err := c.client.NewSession() if err != nil { klog.V(4).ErrorS(err, "Failed to create ssh session") + return nil, err } defer session.Close() @@ -157,7 +184,8 @@ func (c *sshConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, return session.CombinedOutput(cmd) } -func (c *sshConnector) Info(ctx context.Context) (map[string]any, error) { +// HostInfo for GatherFacts +func (c *sshConnector) HostInfo(ctx context.Context) (map[string]any, error) { // os information osVars := make(map[string]any) var osRelease bytes.Buffer diff --git a/pkg/const/common.go b/pkg/const/common.go index 1a49b0f3..3ea218d5 100644 --- a/pkg/const/common.go +++ b/pkg/const/common.go @@ -48,7 +48,7 @@ const ( // === From system generate === // VariableInventoryName the value which defined in inventory.spec.host. 
VariableInventoryName = "inventory_name" // VariableHostName the value is node hostname, default VariableInventoryName. - // if VariableInventoryName is "localhost". try to set the actual name. + // If VariableInventoryName is "localhost". try to set the actual name. VariableHostName = "hostname" // VariableGlobalHosts the value is host_var which defined in inventory. VariableGlobalHosts = "inventory_hosts" @@ -76,5 +76,6 @@ const ( // === From GatherFact === ) const ( // === From runtime === + // VariableItem for "loop" argument when run a task. VariableItem = "item" ) diff --git a/pkg/const/helper.go b/pkg/const/helper.go index 9bede4e6..2eab9950 100644 --- a/pkg/const/helper.go +++ b/pkg/const/helper.go @@ -42,6 +42,7 @@ func GetRuntimeDir() string { return filepath.Join(workDir, RuntimeDir) } +// RuntimeDirFromPipeline returns the absolute path of the runtime directory for specify Pipeline func RuntimeDirFromPipeline(obj kkcorev1.Pipeline) string { return filepath.Join(GetRuntimeDir(), kkcorev1.SchemeGroupVersion.String(), RuntimePipelineDir, obj.Namespace, obj.Name) diff --git a/pkg/const/scheme.go b/pkg/const/scheme.go index 490af0a1..19e7f5e5 100644 --- a/pkg/const/scheme.go +++ b/pkg/const/scheme.go @@ -33,7 +33,6 @@ var ( // NOTE: If you are copying this file to start a new api group, STOP! Copy the // extensions group instead. This Scheme is special and should appear ONLY in // the api group, unless you really know what you're doing. - // TODO(lavalamp): make the above error impossible. 
Scheme = newScheme() // Codecs provides access to encoding and decoding for the scheme @@ -51,5 +50,6 @@ func newScheme() *runtime.Scheme { utilruntime.Must(kkcorev1.AddToScheme(s)) utilruntime.Must(kkcorev1alpha1.AddToScheme(s)) utilruntime.Must(kkcorev1alpha1.AddConversionFuncs(s)) + return s } diff --git a/pkg/const/workdir.go b/pkg/const/workdir.go index 00d19f81..6514d910 100644 --- a/pkg/const/workdir.go +++ b/pkg/const/workdir.go @@ -52,6 +52,8 @@ workdir/ | | | | |-- inventory.yaml | |-- kubekey/ +|-- artifact-path... +|-- images | |-- kubernetes/ @@ -117,9 +119,15 @@ const RuntimePipelineVariableDir = "variable" // inventory.yaml is the data of Inventory resource -// "kubekey" is the default directory name under the working directory. It is used to store +// ArtifactDir is the default directory name under the working directory. It is used to store // files required when executing the kubekey command (such as: docker, etcd, image packages, etc.). // These files will be downloaded locally and distributed to remote nodes. +const ArtifactDir = "kubekey" + +// artifact-path store artifact package. + +// ArtifactImagesDir store images files. contains blobs and manifests. 
+const ArtifactImagesDir = "images" // KubernetesDir represents the remote host directory for each kubernetes connection const KubernetesDir = "kubernetes" diff --git a/pkg/controllers/pipeline_controller.go b/pkg/controllers/pipeline_controller.go index 00b23b80..addee6ce 100644 --- a/pkg/controllers/pipeline_controller.go +++ b/pkg/controllers/pipeline_controller.go @@ -46,6 +46,7 @@ const ( defaultServiceAccount = "kk-executor" ) +// PipelineReconciler reconcile pipeline type PipelineReconciler struct { *runtime.Scheme ctrlclient.Client @@ -62,13 +63,16 @@ func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct if err != nil { if apierrors.IsNotFound(err) { klog.V(5).InfoS("pipeline not found", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) + return ctrl.Result{}, nil } + return ctrl.Result{}, err } if pipeline.DeletionTimestamp != nil { klog.V(5).InfoS("pipeline is deleting", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) + return ctrl.Result{}, nil } @@ -78,6 +82,7 @@ func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct pipeline.Status.Phase = kkcorev1.PipelinePhasePending if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(excepted)); err != nil { klog.V(5).ErrorS(err, "update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) + return ctrl.Result{}, err } case kkcorev1.PipelinePhasePending: @@ -85,15 +90,18 @@ func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct pipeline.Status.Phase = kkcorev1.PipelinePhaseRunning if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(excepted)); err != nil { klog.V(5).ErrorS(err, "update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) + return ctrl.Result{}, err } case kkcorev1.PipelinePhaseRunning: + return r.dealRunningPipeline(ctx, pipeline) case kkcorev1.PipelinePhaseFailed: // do nothing case kkcorev1.PipelinePhaseSucceed: // do nothing } + return 
ctrl.Result{}, nil } @@ -153,6 +161,7 @@ func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline * } } } + return ctrl.Result{}, nil } @@ -199,6 +208,7 @@ func (r *PipelineReconciler) checkServiceAccount(ctx context.Context, pipeline k if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: pipeline.Namespace, Name: saName}, sa); err != nil { if !apierrors.IsNotFound(err) { klog.ErrorS(err, "get service account", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) + return err } // create sa @@ -206,6 +216,7 @@ func (r *PipelineReconciler) checkServiceAccount(ctx context.Context, pipeline k ObjectMeta: metav1.ObjectMeta{Name: saName, Namespace: pipeline.Namespace}, }); err != nil { klog.ErrorS(err, "create service account error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) + return err } } @@ -214,6 +225,7 @@ func (r *PipelineReconciler) checkServiceAccount(ctx context.Context, pipeline k if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: pipeline.Namespace, Name: saName}, rb); err != nil { if !apierrors.IsNotFound(err) { klog.ErrorS(err, "create role binding error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) + return err } //create rolebinding @@ -234,12 +246,15 @@ func (r *PipelineReconciler) checkServiceAccount(ctx context.Context, pipeline k }, }); err != nil { klog.ErrorS(err, "create role binding error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) + return err } } + return nil } +// GenerateJobSpec for pipeline func (r *PipelineReconciler) GenerateJobSpec(pipeline kkcorev1.Pipeline) batchv1.JobSpec { // get ServiceAccount name for executor pod saName, ok := os.LookupEnv("EXECUTOR_SERVICEACCOUNT") @@ -282,6 +297,7 @@ func (r *PipelineReconciler) GenerateJobSpec(pipeline kkcorev1.Pipeline) batchv1 }, }, } + return jobSpec } diff --git a/pkg/converter/converter.go b/pkg/converter/converter.go index 48a76788..6da378f1 100644 --- a/pkg/converter/converter.go +++ b/pkg/converter/converter.go 
@@ -17,7 +17,7 @@ limitations under the License. package converter import ( - "context" + "errors" "fmt" "math" "strconv" @@ -29,11 +29,11 @@ import ( "k8s.io/klog/v2" kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1" - projectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" + kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" ) // MarshalBlock marshal block to task -func MarshalBlock(ctx context.Context, role string, hosts []string, when []string, block projectv1.Block) *kkcorev1alpha1.Task { +func MarshalBlock(role string, hosts []string, when []string, block kkprojectv1.Block) *kkcorev1alpha1.Task { task := &kkcorev1alpha1.Task{ TypeMeta: metav1.TypeMeta{ Kind: "Task", @@ -45,7 +45,7 @@ func MarshalBlock(ctx context.Context, role string, hosts []string, when []strin kkcorev1alpha1.TaskAnnotationRole: role, }, }, - Spec: kkcorev1alpha1.KubeKeyTaskSpec{ + Spec: kkcorev1alpha1.TaskSpec{ Name: block.Name, Hosts: hosts, IgnoreError: block.IgnoreErrors, @@ -55,6 +55,7 @@ func MarshalBlock(ctx context.Context, role string, hosts []string, when []strin Register: block.Register, }, } + if block.Loop != nil { data, err := json.Marshal(block.Loop) if err != nil { @@ -95,13 +96,14 @@ func GroupHostBySerial(hosts []string, serial []any) ([][]string, error) { sis[i] = b } default: - return nil, fmt.Errorf("unknown serial type. only support int or percent") + return nil, errors.New("unknown serial type. 
only support int or percent") } if sis[i] == 0 { return nil, fmt.Errorf("serial %v should not be zero", a) } count += sis[i] } + if len(hosts) > count { for i := 0.0; i < float64(len(hosts)-count)/float64(sis[len(sis)-1]); i++ { sis = append(sis, sis[len(sis)-1]) @@ -119,5 +121,6 @@ func GroupHostBySerial(hosts []string, serial []any) ([][]string, error) { result[i] = hosts[begin:end] begin += si } + return result, nil } diff --git a/pkg/converter/converter_test.go b/pkg/converter/converter_test.go index f4cebb20..942368ac 100644 --- a/pkg/converter/converter_test.go +++ b/pkg/converter/converter_test.go @@ -93,12 +93,15 @@ func TestGroupHostBySerial(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { result, err := GroupHostBySerial(hosts, tc.serial) - if tc.exceptErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tc.exceptResult, result) + if err != nil { + if tc.exceptErr { + assert.Error(t, err) + + return + } + t.Fatal(err) } + assert.Equal(t, tc.exceptResult, result) }) } } diff --git a/pkg/converter/internal/functions.go b/pkg/converter/internal/functions.go index 7a93cd02..499b87d3 100644 --- a/pkg/converter/internal/functions.go +++ b/pkg/converter/internal/functions.go @@ -9,11 +9,11 @@ import ( "gopkg.in/yaml.v3" ) +// Template parse file or vars which defined in project. var Template = template.New("kubekey").Funcs(funcMap()) func funcMap() template.FuncMap { var f = sprig.TxtFuncMap() - delete(f, "env") delete(f, "expandenv") // add custom function @@ -28,12 +28,13 @@ func funcMap() template.FuncMap { // always return a string, even on marshal error (empty string). // // This is designed to be called from a template. -func toYAML(v interface{}) string { +func toYAML(v any) string { data, err := yaml.Marshal(v) if err != nil { // Swallow errors inside of a template. 
return "" } + return strings.TrimSuffix(string(data), "\n") } @@ -41,13 +42,15 @@ func toYAML(v interface{}) string { func ipInCIDR(index int, cidr string) (string, error) { var ips = make([]string, 0) for _, s := range strings.Split(cidr, ",") { - ips = append(ips, parseIp(s)...) + ips = append(ips, parseIP(s)...) } + if index < 0 { index = max(len(ips)+index, 0) } index = max(index, 0) index = min(index, len(ips)-1) + return ips[index], nil } diff --git a/pkg/converter/internal/helper.go b/pkg/converter/internal/helper.go index 5d3fa6f8..80c9485a 100644 --- a/pkg/converter/internal/helper.go +++ b/pkg/converter/internal/helper.go @@ -7,8 +7,8 @@ import ( "strings" ) -// parseIp parse cidr to actual ip slice. or parse the ip range string (format xxx-xxx) to actual ip slice, -func parseIp(ip string) []string { +// parseIP parse cidr to actual ip slice, or parse the ip range string (format xxx-xxx) to actual ip slice, +func parseIP(ip string) []string { var availableIPs []string // if ip is "1.1.1.1/",trim / ip = strings.TrimRight(ip, "/") @@ -25,6 +25,7 @@ func parseIp(ip string) []string { } else { availableIPs = append(availableIPs, ip) } + return availableIPs } @@ -36,21 +37,21 @@ func getAvailableIPRange(ipStart, ipEnd string) []string { if firstIP.To4() == nil || endIP.To4() == nil { return availableIPs } + firstIPNum := ipToInt(firstIP.To4()) - EndIPNum := ipToInt(endIP.To4()) + endIPNum := ipToInt(endIP.To4()) pos := int32(1) - newNum := firstIPNum - - for newNum <= EndIPNum { + for newNum <= endIPNum { availableIPs = append(availableIPs, intToIP(newNum).String()) newNum += pos } + return availableIPs } func getAvailableIP(ipAndMask string) []string { - var availableIPs []string + var availableIPs = make([]string, 0) ipAndMask = strings.TrimSpace(ipAndMask) ipAndMask = iPAddressToCIDR(ipAndMask) @@ -63,11 +64,12 @@ func getAvailableIP(ipAndMask string) []string { m := size - 2 // -1 for the broadcast address, -1 for the gateway address var newNum int32 - for 
attempt := int32(0); attempt < m; attempt++ { + for range m { newNum = ipNum + pos pos = pos%m + 1 availableIPs = append(availableIPs, intToIP(newNum).String()) } + return availableIPs } @@ -78,7 +80,8 @@ func ipToInt(ip net.IP) int32 { func intToIP(n int32) net.IP { b := make([]byte, 4) binary.BigEndian.PutUint32(b, uint32(n)) - return net.IP(b) + + return b } func iPAddressToCIDR(ipAddress string) string { @@ -89,10 +92,11 @@ func iPAddressToCIDR(ipAddress string) string { if strings.Contains(mask, ".") { mask = iPMaskStringToCIDR(mask) } + return ip + "/" + mask - } else { - return ipAddress } + + return ipAddress } func iPMaskStringToCIDR(netmask string) string { @@ -101,8 +105,10 @@ func iPMaskStringToCIDR(netmask string) string { for i, v := range netmaskList { mint[i], _ = strconv.Atoi(v) } + myIPMask := net.IPv4Mask(byte(mint[0]), byte(mint[1]), byte(mint[2]), byte(mint[3])) ones, _ := myIPMask.Size() + return strconv.Itoa(ones) } @@ -113,13 +119,15 @@ func networkRange(network *net.IPNet) (net.IP, net.IP) { for i := 0; i < len(lastIP); i++ { lastIP[i] = netIP[i] | ^network.Mask[i] } + return firstIP, lastIP } func networkSize(mask net.IPMask) int32 { m := net.IPv4Mask(0, 0, 0, 0) - for i := 0; i < net.IPv4len; i++ { + for i := range net.IPv4len { m[i] = ^mask[i] } + return int32(binary.BigEndian.Uint32(m)) + 1 } diff --git a/pkg/converter/internal/helper_test.go b/pkg/converter/internal/helper_test.go index 43ce06a1..d7446121 100644 --- a/pkg/converter/internal/helper_test.go +++ b/pkg/converter/internal/helper_test.go @@ -19,11 +19,12 @@ func TestParseIp(t *testing.T) { excepted: func() []string { // 192.168.0.1 - 192.168.63.254 var ips []string - for i := 0; i <= 63; i++ { - for j := 0; j <= 255; j++ { + for i := range 64 { + for j := range 256 { ips = append(ips, fmt.Sprintf("192.168.%d.%d", i, j)) } } + return ips[1 : len(ips)-1] }, }, @@ -33,11 +34,12 @@ func TestParseIp(t *testing.T) { excepted: func() []string { // 192.168.0.1 - 192.168.63.254 var 
ips []string - for i := 0; i <= 63; i++ { - for j := 0; j <= 255; j++ { + for i := range 64 { + for j := range 256 { ips = append(ips, fmt.Sprintf("192.168.%d.%d", i, j)) } } + return ips[1 : len(ips)-1] }, }, @@ -45,7 +47,7 @@ func TestParseIp(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - assert.Equal(t, tc.excepted(), parseIp(tc.ipRange)) + assert.Equal(t, tc.excepted(), parseIP(tc.ipRange)) }) } } diff --git a/pkg/converter/tmpl/template.go b/pkg/converter/tmpl/template.go index d57343a1..d50cc9af 100644 --- a/pkg/converter/tmpl/template.go +++ b/pkg/converter/tmpl/template.go @@ -32,19 +32,22 @@ func ParseBool(ctx map[string]any, inputs []string) (bool, error) { if !IsTmplSyntax(input) { input = "{{ " + input + " }}" } + tl, err := internal.Template.Parse(input) if err != nil { - return false, fmt.Errorf("failed to parse template '%s': %v", input, err) + return false, fmt.Errorf("failed to parse template '%s': %w", input, err) } + result := bytes.NewBuffer(nil) if err := tl.Execute(result, ctx); err != nil { - return false, fmt.Errorf("failed to execute template '%s': %v", input, err) + return false, fmt.Errorf("failed to execute template '%s': %w", input, err) } klog.V(6).InfoS(" parse template succeed", "result", result.String()) if result.String() != "true" { return false, nil } } + return true, nil } @@ -53,15 +56,18 @@ func ParseString(ctx map[string]any, input string) (string, error) { if !IsTmplSyntax(input) { return input, nil } + tl, err := internal.Template.Parse(input) if err != nil { - return "", fmt.Errorf("failed to parse template '%s': %v", input, err) + return "", fmt.Errorf("failed to parse template '%s': %w", input, err) } + result := bytes.NewBuffer(nil) if err := tl.Execute(result, ctx); err != nil { - return "", fmt.Errorf("failed to execute template '%s': %v", input, err) + return "", fmt.Errorf("failed to execute template '%s': %w", input, err) } klog.V(6).InfoS(" parse template succeed", "result", 
result.String()) + return strings.TrimPrefix(strings.TrimSuffix(result.String(), "\n"), "\n"), nil } diff --git a/pkg/converter/tmpl/template_test.go b/pkg/converter/tmpl/template_test.go index 39e661fc..ac3d8931 100644 --- a/pkg/converter/tmpl/template_test.go +++ b/pkg/converter/tmpl/template_test.go @@ -85,13 +85,13 @@ func TestParseBool(t *testing.T) { { name: "eq true-1", condition: []string{"{{ ne .foo \"\" }}"}, - variable: map[string]any{}, + variable: make(map[string]any), excepted: true, }, { name: "eq true-1", condition: []string{"{{ and .foo (ne .foo \"\") }}"}, - variable: map[string]any{}, + variable: make(map[string]any), excepted: false, }, // ======= value exist ======= @@ -123,7 +123,7 @@ func TestParseBool(t *testing.T) { { name: "default true-1", condition: []string{"{{ .foo | default true }}"}, - variable: map[string]any{}, + variable: make(map[string]any), excepted: true, }, // ======= has ======= @@ -366,20 +366,20 @@ func TestParseFunction(t *testing.T) { { name: "default string 1", input: "{{ .foo | default \"bar\" }}", - variable: map[string]any{}, + variable: make(map[string]any), excepted: "bar", }, { name: "default string 2", input: "{{ default .foo \"bar\" }}", - variable: map[string]any{}, + variable: make(map[string]any), excepted: "bar", }, { name: "default number 1", input: "{{ .foo | default 1 }}", - variable: map[string]any{}, + variable: make(map[string]any), excepted: "1", }, // ======= split ======= @@ -575,7 +575,7 @@ func TestParseFunction(t *testing.T) { { name: "trimPrefix 2", input: `{{ .foo | default "" |trimPrefix "v" }}`, - variable: map[string]any{}, + variable: make(map[string]any), excepted: "", }, } @@ -627,7 +627,7 @@ func TestParseCustomFunction(t *testing.T) { { name: "pow true-1", input: "{{ pow 2 3 }}", - variable: map[string]any{}, + variable: make(map[string]any), excepted: "8", }, } diff --git a/pkg/executor/block_executor.go b/pkg/executor/block_executor.go new file mode 100644 index 00000000..59782e5c --- 
/dev/null +++ b/pkg/executor/block_executor.go @@ -0,0 +1,209 @@ +package executor + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "slices" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" + kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" + "github.com/kubesphere/kubekey/v4/pkg/converter" + "github.com/kubesphere/kubekey/v4/pkg/modules" + "github.com/kubesphere/kubekey/v4/pkg/variable" +) + +type blockExecutor struct { + *option + + // playbook level config + hosts []string // which hosts will run playbook + ignoreErrors *bool // IgnoreErrors for playbook + // blocks level config + blocks []kkprojectv1.Block + role string // role name of blocks + when []string // when condition for blocks + tags kkprojectv1.Taggable +} + +// Exec block. convert block to task and executor it. +func (e blockExecutor) Exec(ctx context.Context) error { + for _, block := range e.blocks { + hosts := e.dealRunOnce(block.RunOnce) + tags := e.dealTags(block.Taggable) + ignoreErrors := e.dealIgnoreErrors(block.IgnoreErrors) + when := e.dealWhen(block.When) + + // // check tags + if !tags.IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) { + // if not match the tags. skip + continue + } + + // merge variable which defined in block + if err := e.variable.Merge(variable.MergeRuntimeVariable(block.Vars, hosts...)); err != nil { + klog.V(5).ErrorS(err, "merge variable error", "pipeline", e.pipeline, "block", block.Name) + + return err + } + + switch { + case len(block.Block) != 0: + if err := e.dealBlock(ctx, hosts, ignoreErrors, when, tags, block); err != nil { + klog.V(5).ErrorS(err, "deal block error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + + return err + } + case block.IncludeTasks != "": + // do nothing. 
include tasks has converted to blocks. + default: + if err := e.dealTask(ctx, hosts, when, block); err != nil { + klog.V(5).ErrorS(err, "deal task error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + + return err + } + } + } + + return nil +} + +// dealRunOnce "run_once" argument in block. +// If RunOnce is true, it's always only run in the first host. +// Otherwise, return hosts which defined in parent block. +func (e blockExecutor) dealRunOnce(runOnce bool) []string { + hosts := e.hosts + if runOnce { + // runOnce only run in first node + hosts = hosts[:1] + } + + return hosts +} + +// dealIgnoreErrors "ignore_errors" argument in block. +// if ignore_errors not defined in block, set it which defined in parent block. +func (e blockExecutor) dealIgnoreErrors(ie *bool) *bool { + if ie == nil { + ie = e.ignoreErrors + } + + return ie +} + +// dealTags "tags" argument in block. block tags inherits parent block +func (e blockExecutor) dealTags(taggable kkprojectv1.Taggable) kkprojectv1.Taggable { + return kkprojectv1.JoinTag(taggable, e.tags) +} + +// dealWhen argument in block. block when inherits parent block. +func (e blockExecutor) dealWhen(when kkprojectv1.When) []string { + w := e.when + for _, d := range when.Data { + if !slices.Contains(w, d) { + w = append(w, d) + } + } + + return w +} + +// dealBlock "block" argument has defined in block. execute order is: block -> rescue -> always +// If rescue is defined, execute it when block execute error. +// If always id defined, execute it. 
+func (e blockExecutor) dealBlock(ctx context.Context, hosts []string, ignoreErrors *bool, when []string, tags kkprojectv1.Taggable, block kkprojectv1.Block) error { + var errs error + // exec block + if err := (blockExecutor{ + option: e.option, + hosts: hosts, + ignoreErrors: ignoreErrors, + role: e.role, + blocks: block.Block, + when: when, + tags: tags, + }.Exec(ctx)); err != nil { + klog.V(5).ErrorS(err, "execute tasks from block error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + errs = errors.Join(errs, err) + } + // if block exec failed exec rescue + if e.pipeline.Status.Phase == kkcorev1.PipelinePhaseFailed && len(block.Rescue) != 0 { + if err := (blockExecutor{ + option: e.option, + hosts: hosts, + ignoreErrors: ignoreErrors, + blocks: block.Rescue, + role: e.role, + when: when, + tags: tags, + }.Exec(ctx)); err != nil { + klog.V(5).ErrorS(err, "execute tasks from rescue error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + errs = errors.Join(errs, err) + } + } + // exec always after block + if len(block.Always) != 0 { + if err := (blockExecutor{ + option: e.option, + hosts: hosts, + ignoreErrors: ignoreErrors, + blocks: block.Always, + role: e.role, + when: when, + tags: tags, + }.Exec(ctx)); err != nil { + klog.V(5).ErrorS(err, "execute tasks from always error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + errs = errors.Join(errs, err) + } + } + // when execute error. return + return errs +} + +// dealTask "block" argument is not defined in block. 
+func (e blockExecutor) dealTask(ctx context.Context, hosts []string, when []string, block kkprojectv1.Block) error { + task := converter.MarshalBlock(e.role, hosts, when, block) + // complete by pipeline + task.GenerateName = e.pipeline.Name + "-" + task.Namespace = e.pipeline.Namespace + if err := controllerutil.SetControllerReference(e.pipeline, task, e.client.Scheme()); err != nil { + klog.V(5).ErrorS(err, "Set controller reference error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + + return err + } + // complete module by unknown field + for n, a := range block.UnknownField { + data, err := json.Marshal(a) + if err != nil { + klog.V(5).ErrorS(err, "Marshal unknown field error", "field", n, "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + + return err + } + if m := modules.FindModule(n); m != nil { + task.Spec.Module.Name = n + task.Spec.Module.Args = runtime.RawExtension{Raw: data} + + break + } + } + + if task.Spec.Module.Name == "" { // action is necessary for a task + klog.V(5).ErrorS(nil, "No module/action detected in task", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + + return fmt.Errorf("no module/action detected in task: %s", task.Name) + } + + if err := (taskExecutor{option: e.option, task: task}.Exec(ctx)); err != nil { + klog.V(5).ErrorS(err, "exec task error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + + return err + } + + return nil +} diff --git a/pkg/executor/block_executor_test.go b/pkg/executor/block_executor_test.go new file mode 100644 index 00000000..995f2150 --- /dev/null +++ b/pkg/executor/block_executor_test.go @@ -0,0 +1,133 @@ +package executor + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/utils/ptr" + + kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" +) + +func TestBlockExecutor_DealRunOnce(t *testing.T) { + testcases := []struct { + name string + runOnce 
bool + except []string + }{ + { + name: "runonce is false", + runOnce: false, + except: []string{"node1", "node2", "node3"}, + }, + { + name: "runonce is true", + runOnce: true, + except: []string{"node1"}, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + assert.ElementsMatch(t, blockExecutor{ + hosts: []string{"node1", "node2", "node3"}, + }.dealRunOnce(tc.runOnce), tc.except) + }) + } +} + +func TestBlockExecutor_DealIgnoreErrors(t *testing.T) { + testcases := []struct { + name string + ignoreErrors *bool + except *bool + }{ + { + name: "ignoreErrors is empty", + ignoreErrors: nil, + except: ptr.To(true), + }, + { + name: "ignoreErrors is true", + ignoreErrors: ptr.To(true), + except: ptr.To(true), + }, + { + name: "ignoreErrors is false", + ignoreErrors: ptr.To(false), + except: ptr.To(false), + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, blockExecutor{ + ignoreErrors: ptr.To(true), + }.dealIgnoreErrors(tc.ignoreErrors), tc.except) + }) + } +} + +func TestBlockExecutor_DealTags(t *testing.T) { + testcases := []struct { + name string + tags kkprojectv1.Taggable + except kkprojectv1.Taggable + }{ + { + name: "single tags", + tags: kkprojectv1.Taggable{Tags: []string{"c"}}, + except: kkprojectv1.Taggable{Tags: []string{"a", "b", "c"}}, + }, + { + name: "mutil tags", + tags: kkprojectv1.Taggable{Tags: []string{"c", "d"}}, + except: kkprojectv1.Taggable{Tags: []string{"a", "b", "c", "d"}}, + }, + { + name: "repeat tags", + tags: kkprojectv1.Taggable{Tags: []string{"b", "c"}}, + except: kkprojectv1.Taggable{Tags: []string{"a", "b", "c"}}, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + assert.ElementsMatch(t, blockExecutor{ + tags: kkprojectv1.Taggable{Tags: []string{"a", "b"}}, + }.dealTags(tc.tags).Tags, tc.except.Tags) + }) + } +} + +func TestBlockExecutor_DealWhen(t *testing.T) { + testcases := []struct { + name string + when []string + 
except []string + }{ + { + name: "single when", + when: []string{"c"}, + except: []string{"a", "b", "c"}, + }, + { + name: "mutil when", + when: []string{"c", "d"}, + except: []string{"a", "b", "c", "d"}, + }, + { + name: "repeat when", + when: []string{"b", "c"}, + except: []string{"a", "b", "c"}, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + assert.ElementsMatch(t, blockExecutor{ + when: []string{"a", "b"}, + }.dealWhen(kkprojectv1.When{Data: tc.when}), tc.except) + }) + } +} diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go index dbc9d45c..a3d8e12b 100644 --- a/pkg/executor/executor.go +++ b/pkg/executor/executor.go @@ -1,602 +1,26 @@ -/* -Copyright 2024 The KubeSphere Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - package executor import ( "context" - "errors" - "fmt" "io" - "os" - "strings" - "time" - "github.com/schollz/progressbar/v3" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/json" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" - kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1" - projectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" - "github.com/kubesphere/kubekey/v4/pkg/connector" - _const "github.com/kubesphere/kubekey/v4/pkg/const" - "github.com/kubesphere/kubekey/v4/pkg/converter" - "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" - "github.com/kubesphere/kubekey/v4/pkg/modules" - "github.com/kubesphere/kubekey/v4/pkg/project" "github.com/kubesphere/kubekey/v4/pkg/variable" ) -// TaskExecutor all task in pipeline -type TaskExecutor interface { +// Executor all task in pipeline +type Executor interface { Exec(ctx context.Context) error } -func NewTaskExecutor(client ctrlclient.Client, pipeline *kkcorev1.Pipeline, logOutput io.Writer) TaskExecutor { - // get variable - v, err := variable.New(client, *pipeline) - if err != nil { - klog.V(5).ErrorS(nil, "convert playbook error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) - return nil - } - - return &executor{ - client: client, - pipeline: pipeline, - variable: v, - logOutput: logOutput, - } -} - -type executor struct { +// option for pipelineExecutor, blockExecutor, taskExecutor +type option struct { client ctrlclient.Client pipeline *kkcorev1.Pipeline variable variable.Variable - + // commandLine log output. 
default os.stdout logOutput io.Writer } - -type execBlockOptions struct { - // playbook level config - hosts []string // which hosts will run playbook - ignoreErrors *bool // IgnoreErrors for playbook - // blocks level config - blocks []projectv1.Block - role string // role name of blocks - when []string // when condition for blocks - tags projectv1.Taggable -} - -func (e executor) Exec(ctx context.Context) error { - klog.V(6).InfoS("deal project", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) - pj, err := project.New(*e.pipeline, true) - if err != nil { - return fmt.Errorf("deal project error: %w", err) - } - - // convert to transfer.Playbook struct - pb, err := pj.MarshalPlaybook() - if err != nil { - return fmt.Errorf("convert playbook error: %w", err) - } - - for _, play := range pb.Play { - if !play.Taggable.IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) { - // if not match the tags. skip - continue - } - // hosts should contain all host's name. hosts should not be empty. - var hosts []string - if ahn, err := e.variable.Get(variable.GetHostnames(play.PlayHost.Hosts)); err == nil { - hosts = ahn.([]string) - } - if len(hosts) == 0 { // if hosts is empty skip this playbook - klog.V(5).Info("Hosts is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) - continue - } - - // when gather_fact is set. get host's information from remote. - if play.GatherFacts { - for _, h := range hosts { - gfv, err := e.getGatherFact(ctx, h, e.variable) - if err != nil { - return fmt.Errorf("get gather fact error: %w", err) - } - // merge host information to runtime variable - if err := e.variable.Merge(variable.MergeRemoteVariable(h, gfv)); err != nil { - klog.V(5).ErrorS(err, "Merge gather fact error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "host", h) - return fmt.Errorf("merge gather fact error: %w", err) - } - } - } - - // Batch execution, with each batch being a group of hosts run in serial. 
- var batchHosts [][]string - if play.RunOnce { - // runOnce only run in first node - batchHosts = [][]string{{hosts[0]}} - } else { - // group hosts by serial. run the playbook by serial - batchHosts, err = converter.GroupHostBySerial(hosts, play.Serial.Data) - if err != nil { - return fmt.Errorf("group host by serial error: %w", err) - } - } - - // generate and execute task. - for _, serials := range batchHosts { - // each batch hosts should not be empty. - if len(serials) == 0 { - klog.V(5).ErrorS(nil, "Host is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) - return fmt.Errorf("host is empty") - } - - if err := e.mergeVariable(ctx, e.variable, play.Vars, serials...); err != nil { - return fmt.Errorf("merge variable error: %w", err) - } - // generate task from pre tasks - if err := e.execBlock(ctx, execBlockOptions{ - hosts: serials, - ignoreErrors: play.IgnoreErrors, - blocks: play.PreTasks, - tags: play.Taggable, - }); err != nil { - return fmt.Errorf("execute pre-tasks from play error: %w", err) - } - // generate task from role - for _, role := range play.Roles { - if err := e.mergeVariable(ctx, e.variable, role.Vars, serials...); err != nil { - return fmt.Errorf("merge variable error: %w", err) - } - // use the most closely configuration - ignoreErrors := role.IgnoreErrors - if ignoreErrors == nil { - ignoreErrors = play.IgnoreErrors - } - - if err := e.execBlock(ctx, execBlockOptions{ - hosts: serials, - ignoreErrors: ignoreErrors, - blocks: role.Block, - role: role.Role, - when: role.When.Data, - tags: projectv1.JoinTag(role.Taggable, play.Taggable), - }); err != nil { - return fmt.Errorf("execute role-tasks error: %w", err) - } - } - // generate task from tasks - if err := e.execBlock(ctx, execBlockOptions{ - hosts: serials, - ignoreErrors: play.IgnoreErrors, - blocks: play.Tasks, - tags: play.Taggable, - }); err != nil { - return fmt.Errorf("execute tasks error: %w", err) - } - // generate task from post tasks - if err := e.execBlock(ctx, 
execBlockOptions{ - hosts: serials, - ignoreErrors: play.IgnoreErrors, - blocks: play.Tasks, - tags: play.Taggable, - }); err != nil { - return fmt.Errorf("execute post-tasks error: %w", err) - } - } - } - return nil -} - -// getGatherFact get host info -func (e executor) getGatherFact(ctx context.Context, hostname string, vars variable.Variable) (map[string]any, error) { - v, err := vars.Get(variable.GetParamVariable(hostname)) - if err != nil { - klog.V(5).ErrorS(err, "Get host variable error", "hostname", hostname) - return nil, err - } - connectorVars := make(map[string]any) - if c1, ok := v.(map[string]any)[_const.VariableConnector]; ok { - if c2, ok := c1.(map[string]any); ok { - connectorVars = c2 - } - } - conn, err := connector.NewConnector(hostname, connectorVars) - if err != nil { - klog.V(5).ErrorS(err, "New connector error", "hostname", hostname) - return nil, err - } - if err := conn.Init(ctx); err != nil { - klog.V(5).ErrorS(err, "Init connection error", "hostname", hostname) - return nil, err - } - defer conn.Close(ctx) - - if gf, ok := conn.(connector.GatherFacts); ok { - return gf.Info(ctx) - } - klog.V(5).ErrorS(nil, "gather fact is not defined in this connector", "hostname", hostname) - return nil, nil -} - -// execBlock loop block and generate task. 
-func (e executor) execBlock(ctx context.Context, options execBlockOptions) error { - for _, at := range options.blocks { - if !projectv1.JoinTag(at.Taggable, options.tags).IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) { - continue - } - hosts := options.hosts - if at.RunOnce { // only run in first host - hosts = []string{options.hosts[0]} - } - tags := projectv1.JoinTag(at.Taggable, options.tags) - - // use the most closely configuration - ignoreErrors := at.IgnoreErrors - if ignoreErrors == nil { - ignoreErrors = options.ignoreErrors - } - // merge variable which defined in block - if err := e.mergeVariable(ctx, e.variable, at.Vars, hosts...); err != nil { - klog.V(5).ErrorS(err, "merge variable error", "pipeline", e.pipeline, "block", at.Name) - return err - } - - switch { - case len(at.Block) != 0: - var errs error - // exec block - if err := e.execBlock(ctx, execBlockOptions{ - hosts: hosts, - ignoreErrors: ignoreErrors, - role: options.role, - blocks: at.Block, - when: append(options.when, at.When.Data...), - tags: tags, - }); err != nil { - klog.V(5).ErrorS(err, "execute tasks from block error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) - errs = errors.Join(errs, err) - } - - // if block exec failed exec rescue - if e.pipeline.Status.Phase == kkcorev1.PipelinePhaseFailed && len(at.Rescue) != 0 { - if err := e.execBlock(ctx, execBlockOptions{ - hosts: hosts, - ignoreErrors: ignoreErrors, - blocks: at.Rescue, - role: options.role, - when: append(options.when, at.When.Data...), - tags: tags, - }); err != nil { - klog.V(5).ErrorS(err, "execute tasks from rescue error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) - errs = errors.Join(errs, err) - } - } - - // exec always after block - if len(at.Always) != 0 { - if err := e.execBlock(ctx, execBlockOptions{ - hosts: hosts, - ignoreErrors: ignoreErrors, - blocks: at.Always, - role: options.role, - when: append(options.when, at.When.Data...), - 
tags: tags, - }); err != nil { - klog.V(5).ErrorS(err, "execute tasks from always error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) - errs = errors.Join(errs, err) - } - } - - // when execute error. return - if errs != nil { - return errs - } - - case at.IncludeTasks != "": - // include tasks has converted to blocks. - // do nothing - default: - task := converter.MarshalBlock(ctx, options.role, hosts, append(options.when, at.When.Data...), at) - // complete by pipeline - task.GenerateName = e.pipeline.Name + "-" - task.Namespace = e.pipeline.Namespace - if err := controllerutil.SetControllerReference(e.pipeline, task, e.client.Scheme()); err != nil { - klog.V(5).ErrorS(err, "Set controller reference error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) - return err - } - // complete module by unknown field - for n, a := range at.UnknownFiled { - data, err := json.Marshal(a) - if err != nil { - klog.V(5).ErrorS(err, "Marshal unknown field error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name, "field", n) - return err - } - if m := modules.FindModule(n); m != nil { - task.Spec.Module.Name = n - task.Spec.Module.Args = runtime.RawExtension{Raw: data} - break - } - } - if task.Spec.Module.Name == "" { // action is necessary for a task - klog.V(5).ErrorS(nil, "No module/action detected in task", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) - return fmt.Errorf("no module/action detected in task: %s", task.Name) - } - // create task - if err := e.client.Create(ctx, task); err != nil { - klog.V(5).ErrorS(err, "create task error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) - return err - } - - for { - var roleLog string - if task.Annotations[kkcorev1alpha1.TaskAnnotationRole] != "" { - roleLog = "[" + task.Annotations[kkcorev1alpha1.TaskAnnotationRole] + "] " - } - klog.V(5).InfoS("begin run task", "task", 
ctrlclient.ObjectKeyFromObject(task)) - fmt.Fprintf(e.logOutput, "%s %s%s\n", time.Now().Format(time.TimeOnly+" MST"), roleLog, task.Spec.Name) - // exec task - task.Status.Phase = kkcorev1alpha1.TaskPhaseRunning - if err := e.client.Status().Update(ctx, task); err != nil { - klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(task)) - } - if err := e.executeTask(ctx, task, options); err != nil { - klog.V(5).ErrorS(err, "exec task error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name) - return err - } - if err := e.client.Status().Update(ctx, task); err != nil { - klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(task)) - return err - } - - if task.IsComplete() { - break - } - } - e.pipeline.Status.TaskResult.Total++ - switch task.Status.Phase { - case kkcorev1alpha1.TaskPhaseSuccess: - e.pipeline.Status.TaskResult.Success++ - case kkcorev1alpha1.TaskPhaseIgnored: - e.pipeline.Status.TaskResult.Ignored++ - case kkcorev1alpha1.TaskPhaseFailed: - e.pipeline.Status.TaskResult.Failed++ - } - - // exit when task run failed - if task.IsFailed() { - var hostReason []kkcorev1.PipelineFailedDetailHost - for _, tr := range task.Status.HostResults { - hostReason = append(hostReason, kkcorev1.PipelineFailedDetailHost{ - Host: tr.Host, - Stdout: tr.Stdout, - StdErr: tr.StdErr, - }) - } - e.pipeline.Status.FailedDetail = append(e.pipeline.Status.FailedDetail, kkcorev1.PipelineFailedDetail{ - Task: task.Spec.Name, - Hosts: hostReason, - }) - e.pipeline.Status.Phase = kkcorev1.PipelinePhaseFailed - return fmt.Errorf("task %s run failed", task.Spec.Name) - } - } - } - return nil -} - -// executeTask parallel in each host. 
-func (e executor) executeTask(ctx context.Context, task *kkcorev1alpha1.Task, options execBlockOptions) error { - // check task host results - wg := &wait.Group{} - task.Status.HostResults = make([]kkcorev1alpha1.TaskHostResult, len(task.Spec.Hosts)) - - for i, h := range task.Spec.Hosts { - wg.StartWithContext(ctx, func(ctx context.Context) { - // task result - var stdout, stderr string - defer func() { - if task.Spec.Register != "" { - var stdoutResult any = stdout - var stderrResult any = stderr - // try to convert by json - _ = json.Unmarshal([]byte(stdout), &stdoutResult) - // try to convert by json - _ = json.Unmarshal([]byte(stderr), &stderrResult) - // set variable to parent location - if err := e.variable.Merge(variable.MergeRuntimeVariable(h, map[string]any{ - task.Spec.Register: map[string]any{ - "stdout": stdoutResult, - "stderr": stderrResult, - }, - })); err != nil { - stderr = fmt.Sprintf("register task result to variable error: %v", err) - return - } - } - if stderr != "" && task.Spec.IgnoreError != nil && *task.Spec.IgnoreError { - klog.V(5).ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(task)) - } else if stderr != "" { - klog.ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(task)) - } - // fill result - task.Status.HostResults[i] = kkcorev1alpha1.TaskHostResult{ - Host: h, - Stdout: stdout, - StdErr: stderr, - } - }() - // task log - // placeholder format task log - var placeholder string - if hostNameMaxLen, err := e.variable.Get(variable.GetHostMaxLength()); err == nil { - placeholder = strings.Repeat(" ", hostNameMaxLen.(int)-len(h)) - } - // progress bar for task - var bar = progressbar.NewOptions(-1, - progressbar.OptionSetWriter(e.logOutput), - progressbar.OptionSpinnerCustom([]string{" "}), - progressbar.OptionEnableColorCodes(true), - progressbar.OptionSetDescription(fmt.Sprintf("[\033[36m%s\033[0m]%s 
\033[36mrunning\033[0m", h, placeholder)), - progressbar.OptionOnCompletion(func() { - if _, err := os.Stdout.WriteString("\n"); err != nil { - klog.ErrorS(err, "failed to write output", "host", h) - } - }), - ) - go func() { - for !bar.IsFinished() { - if err := bar.Add(1); err != nil { - return - } - time.Sleep(100 * time.Millisecond) - } - }() - defer func() { - switch { - case stderr != "": - if task.Spec.IgnoreError != nil && *task.Spec.IgnoreError { // ignore - bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34mignore \033[0m", h, placeholder)) - } else { // failed - bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[31mfailed \033[0m", h, placeholder)) - } - case stdout == "skip": // skip - bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34mskip \033[0m", h, placeholder)) - default: //success - bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34msuccess\033[0m", h, placeholder)) - } - if err := bar.Finish(); err != nil { - klog.ErrorS(err, "finish bar error") - } - }() - // task execute - ha, err := e.variable.Get(variable.GetAllVariable(h)) - if err != nil { - stderr = fmt.Sprintf("get variable error: %v", err) - return - } - // check when condition - if len(task.Spec.When) > 0 { - ok, err := tmpl.ParseBool(ha.(map[string]any), task.Spec.When) - if err != nil { - stderr = fmt.Sprintf("parse when condition error: %v", err) - return - } - if !ok { - stdout = "skip" - return - } - } - // execute module with loop - // if loop is empty. 
execute once, and the item is null - for _, item := range e.parseLoop(ctx, ha.(map[string]any), task) { - // set item to runtime variable - if err := e.variable.Merge(variable.MergeRuntimeVariable(h, map[string]any{ - _const.VariableItem: item, - })); err != nil { - stderr = fmt.Sprintf("set loop item to variable error: %v", err) - return - } - stdout, stderr = e.executeModule(ctx, task, modules.ExecOptions{ - Args: task.Spec.Module.Args, - Host: h, - Variable: e.variable, - Task: *task, - Pipeline: *e.pipeline, - }) - // delete item - if err := e.variable.Merge(variable.MergeRuntimeVariable(h, map[string]any{ - _const.VariableItem: nil, - })); err != nil { - stderr = fmt.Sprintf("clean loop item to variable error: %v", err) - return - } - } - }) - } - wg.Wait() - // host result for task - task.Status.Phase = kkcorev1alpha1.TaskPhaseSuccess - for _, data := range task.Status.HostResults { - if data.StdErr != "" { - if task.Spec.IgnoreError != nil && *task.Spec.IgnoreError { - task.Status.Phase = kkcorev1alpha1.TaskPhaseIgnored - } else { - task.Status.Phase = kkcorev1alpha1.TaskPhaseFailed - } - break - } - } - - return nil -} - -// parseLoop parse loop to slice. if loop contains template string. convert it. -// loop is json string. try convertor to string slice by json. -// loop is normal string. set it to empty slice and return. -// loop is string slice. return it. -func (e executor) parseLoop(ctx context.Context, ha map[string]any, task *kkcorev1alpha1.Task) []any { - switch { - case task.Spec.Loop.Raw == nil: - // loop is not set. add one element to execute once module. - return []any{nil} - default: - return variable.Extension2Slice(ha, task.Spec.Loop) - } -} - -// executeModule find register module and execute it. -func (e executor) executeModule(ctx context.Context, task *kkcorev1alpha1.Task, opts modules.ExecOptions) (string, string) { - // get all variable. which contains item. 
- lg, err := opts.Variable.Get(variable.GetAllVariable(opts.Host)) - if err != nil { - klog.V(5).ErrorS(err, "get location variable error", "task", ctrlclient.ObjectKeyFromObject(task)) - return "", err.Error() - } - // check failed when condition - if len(task.Spec.FailedWhen) > 0 { - ok, err := tmpl.ParseBool(lg.(map[string]any), task.Spec.FailedWhen) - if err != nil { - klog.V(5).ErrorS(err, "validate FailedWhen condition error", "task", ctrlclient.ObjectKeyFromObject(task)) - return "", err.Error() - } - if ok { - return "", "failed by failedWhen" - } - } - - return modules.FindModule(task.Spec.Module.Name)(ctx, opts) -} - -// mergeVariable to runtime variable -func (e executor) mergeVariable(ctx context.Context, v variable.Variable, vd map[string]any, hosts ...string) error { - if len(vd) == 0 { - // skip - return nil - } - for _, host := range hosts { - if err := v.Merge(variable.MergeRuntimeVariable(host, vd)); err != nil { - return err - } - } - return nil -} diff --git a/pkg/executor/executor_test.go b/pkg/executor/executor_test.go new file mode 100644 index 00000000..d0fbddaa --- /dev/null +++ b/pkg/executor/executor_test.go @@ -0,0 +1,73 @@ +package executor + +import ( + "context" + "os" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" + kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1" + _const "github.com/kubesphere/kubekey/v4/pkg/const" + "github.com/kubesphere/kubekey/v4/pkg/variable" + "github.com/kubesphere/kubekey/v4/pkg/variable/source" +) + +func newTestOption() (*option, error) { + var err error + + o := &option{ + client: fake.NewClientBuilder().WithScheme(_const.Scheme).WithStatusSubresource(&kkcorev1.Pipeline{}, &kkcorev1alpha1.Task{}).Build(), + pipeline: &kkcorev1.Pipeline{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ 
+ Name: "test", + Namespace: corev1.NamespaceDefault, + }, + Spec: kkcorev1.PipelineSpec{ + InventoryRef: &corev1.ObjectReference{ + Name: "test", + Namespace: corev1.NamespaceDefault, + }, + ConfigRef: &corev1.ObjectReference{ + Name: "test", + Namespace: corev1.NamespaceDefault, + }, + }, + Status: kkcorev1.PipelineStatus{}, + }, + logOutput: os.Stdout, + } + + if err := o.client.Create(context.TODO(), &kkcorev1.Inventory{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: corev1.NamespaceDefault, + }, + Spec: kkcorev1.InventorySpec{}, + }); err != nil { + return nil, err + } + + if err := o.client.Create(context.TODO(), &kkcorev1.Config{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: corev1.NamespaceDefault, + }, + Spec: runtime.RawExtension{}, + }); err != nil { + return nil, err + } + + o.variable, err = variable.New(context.TODO(), o.client, *o.pipeline, source.MemorySource) + if err != nil { + return nil, err + } + + return o, nil +} diff --git a/pkg/executor/pipeline_executor.go b/pkg/executor/pipeline_executor.go new file mode 100644 index 00000000..7338c8c2 --- /dev/null +++ b/pkg/executor/pipeline_executor.go @@ -0,0 +1,276 @@ +/* +Copyright 2024 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package executor + +import ( + "context" + "errors" + "fmt" + "io" + + "k8s.io/klog/v2" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" + kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" + "github.com/kubesphere/kubekey/v4/pkg/connector" + _const "github.com/kubesphere/kubekey/v4/pkg/const" + "github.com/kubesphere/kubekey/v4/pkg/converter" + "github.com/kubesphere/kubekey/v4/pkg/project" + "github.com/kubesphere/kubekey/v4/pkg/variable" + "github.com/kubesphere/kubekey/v4/pkg/variable/source" +) + +// NewPipelineExecutor return a new pipelineExecutor +func NewPipelineExecutor(ctx context.Context, client ctrlclient.Client, pipeline *kkcorev1.Pipeline, logOutput io.Writer) Executor { + // get variable + v, err := variable.New(ctx, client, *pipeline, source.FileSource) + if err != nil { + klog.V(5).ErrorS(nil, "convert playbook error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline)) + + return nil + } + + return &pipelineExecutor{ + option: &option{ + client: client, + pipeline: pipeline, + variable: v, + logOutput: logOutput, + }, + } +} + +// executor for pipeline +type pipelineExecutor struct { + *option +} + +// Exec pipeline. covert playbook to block and executor it. +func (e pipelineExecutor) Exec(ctx context.Context) error { + klog.V(5).InfoS("deal project", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + pj, err := project.New(ctx, *e.pipeline, true) + if err != nil { + return fmt.Errorf("deal project error: %w", err) + } + + // convert to transfer.Playbook struct + pb, err := pj.MarshalPlaybook() + if err != nil { + return fmt.Errorf("convert playbook error: %w", err) + } + + for _, play := range pb.Play { + // check tags + if !play.Taggable.IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) { + // if not match the tags. skip + continue + } + // hosts should contain all host's name. hosts should not be empty. 
+ var hosts []string + if err := e.dealHosts(play.PlayHost, &hosts); err != nil { + klog.V(4).ErrorS(err, "deal hosts error, skip this playbook", "hosts", play.PlayHost) + + continue + } + // when gather_fact is set. get host's information from remote. + if err := e.dealGatherFacts(ctx, play.GatherFacts, hosts); err != nil { + return fmt.Errorf("deal gather_facts argument error: %w", err) + } + // Batch execution, with each batch being a group of hosts run in serial. + var batchHosts [][]string + if err := e.dealSerial(play.Serial.Data, hosts, &batchHosts); err != nil { + return fmt.Errorf("deal serial argument error: %w", err) + } + e.dealRunOnce(play.RunOnce, hosts, &batchHosts) + // exec pipeline in each BatchHosts + if err := e.execBatchHosts(ctx, play, batchHosts); err != nil { + return fmt.Errorf("exec batch hosts error: %v", err) + } + } + + return nil +} + +// execBatchHosts executor block in play order by: "pre_tasks" > "roles" > "tasks" > "post_tasks" +func (e pipelineExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.Play, batchHosts [][]string) any { + // generate and execute task. + for _, serials := range batchHosts { + // each batch hosts should not be empty. 
+ if len(serials) == 0 { + klog.V(5).ErrorS(nil, "Host is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + + return errors.New("host is empty") + } + + if err := e.variable.Merge(variable.MergeRuntimeVariable(play.Vars, serials...)); err != nil { + return fmt.Errorf("merge variable error: %w", err) + } + // generate task from pre tasks + if err := (blockExecutor{ + option: e.option, + hosts: serials, + ignoreErrors: play.IgnoreErrors, + blocks: play.PreTasks, + tags: play.Taggable, + }.Exec(ctx)); err != nil { + return fmt.Errorf("execute pre-tasks from play error: %w", err) + } + // generate task from role + for _, role := range play.Roles { + if err := e.variable.Merge(variable.MergeRuntimeVariable(role.Vars, serials...)); err != nil { + return fmt.Errorf("merge variable error: %w", err) + } + // use the most closely configuration + ignoreErrors := role.IgnoreErrors + if ignoreErrors == nil { + ignoreErrors = play.IgnoreErrors + } + // role is block. + if err := (blockExecutor{ + option: e.option, + hosts: serials, + ignoreErrors: ignoreErrors, + blocks: role.Block, + role: role.Role, + when: role.When.Data, + tags: kkprojectv1.JoinTag(role.Taggable, play.Taggable), + }.Exec(ctx)); err != nil { + return fmt.Errorf("execute role-tasks error: %w", err) + } + } + // generate task from tasks + if err := (blockExecutor{ + option: e.option, + hosts: serials, + ignoreErrors: play.IgnoreErrors, + blocks: play.Tasks, + tags: play.Taggable, + }.Exec(ctx)); err != nil { + return fmt.Errorf("execute tasks error: %w", err) + } + // generate task from post tasks + if err := (blockExecutor{ + option: e.option, + hosts: serials, + ignoreErrors: play.IgnoreErrors, + blocks: play.Tasks, + tags: play.Taggable, + }.Exec(ctx)); err != nil { + return fmt.Errorf("execute post-tasks error: %w", err) + } + } + + return nil +} + +// dealHosts "hosts" argument in playbook. 
get hostname from kkprojectv1.PlayHost +func (e pipelineExecutor) dealHosts(host kkprojectv1.PlayHost, i *[]string) error { + ahn, err := e.variable.Get(variable.GetHostnames(host.Hosts)) + if err != nil { + return fmt.Errorf("getHostnames error: %w", err) + } + + if h, ok := ahn.([]string); ok { + *i = h + } + if len(*i) == 0 { // if hosts is empty skip this playbook + return errors.New("hosts is empty") + } + + return nil +} + +// dealGatherFacts "gather_facts" argument in playbook. get host remote info and merge to variable +func (e pipelineExecutor) dealGatherFacts(ctx context.Context, gatherFacts bool, hosts []string) error { + if !gatherFacts { + // skip + return nil + } + + dealGatherFactsInHost := func(hostname string) error { + v, err := e.variable.Get(variable.GetParamVariable(hostname)) + if err != nil { + klog.V(5).ErrorS(err, "get host variable error", "hostname", hostname) + + return err + } + + connectorVars := make(map[string]any) + if c1, ok := v.(map[string]any)[_const.VariableConnector]; ok { + if c2, ok := c1.(map[string]any); ok { + connectorVars = c2 + } + } + // get host connector + conn, err := connector.NewConnector(hostname, connectorVars) + if err != nil { + klog.V(5).ErrorS(err, "new connector error", "hostname", hostname) + + return err + } + if err := conn.Init(ctx); err != nil { + klog.V(5).ErrorS(err, "init connection error", "hostname", hostname) + + return err + } + defer conn.Close(ctx) + + if gf, ok := conn.(connector.GatherFacts); ok { + remoteInfo, err := gf.HostInfo(ctx) + if err != nil { + klog.V(5).ErrorS(err, "gatherFacts from connector error", "hostname", hostname) + + return err + } + if err := e.variable.Merge(variable.MergeRemoteVariable(remoteInfo, hostname)); err != nil { + klog.V(5).ErrorS(err, "merge gather fact error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "host", hostname) + + return fmt.Errorf("merge gather fact error: %w", err) + } + } + + return nil + } + + for _, hostname := range hosts { + 
if err := dealGatherFactsInHost(hostname); err != nil { + return err + } + } + + return nil +} + +// dealSerial "serial" argument in playbook. +func (e pipelineExecutor) dealSerial(serial []any, hosts []string, batchHosts *[][]string) error { + var err error + *batchHosts, err = converter.GroupHostBySerial(hosts, serial) + if err != nil { + return fmt.Errorf("group host by serial error: %w", err) + } + + return nil +} + +// dealRunOnce argument in playbook. if RunOnce is true. it's always only run in the first hosts. +func (e pipelineExecutor) dealRunOnce(runOnce bool, hosts []string, batchHosts *[][]string) { + if runOnce { + // runOnce only run in first node + *batchHosts = [][]string{{hosts[0]}} + } +} diff --git a/pkg/executor/pipeline_executor_test.go b/pkg/executor/pipeline_executor_test.go new file mode 100644 index 00000000..a6c76721 --- /dev/null +++ b/pkg/executor/pipeline_executor_test.go @@ -0,0 +1,38 @@ +package executor + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPipelineExecutor_DealRunOnce(t *testing.T) { + testcases := []struct { + name string + runOnce bool + hosts []string + batchHosts [][]string + except [][]string + }{ + { + name: "runonce is false", + runOnce: false, + batchHosts: [][]string{{"node1", "node2"}}, + except: [][]string{{"node1", "node2"}}, + }, + { + name: "runonce is true", + runOnce: true, + hosts: []string{"node1"}, + batchHosts: [][]string{{"node1", "node2"}}, + except: [][]string{{"node1"}}, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + pipelineExecutor{}.dealRunOnce(tc.runOnce, tc.hosts, &tc.batchHosts) + assert.Equal(t, tc.except, tc.batchHosts) + }) + } +} diff --git a/pkg/executor/task_executor.go b/pkg/executor/task_executor.go new file mode 100644 index 00000000..44a8382e --- /dev/null +++ b/pkg/executor/task_executor.go @@ -0,0 +1,342 @@ +package executor + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + 
"github.com/schollz/progressbar/v3" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" + kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1" + _const "github.com/kubesphere/kubekey/v4/pkg/const" + "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" + "github.com/kubesphere/kubekey/v4/pkg/modules" + "github.com/kubesphere/kubekey/v4/pkg/variable" +) + +type taskExecutor struct { + *option + task *kkcorev1alpha1.Task +} + +// Exec and store Task +func (e taskExecutor) Exec(ctx context.Context) error { + // create task + if err := e.client.Create(ctx, e.task); err != nil { + klog.V(5).ErrorS(err, "create task error", "task", ctrlclient.ObjectKeyFromObject(e.task), "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + + return err + } + defer func() { + e.pipeline.Status.TaskResult.Total++ + switch e.task.Status.Phase { + case kkcorev1alpha1.TaskPhaseSuccess: + e.pipeline.Status.TaskResult.Success++ + case kkcorev1alpha1.TaskPhaseIgnored: + e.pipeline.Status.TaskResult.Ignored++ + case kkcorev1alpha1.TaskPhaseFailed: + e.pipeline.Status.TaskResult.Failed++ + } + }() + + for !e.task.IsComplete() { + var roleLog string + if e.task.Annotations[kkcorev1alpha1.TaskAnnotationRole] != "" { + roleLog = "[" + e.task.Annotations[kkcorev1alpha1.TaskAnnotationRole] + "] " + } + klog.V(5).InfoS("begin run task", "task", ctrlclient.ObjectKeyFromObject(e.task)) + fmt.Fprintf(e.logOutput, "%s %s%s\n", time.Now().Format(time.TimeOnly+" MST"), roleLog, e.task.Spec.Name) + // exec task + e.task.Status.Phase = kkcorev1alpha1.TaskPhaseRunning + if err := e.client.Status().Update(ctx, e.task); err != nil { + klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(e.task), "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + } + e.execTask(ctx) + if err := 
e.client.Status().Update(ctx, e.task); err != nil { + klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(e.task), "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline)) + + return err + } + } + // exit when task run failed + if e.task.IsFailed() { + var hostReason []kkcorev1.PipelineFailedDetailHost + for _, tr := range e.task.Status.HostResults { + hostReason = append(hostReason, kkcorev1.PipelineFailedDetailHost{ + Host: tr.Host, + Stdout: tr.Stdout, + StdErr: tr.StdErr, + }) + } + e.pipeline.Status.FailedDetail = append(e.pipeline.Status.FailedDetail, kkcorev1.PipelineFailedDetail{ + Task: e.task.Spec.Name, + Hosts: hostReason, + }) + e.pipeline.Status.Phase = kkcorev1.PipelinePhaseFailed + + return fmt.Errorf("task %s run failed", e.task.Spec.Name) + } + + return nil +} + +// execTask +func (e taskExecutor) execTask(ctx context.Context) { + // check task host results + wg := &wait.Group{} + e.task.Status.HostResults = make([]kkcorev1alpha1.TaskHostResult, len(e.task.Spec.Hosts)) + for i, h := range e.task.Spec.Hosts { + wg.StartWithContext(ctx, e.execTaskHost(i, h)) + } + wg.Wait() + // host result for task + e.task.Status.Phase = kkcorev1alpha1.TaskPhaseSuccess + for _, data := range e.task.Status.HostResults { + if data.StdErr != "" { + if e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError { + e.task.Status.Phase = kkcorev1alpha1.TaskPhaseIgnored + } else { + e.task.Status.Phase = kkcorev1alpha1.TaskPhaseFailed + } + + break + } + } +} + +// execTaskHost deal module in each host parallel. 
+func (e taskExecutor) execTaskHost(i int, h string) func(ctx context.Context) { + return func(ctx context.Context) { + // task result + var stdout, stderr string + defer func() { + if err := e.dealRegister(stdout, stderr, h); err != nil { + stderr = err.Error() + } + if stderr != "" && e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError { + klog.V(5).ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(e.task)) + } else if stderr != "" { + klog.ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(e.task)) + } + // fill result + e.task.Status.HostResults[i] = kkcorev1alpha1.TaskHostResult{ + Host: h, + Stdout: stdout, + StdErr: stderr, + } + }() + // task log + deferFunc := e.execTaskHostLogs(ctx, h, &stdout, &stderr) + defer deferFunc() + // task execute + ha, err := e.variable.Get(variable.GetAllVariable(h)) + if err != nil { + stderr = fmt.Sprintf("get variable error: %v", err) + + return + } + // convert hostVariable to map + had, ok := ha.(map[string]any) + if !ok { + stderr = fmt.Sprintf("host: %s variable is not a map", h); return + } + // check when condition + if skip := e.dealWhen(had, &stdout, &stderr); skip { + return + } + // execute module in loop with loop item. + // if loop is empty. 
execute once, and the item is null + for _, item := range e.dealLoop(had) { + // set item to runtime variable + if err := e.variable.Merge(variable.MergeRuntimeVariable(map[string]any{ + _const.VariableItem: item, + }, h)); err != nil { + stderr = fmt.Sprintf("set loop item to variable error: %v", err) + + return + } + e.executeModule(ctx, e.task, h, &stdout, &stderr) + // delete item + if err := e.variable.Merge(variable.MergeRuntimeVariable(map[string]any{ + _const.VariableItem: nil, + }, h)); err != nil { + stderr = fmt.Sprintf("clean loop item to variable error: %v", err) + + return + } + } + } +} + +// execTaskHostLogs logs for each host +func (e taskExecutor) execTaskHostLogs(ctx context.Context, h string, stdout, stderr *string) func() { + // placeholder format task log + var placeholder string + if hostNameMaxLen, err := e.variable.Get(variable.GetHostMaxLength()); err == nil { + if hl, ok := hostNameMaxLen.(int); ok { + placeholder = strings.Repeat(" ", hl-len(h)) + } + } + // progress bar for task + var bar = progressbar.NewOptions(-1, + progressbar.OptionSetWriter(e.logOutput), + progressbar.OptionSpinnerCustom([]string{" "}), + progressbar.OptionEnableColorCodes(true), + progressbar.OptionSetDescription(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[36mrunning\033[0m", h, placeholder)), + progressbar.OptionOnCompletion(func() { + if _, err := os.Stdout.WriteString("\n"); err != nil { + klog.ErrorS(err, "failed to write output", "host", h) + } + }), + ) + // run progress + go func() { + err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(context.Context) (bool, error) { + if bar.IsFinished() { + return true, nil + } + if err := bar.Add(1); err != nil { + return false, err + } + + return false, nil + }) + if err != nil { + klog.ErrorS(err, "failed to wait for task run to finish", "host", h) + } + }() + + return func() { + switch { + case *stderr != "": + if e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError { // ignore + 
bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34mignore \033[0m", h, placeholder)) + } else { // failed + bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[31mfailed \033[0m", h, placeholder)) + } + case *stdout == modules.StdoutSkip: // skip + bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34mskip \033[0m", h, placeholder)) + default: //success + bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34msuccess\033[0m", h, placeholder)) + } + if err := bar.Finish(); err != nil { + klog.ErrorS(err, "finish bar error") + } + } +} + +// execLoop parse loop to item slice and execute it. if loop contains template string. convert it. +// loop is json string. try convertor to string slice by json. +// loop is normal string. set it to empty slice and return. +func (e taskExecutor) dealLoop(ha map[string]any) []any { + var items []any + switch { + case e.task.Spec.Loop.Raw == nil: + // loop is not set. add one element to execute once module. + items = []any{nil} + default: + items = variable.Extension2Slice(ha, e.task.Spec.Loop) + } + + return items +} + +// executeModule find register module and execute it in a single host. +func (e taskExecutor) executeModule(ctx context.Context, task *kkcorev1alpha1.Task, host string, stdout, stderr *string) { + // get all variable. which contains item. + ha, err := e.variable.Get(variable.GetAllVariable(host)) + if err != nil { + *stderr = fmt.Sprintf("failed to get host %s variable: %v", host, err) + + return + } + // convert hostVariable to map + had, ok := ha.(map[string]any) + if !ok { + *stderr = fmt.Sprintf("host: %s variable is not a map", host) + + return + } + // check failed when condition + if skip := e.dealFailedWhen(had, stdout, stderr); skip { + return + } + *stdout, *stderr = modules.FindModule(task.Spec.Module.Name)(ctx, modules.ExecOptions{ + Args: e.task.Spec.Module.Args, + Host: host, + Variable: e.variable, + Task: *e.task, + Pipeline: *e.pipeline, + }) +} + +// dealWhen "when" argument in task. 
+func (e taskExecutor) dealWhen(had map[string]any, stdout, stderr *string) bool { + if len(e.task.Spec.When) > 0 { + ok, err := tmpl.ParseBool(had, e.task.Spec.When) + if err != nil { + klog.V(5).ErrorS(err, "validate when condition error", "task", ctrlclient.ObjectKeyFromObject(e.task)) + *stderr = fmt.Sprintf("parse when condition error: %v", err) + + return true + } + if !ok { + *stdout = modules.StdoutSkip + + return true + } + } + + return false +} + +// dealFailedWhen "failed_when" argument in task. +func (e taskExecutor) dealFailedWhen(had map[string]any, stdout, stderr *string) bool { + if len(e.task.Spec.FailedWhen) > 0 { + ok, err := tmpl.ParseBool(had, e.task.Spec.FailedWhen) + if err != nil { + klog.V(5).ErrorS(err, "validate failed_when condition error", "task", ctrlclient.ObjectKeyFromObject(e.task)) + *stderr = fmt.Sprintf("parse failed_when condition error: %v", err) + + return true + } + if ok { + *stdout = modules.StdoutSkip + *stderr = "reach failed_when, failed" + + return true + } + } + + return false +} + +// dealRegister "register" argument in task. 
+func (e taskExecutor) dealRegister(stdout, stderr, host string) error { + if e.task.Spec.Register != "" { + var stdoutResult any = stdout + var stderrResult any = stderr + // try to convert by json + _ = json.Unmarshal([]byte(stdout), &stdoutResult) + // try to convert by json + _ = json.Unmarshal([]byte(stderr), &stderrResult) + // set variable to parent location + if err := e.variable.Merge(variable.MergeRuntimeVariable(map[string]any{ + e.task.Spec.Register: map[string]any{ + "stdout": stdoutResult, + "stderr": stderrResult, + }, + }, host)); err != nil { + return fmt.Errorf("register task result to variable error: %w", err) + } + } + + return nil +} diff --git a/pkg/executor/task_executor_test.go b/pkg/executor/task_executor_test.go new file mode 100644 index 00000000..fdf0bede --- /dev/null +++ b/pkg/executor/task_executor_test.go @@ -0,0 +1,71 @@ +package executor + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1" +) + +func TestTaskExecutor(t *testing.T) { + testcases := []struct { + name string + task *kkcorev1alpha1.Task + }{ + { + name: "debug module in single host", + task: &kkcorev1alpha1.Task{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: corev1.NamespaceDefault, + }, + Spec: kkcorev1alpha1.TaskSpec{ + Hosts: []string{"node1"}, + Module: kkcorev1alpha1.Module{ + Name: "debug", + Args: runtime.RawExtension{Raw: []byte(`{"msg":"hello"}`)}, + }, + }, + Status: kkcorev1alpha1.TaskStatus{}, + }, + }, + { + name: "debug module in multiple hosts", + task: &kkcorev1alpha1.Task{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: corev1.NamespaceDefault, + }, + Spec: kkcorev1alpha1.TaskSpec{ + Hosts: []string{"node1", "n2"}, + Module: kkcorev1alpha1.Module{ + Name: "debug", + Args: 
runtime.RawExtension{Raw: []byte(`{"msg":"hello"}`)}, + }, + }, + Status: kkcorev1alpha1.TaskStatus{}, + }, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + o, err := newTestOption() + if err != nil { + t.Fatal(err) + } + + if err := (&taskExecutor{ + option: o, + task: tc.task, + }).Exec(context.TODO()); err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/pkg/manager/command_manager.go b/pkg/manager/command_manager.go index 3f9ea651..8e808967 100644 --- a/pkg/manager/command_manager.go +++ b/pkg/manager/command_manager.go @@ -41,6 +41,7 @@ type commandManager struct { logOutput io.Writer } +// Run command Manager. print log and run pipeline executor. func (m *commandManager) Run(ctx context.Context) error { fmt.Fprint(m.logOutput, ` @@ -66,7 +67,8 @@ func (m *commandManager) Run(ctx context.Context) error { klog.ErrorS(err, "clean runtime directory error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline), "runtime_dir", _const.GetRuntimeDir()) } } - if m.Pipeline.Spec.JobSpec.Schedule != "" { // if pipeline is cornJob. it's always running. + // if pipeline is cornJob. it's always running. 
+ if m.Pipeline.Spec.JobSpec.Schedule != "" { m.Pipeline.Status.Phase = kkcorev1.PipelinePhaseRunning } // update pipeline status @@ -76,10 +78,11 @@ func (m *commandManager) Run(ctx context.Context) error { }() m.Pipeline.Status.Phase = kkcorev1.PipelinePhaseSucceed - if err := executor.NewTaskExecutor(m.Client, m.Pipeline, m.logOutput).Exec(ctx); err != nil { + if err := executor.NewPipelineExecutor(ctx, m.Client, m.Pipeline, m.logOutput).Exec(ctx); err != nil { klog.ErrorS(err, "executor tasks error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline)) m.Pipeline.Status.Phase = kkcorev1.PipelinePhaseFailed m.Pipeline.Status.Reason = err.Error() + return err } diff --git a/pkg/manager/controller_manager.go b/pkg/manager/controller_manager.go index 7e8bd66f..dd72a3cd 100644 --- a/pkg/manager/controller_manager.go +++ b/pkg/manager/controller_manager.go @@ -34,6 +34,7 @@ type controllerManager struct { LeaderElection bool } +// Run controllerManager, run controller in kubernetes func (c controllerManager) Run(ctx context.Context) error { ctrl.SetLogger(klog.NewKlogr()) restconfig, err := ctrl.GetConfig() @@ -45,6 +46,7 @@ func (c controllerManager) Run(ctx context.Context) error { if err != nil { return fmt.Errorf("could not get rest config: %w", err) } + mgr, err := ctrl.NewManager(restconfig, ctrl.Options{ Scheme: _const.Scheme, LeaderElection: c.LeaderElection, @@ -61,6 +63,7 @@ func (c controllerManager) Run(ctx context.Context) error { MaxConcurrentReconciles: c.MaxConcurrentReconciles, }).SetupWithManager(mgr); err != nil { klog.ErrorS(err, "create pipeline controller error") + return err } diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 313f23fb..a8ecf0ea 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -31,6 +31,7 @@ type Manager interface { Run(ctx context.Context) error } +// CommandManagerOptions for NewCommandManager type CommandManagerOptions struct { *kkcorev1.Pipeline *kkcorev1.Config @@ -39,6 +40,7 @@ type 
CommandManagerOptions struct { ctrlclient.Client } +// NewCommandManager return a new commandManager func NewCommandManager(o CommandManagerOptions) Manager { return &commandManager{ Pipeline: o.Pipeline, @@ -49,11 +51,13 @@ func NewCommandManager(o CommandManagerOptions) Manager { } } +// ControllerManagerOptions for NewControllerManager type ControllerManagerOptions struct { MaxConcurrentReconciles int LeaderElection bool } +// NewControllerManager return a new controllerManager func NewControllerManager(o ControllerManagerOptions) Manager { return &controllerManager{ MaxConcurrentReconciles: o.MaxConcurrentReconciles, diff --git a/pkg/modules/assert.go b/pkg/modules/assert.go index 36194cc6..a19b185d 100644 --- a/pkg/modules/assert.go +++ b/pkg/modules/assert.go @@ -18,54 +18,89 @@ package modules import ( "context" + "errors" "fmt" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" "github.com/kubesphere/kubekey/v4/pkg/variable" ) +type assertArgs struct { + that []string + successMsg string + failMsg string // high priority than msg + msg string +} + +func newAssertArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*assertArgs, error) { + var err error + aa := &assertArgs{} + args := variable.Extension2Variables(raw) + if aa.that, err = variable.StringSliceVar(vars, args, "that"); err != nil { + return nil, errors.New("\"that\" should be []string or string") + } + aa.successMsg, _ = variable.StringVar(vars, args, "success_msg") + if aa.successMsg == "" { + aa.successMsg = StdoutTrue + } + aa.failMsg, _ = variable.StringVar(vars, args, "fail_msg") + aa.msg, _ = variable.StringVar(vars, args, "msg") + if aa.msg == "" { + aa.msg = StdoutFalse + } + + return aa, nil +} + +// ModuleAssert deal "assert" module func ModuleAssert(ctx context.Context, options ExecOptions) (string, string) { // get host variable - ha, err := 
options.Variable.Get(variable.GetAllVariable(options.Host)) + ha, err := options.getAllVariables() if err != nil { - return "", fmt.Sprintf("failed to get host variable: %v", err) + return "", err.Error() } - args := variable.Extension2Variables(options.Args) - thatParam, err := variable.StringSliceVar(ha.(map[string]any), args, "that") + aa, err := newAssertArgs(ctx, options.Args, ha) if err != nil { - return "", "\"that\" should be []string or string" + klog.V(4).ErrorS(err, "get assert args error", "task", ctrlclient.ObjectKeyFromObject(&options.Task)) + + return "", err.Error() } - ok, err := tmpl.ParseBool(ha.(map[string]any), thatParam) + ok, err := tmpl.ParseBool(ha, aa.that) if err != nil { return "", fmt.Sprintf("parse \"that\" error: %v", err) } - + // condition is true if ok { - if successMsgParam, err := variable.StringVar(ha.(map[string]any), args, "success_msg"); err == nil { - if r, err := tmpl.ParseString(ha.(map[string]any), successMsgParam); err != nil { - return "", fmt.Sprintf("parse \"success_msg\" error: %v", err) - } else { - return r, "" - } + r, err := tmpl.ParseString(ha, aa.successMsg) + if err == nil { + return r, "" } - return stdoutTrue, "" - } else { - if failMsgParam, err := variable.StringVar(ha.(map[string]any), args, "fail_msg"); err == nil { - if r, err := tmpl.ParseString(ha.(map[string]any), failMsgParam); err != nil { - return "", fmt.Sprintf("parse \"fail_msg\" error: %v", err) - } else { - return stdoutFalse, r - } - } - if msgParam, err := variable.StringVar(ha.(map[string]any), args, "msg"); err == nil { - if r, err := tmpl.ParseString(ha.(map[string]any), msgParam); err != nil { - return "", fmt.Sprintf("parse \"msg\" error: %v", err) - } else { - return stdoutFalse, r - } - } - return stdoutFalse, "False" + klog.V(4).ErrorS(err, "parse \"success_msg\" error", "task", ctrlclient.ObjectKeyFromObject(&options.Task)) + + return StdoutTrue, "" } + // condition is false and fail_msg is not empty + if aa.failMsg != "" { + r, 
err := tmpl.ParseString(ha, aa.failMsg) + if err == nil { + return StdoutFalse, r + } + klog.V(4).ErrorS(err, "parse \"fail_msg\" error", "task", ctrlclient.ObjectKeyFromObject(&options.Task)) + } + // condition is false and msg is not empty + if aa.msg != "" { + r, err := tmpl.ParseString(ha, aa.msg) + if err == nil { + return StdoutFalse, r + } + klog.V(4).ErrorS(err, "parse \"msg\" error", "task", ctrlclient.ObjectKeyFromObject(&options.Task)) + } + + return StdoutFalse, "False" } diff --git a/pkg/modules/assert_test.go b/pkg/modules/assert_test.go index 51c57956..2a1afaa6 100644 --- a/pkg/modules/assert_test.go +++ b/pkg/modules/assert_test.go @@ -54,7 +54,7 @@ func TestAssert(t *testing.T) { }, }, }, - exceptStdout: stdoutTrue, + exceptStdout: StdoutTrue, }, { name: "success with success_msg", @@ -86,7 +86,7 @@ func TestAssert(t *testing.T) { }, }, }, - exceptStdout: stdoutFalse, + exceptStdout: StdoutFalse, exceptStderr: "False", }, { @@ -103,7 +103,7 @@ func TestAssert(t *testing.T) { }, }, }, - exceptStdout: stdoutFalse, + exceptStdout: StdoutFalse, exceptStderr: "failed v2", }, } @@ -112,6 +112,7 @@ func TestAssert(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() + acStdout, acStderr := ModuleAssert(ctx, tc.opt) assert.Equal(t, tc.exceptStdout, acStdout) assert.Equal(t, tc.exceptStderr, acStderr) diff --git a/pkg/modules/command.go b/pkg/modules/command.go index 8af610a1..1161054e 100644 --- a/pkg/modules/command.go +++ b/pkg/modules/command.go @@ -18,27 +18,26 @@ package modules import ( "context" - "fmt" "strings" "github.com/kubesphere/kubekey/v4/pkg/variable" ) +// ModuleCommand deal "command" module. 
func ModuleCommand(ctx context.Context, options ExecOptions) (string, string) { // get host variable - ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) + ha, err := options.getAllVariables() if err != nil { - return "", fmt.Sprintf("failed to get host variable: %v", err) + return "", err.Error() } - // get connector - conn, err := getConnector(ctx, options.Host, ha.(map[string]any)) + conn, err := getConnector(ctx, options.Host, ha) if err != nil { return "", err.Error() } defer conn.Close(ctx) // command string - command, err := variable.Extension2String(ha.(map[string]any), options.Args) + command, err := variable.Extension2String(ha, options.Args) if err != nil { return "", err.Error() } @@ -51,5 +50,6 @@ func ModuleCommand(ctx context.Context, options ExecOptions) (string, string) { if data != nil { stdout = strings.TrimSuffix(string(data), "\n") } + return stdout, stderr } diff --git a/pkg/modules/command_test.go b/pkg/modules/command_test.go index ed36950c..e442189f 100644 --- a/pkg/modules/command_test.go +++ b/pkg/modules/command_test.go @@ -69,6 +69,7 @@ func TestCommand(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithTimeout(tc.ctxFunc(), time.Second*5) defer cancel() + acStdout, acStderr := ModuleCommand(ctx, tc.opt) assert.Equal(t, tc.exceptStdout, acStdout) assert.Equal(t, tc.exceptStderr, acStderr) diff --git a/pkg/modules/copy.go b/pkg/modules/copy.go index d5a619c6..85b6bc47 100644 --- a/pkg/modules/copy.go +++ b/pkg/modules/copy.go @@ -18,183 +18,272 @@ package modules import ( "context" + "errors" "fmt" "io/fs" "os" "path/filepath" "strings" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kubesphere/kubekey/v4/pkg/connector" + kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1" "github.com/kubesphere/kubekey/v4/pkg/project" "github.com/kubesphere/kubekey/v4/pkg/variable" ) +type copyArgs struct { 
+ src string + content string + dest string + mode *int +} + +func newCopyArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*copyArgs, error) { + var err error + ca := ©Args{} + args := variable.Extension2Variables(raw) + ca.src, _ = variable.StringVar(vars, args, "src") + ca.content, _ = variable.StringVar(vars, args, "content") + ca.dest, err = variable.StringVar(vars, args, "dest") + if err != nil { + return nil, errors.New("\"dest\" in args should be string") + } + ca.mode, _ = variable.IntVar(vars, args, "mode") + + return ca, nil +} + +// ModuleCopy deal "copy" module func ModuleCopy(ctx context.Context, options ExecOptions) (string, string) { // get host variable - ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) + ha, err := options.getAllVariables() if err != nil { - return "", fmt.Sprintf("failed to get host variable: %v", err) + return "", err.Error() } - // check args - // todo should add policy? - args := variable.Extension2Variables(options.Args) - srcParam, _ := variable.StringVar(ha.(map[string]any), args, "src") - contentParam, _ := variable.StringVar(ha.(map[string]any), args, "content") - destParam, err := variable.StringVar(ha.(map[string]any), args, "dest") + ca, err := newCopyArgs(ctx, options.Args, ha) if err != nil { - return "", "\"dest\" in args should be string" + klog.V(4).ErrorS(err, "get copy args error", "task", ctrlclient.ObjectKeyFromObject(&options.Task)) + + return "", err.Error() } // get connector - conn, err := getConnector(ctx, options.Host, ha.(map[string]any)) + conn, err := getConnector(ctx, options.Host, ha) if err != nil { return "", fmt.Sprintf("get connector error: %v", err) } defer conn.Close(ctx) switch { - case srcParam != "": // copy local file to remote - if filepath.IsAbs(srcParam) { // if src is absolute path. 
find it in local path - fileInfo, err := os.Stat(srcParam) - if err != nil { - return "", fmt.Sprintf(" get src file %s in local path error: %v", srcParam, err) - } - - if fileInfo.IsDir() { // src is dir - if err := filepath.WalkDir(srcParam, func(path string, d fs.DirEntry, err error) error { - if d.IsDir() { // only copy file - return nil - } - if err != nil { - return fmt.Errorf("walk dir %s error: %w", srcParam, err) - } - - // get file old mode - info, err := d.Info() - if err != nil { - return fmt.Errorf("get file info error: %w", err) - } - mode := info.Mode() - if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil { - mode = os.FileMode(modeParam) - } - // read file - data, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("read file error: %w", err) - } - // copy file to remote - var destFilename = destParam - if strings.HasSuffix(destParam, "/") { - rel, err := filepath.Rel(srcParam, path) - if err != nil { - return fmt.Errorf("get relative file path error: %w", err) - } - destFilename = filepath.Join(destParam, rel) - } - if err := conn.PutFile(ctx, data, destFilename, mode); err != nil { - return fmt.Errorf("copy file error: %w", err) - } - return nil - }); err != nil { - return "", fmt.Sprintf(" walk dir %s in local path error: %v", srcParam, err) - } - } else { // src is file - data, err := os.ReadFile(srcParam) - if err != nil { - return "", fmt.Sprintf("read file error: %v", err) - } - if strings.HasSuffix(destParam, "/") { - destParam += filepath.Base(srcParam) - } - mode := fileInfo.Mode() - if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil { - mode = os.FileMode(modeParam) - } - if err := conn.PutFile(ctx, data, destParam, mode); err != nil { - return "", fmt.Sprintf("copy file error: %v", err) - } - } - } else { // if src is not absolute path. 
find file in project - pj, err := project.New(options.Pipeline, false) - if err != nil { - return "", fmt.Sprintf("get project error: %v", err) - } - fileInfo, err := pj.Stat(srcParam, project.GetFileOption{IsFile: true, Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]}) - if err != nil { - return "", fmt.Sprintf("get file %s from project error %v", srcParam, err) - } - - if fileInfo.IsDir() { - if err := pj.WalkDir(srcParam, project.GetFileOption{IsFile: true, Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]}, func(path string, d fs.DirEntry, err error) error { - if d.IsDir() { // only copy file - return nil - } - if err != nil { - return fmt.Errorf("walk dir %s error: %w", srcParam, err) - } - - info, err := d.Info() - if err != nil { - return fmt.Errorf("get file info error: %w", err) - } - mode := info.Mode() - if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil { - mode = os.FileMode(modeParam) - } - data, err := pj.ReadFile(path, project.GetFileOption{Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]}) - if err != nil { - return fmt.Errorf("read file error: %w", err) - } - var destFilename = destParam - if strings.HasSuffix(destParam, "/") { - rel, err := pj.Rel(srcParam, path, project.GetFileOption{Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]}) - if err != nil { - return fmt.Errorf("get relative file path error: %w", err) - } - destFilename = filepath.Join(destParam, rel) - } - if err := conn.PutFile(ctx, data, destFilename, mode); err != nil { - return fmt.Errorf("copy file error: %w", err) - } - return nil - }); err != nil { - return "", fmt.Sprintf(" walk dir %s in local path error: %v", srcParam, err) - } - } else { - data, err := pj.ReadFile(srcParam, project.GetFileOption{IsFile: true, Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]}) - if err != nil { - return "", fmt.Sprintf("read file error: %v", err) - } - if 
strings.HasSuffix(destParam, "/") { - destParam += filepath.Base(srcParam) - } - mode := fileInfo.Mode() - if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil { - mode = os.FileMode(modeParam) - } - if err := conn.PutFile(ctx, data, destParam, mode); err != nil { - return "", fmt.Sprintf("copy file error: %v", err) - } - } - } - return stdoutSuccess, "" - - case contentParam != "": // convert content param and copy to remote - if strings.HasSuffix(destParam, "/") { - return "", "\"content\" should copy to a file" - } - mode := os.ModePerm - if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil { - mode = os.FileMode(modeParam) - } - - if err := conn.PutFile(ctx, []byte(contentParam), destParam, mode); err != nil { - return "", fmt.Sprintf("copy file error: %v", err) - } - return stdoutSuccess, "" + case ca.src != "": // copy local file to remote + return ca.copySrc(ctx, options, conn) + case ca.content != "": + return ca.copyContent(ctx, os.ModePerm, conn) default: return "", "either \"src\" or \"content\" must be provided." } } + +// copySrc copy src file to dest +func (ca copyArgs) copySrc(ctx context.Context, options ExecOptions, conn connector.Connector) (string, string) { + if filepath.IsAbs(ca.src) { // if src is absolute path. find it in local path + fileInfo, err := os.Stat(ca.src) + if err != nil { + return "", fmt.Sprintf(" get src file %s in local path error: %v", ca.src, err) + } + + if fileInfo.IsDir() { // src is dir + if err := ca.absDir(ctx, conn); err != nil { + return "", fmt.Sprintf("sync copy absolute dir error %s", err) + } + } else { // src is file + if err := ca.absFile(ctx, fileInfo.Mode(), conn); err != nil { + return "", fmt.Sprintf("sync copy absolute dir error %s", err) + } + } + } else { // if src is not absolute path. 
find file in project + pj, err := project.New(ctx, options.Pipeline, false) + if err != nil { + return "", fmt.Sprintf("get project error: %v", err) + } + + fileInfo, err := pj.Stat(ca.src, project.GetFileOption{IsFile: true, Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]}) + if err != nil { + return "", fmt.Sprintf("get file %s from project error %v", ca.src, err) + } + + if fileInfo.IsDir() { + if err := ca.relDir(ctx, pj, options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole], conn); err != nil { + return "", fmt.Sprintf("sync copy relative dir error %s", err) + } + } else { + if err := ca.relFile(ctx, pj, options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole], fileInfo.Mode(), conn); err != nil { + return "", fmt.Sprintf("sync copy relative dir error %s", err) + } + } + } + + return StdoutSuccess, "" +} + +// copyContent convert content param and copy to dest +func (ca copyArgs) copyContent(ctx context.Context, mode fs.FileMode, conn connector.Connector) (string, string) { + if strings.HasSuffix(ca.dest, "/") { + return "", "\"content\" should copy to a file" + } + + if ca.mode != nil { + mode = os.FileMode(*ca.mode) + } + + if err := conn.PutFile(ctx, []byte(ca.content), ca.dest, mode); err != nil { + return "", fmt.Sprintf("copy file error: %v", err) + } + + return StdoutSuccess, "" +} + +// relFile when copy.src is relative dir, get all files from project, and copy to remote. 
+func (ca copyArgs) relFile(ctx context.Context, pj project.Project, role string, mode fs.FileMode, conn connector.Connector) any { + data, err := pj.ReadFile(ca.src, project.GetFileOption{IsFile: true, Role: role}) + if err != nil { + return fmt.Errorf("read file error: %w", err) + } + + dest := ca.dest + if strings.HasSuffix(ca.dest, "/") { + dest = filepath.Join(ca.dest, filepath.Base(ca.src)) + } + + if ca.mode != nil { + mode = os.FileMode(*ca.mode) + } + + if err := conn.PutFile(ctx, data, dest, mode); err != nil { + return fmt.Errorf("copy file error: %w", err) + } + + return nil +} + +// relDir when copy.src is relative dir, get all files from project, and copy to remote. +func (ca copyArgs) relDir(ctx context.Context, pj project.Project, role string, conn connector.Connector) error { + if err := pj.WalkDir(ca.src, project.GetFileOption{IsFile: true, Role: role}, func(path string, d fs.DirEntry, err error) error { + if d.IsDir() { // only copy file + return nil + } + if err != nil { + return fmt.Errorf("walk dir %s error: %w", ca.src, err) + } + + info, err := d.Info() + if err != nil { + return fmt.Errorf("get file info error: %w", err) + } + + mode := info.Mode() + if ca.mode != nil { + mode = os.FileMode(*ca.mode) + } + + data, err := pj.ReadFile(path, project.GetFileOption{Role: role}) + if err != nil { + return fmt.Errorf("read file error: %w", err) + } + + dest := ca.dest + if strings.HasSuffix(ca.dest, "/") { + rel, err := pj.Rel(ca.src, path, project.GetFileOption{Role: role}) + if err != nil { + return fmt.Errorf("get relative file path error: %w", err) + } + dest = filepath.Join(ca.dest, rel) + } + + if err := conn.PutFile(ctx, data, dest, mode); err != nil { + return fmt.Errorf("copy file error: %w", err) + } + + return nil + }); err != nil { + return err + } + + return nil +} + +// absFile when copy.src is absolute file, get file from os, and copy to remote. 
+func (ca copyArgs) absFile(ctx context.Context, mode fs.FileMode, conn connector.Connector) error { + data, err := os.ReadFile(ca.src) + if err != nil { + return fmt.Errorf("read file error: %w", err) + } + + dest := ca.dest + if strings.HasSuffix(ca.dest, "/") { + dest = filepath.Join(ca.dest, filepath.Base(ca.src)) + } + + if ca.mode != nil { + mode = os.FileMode(*ca.mode) + } + + if err := conn.PutFile(ctx, data, dest, mode); err != nil { + return fmt.Errorf("copy file error: %w", err) + } + + return nil +} + +// absDir when copy.src is absolute dir, get all files from os, and copy to remote. +func (ca copyArgs) absDir(ctx context.Context, conn connector.Connector) error { + if err := filepath.WalkDir(ca.src, func(path string, d fs.DirEntry, err error) error { + if d.IsDir() { // only copy file + return nil + } + + if err != nil { + return fmt.Errorf("walk dir %s error: %w", ca.src, err) + } + // get file old mode + info, err := d.Info() + if err != nil { + return fmt.Errorf("get file info error: %w", err) + } + + mode := info.Mode() + if ca.mode != nil { + mode = os.FileMode(*ca.mode) + } + // read file + data, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("read file error: %w", err) + } + // copy file to remote + dest := ca.dest + if strings.HasSuffix(ca.dest, "/") { + rel, err := filepath.Rel(ca.src, path) + if err != nil { + return fmt.Errorf("get relative file path error: %w", err) + } + dest = filepath.Join(ca.dest, rel) + } + + if err := conn.PutFile(ctx, data, dest, mode); err != nil { + return fmt.Errorf("copy file error: %w", err) + } + + return nil + }); err != nil { + return err + } + + return nil +} diff --git a/pkg/modules/copy_test.go b/pkg/modules/copy_test.go index f006d907..3cde7e82 100644 --- a/pkg/modules/copy_test.go +++ b/pkg/modules/copy_test.go @@ -87,7 +87,7 @@ func TestCopy(t *testing.T) { ctxFunc: func() context.Context { return context.WithValue(context.Background(), ConnKey, successConnector) }, - exceptStdout: 
stdoutSuccess, + exceptStdout: StdoutSuccess, }, { name: "copy failed", @@ -109,6 +109,7 @@ func TestCopy(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithTimeout(tc.ctxFunc(), time.Second*5) defer cancel() + acStdout, acStderr := ModuleCopy(ctx, tc.opt) assert.Equal(t, tc.exceptStdout, acStdout) assert.Equal(t, tc.exceptStderr, acStderr) diff --git a/pkg/modules/debug.go b/pkg/modules/debug.go index e6544724..4a260677 100644 --- a/pkg/modules/debug.go +++ b/pkg/modules/debug.go @@ -24,24 +24,26 @@ import ( "github.com/kubesphere/kubekey/v4/pkg/variable" ) -func ModuleDebug(ctx context.Context, options ExecOptions) (string, string) { +// ModuleDebug deal "debug" module +func ModuleDebug(_ context.Context, options ExecOptions) (string, string) { // get host variable - ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) + ha, err := options.getAllVariables() if err != nil { - return "", fmt.Sprintf("failed to get host variable: %v", err) + return "", err.Error() } args := variable.Extension2Variables(options.Args) // var is defined. return the value of var - if varParam, err := variable.StringVar(ha.(map[string]any), args, "var"); err == nil { - result, err := tmpl.ParseString(ha.(map[string]any), fmt.Sprintf("{{ %s }}", varParam)) + if varParam, err := variable.StringVar(ha, args, "var"); err == nil { + result, err := tmpl.ParseString(ha, fmt.Sprintf("{{ %s }}", varParam)) if err != nil { return "", fmt.Sprintf("failed to parse var: %v", err) } + return result, "" } // msg is defined. 
return the actual msg - if msgParam, err := variable.StringVar(ha.(map[string]any), args, "msg"); err == nil { + if msgParam, err := variable.StringVar(ha, args, "msg"); err == nil { return msgParam, "" } diff --git a/pkg/modules/debug_test.go b/pkg/modules/debug_test.go index 294a8547..a5226e0b 100644 --- a/pkg/modules/debug_test.go +++ b/pkg/modules/debug_test.go @@ -77,6 +77,7 @@ func TestDebug(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() + acStdout, acStderr := ModuleDebug(ctx, tc.opt) assert.Equal(t, tc.exceptStdout, acStdout) assert.Equal(t, tc.exceptStderr, acStderr) diff --git a/pkg/modules/fetch.go b/pkg/modules/fetch.go index 2d1146bc..05145510 100644 --- a/pkg/modules/fetch.go +++ b/pkg/modules/fetch.go @@ -27,26 +27,26 @@ import ( "github.com/kubesphere/kubekey/v4/pkg/variable" ) +// ModuleFetch deal fetch module func ModuleFetch(ctx context.Context, options ExecOptions) (string, string) { // get host variable - ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) + ha, err := options.getAllVariables() if err != nil { - return "", fmt.Sprintf("failed to get host variable: %v", err) + return "", err.Error() } - // check args args := variable.Extension2Variables(options.Args) - srcParam, err := variable.StringVar(ha.(map[string]any), args, "src") + srcParam, err := variable.StringVar(ha, args, "src") if err != nil { return "", "\"src\" in args should be string" } - destParam, err := variable.StringVar(ha.(map[string]any), args, "dest") + destParam, err := variable.StringVar(ha, args, "dest") if err != nil { return "", "\"dest\" in args should be string" } // get connector - conn, err := getConnector(ctx, options.Host, ha.(map[string]any)) + conn, err := getConnector(ctx, options.Host, ha) if err != nil { return "", fmt.Sprintf("get connector error: %v", err) } @@ -58,9 +58,11 @@ func ModuleFetch(ctx context.Context, options ExecOptions) (string, 
string) { return "", fmt.Sprintf("failed to create dest dir: %v", err) } } + destFile, err := os.Create(destParam) if err != nil { klog.V(4).ErrorS(err, "failed to create dest file") + return "", err.Error() } defer destFile.Close() @@ -68,5 +70,6 @@ func ModuleFetch(ctx context.Context, options ExecOptions) (string, string) { if err := conn.FetchFile(ctx, srcParam, destFile); err != nil { return "", fmt.Sprintf("failed to fetch file: %v", err) } - return stdoutSuccess, "" + + return StdoutSuccess, "" } diff --git a/pkg/modules/fetch_test.go b/pkg/modules/fetch_test.go index 7c4ae63b..537a6c21 100644 --- a/pkg/modules/fetch_test.go +++ b/pkg/modules/fetch_test.go @@ -65,6 +65,7 @@ func TestFetch(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithTimeout(tc.ctxFunc(), time.Second*5) defer cancel() + acStdout, acStderr := ModuleFetch(ctx, tc.opt) assert.Equal(t, tc.exceptStdout, acStdout) assert.Equal(t, tc.exceptStderr, acStderr) diff --git a/pkg/modules/gen_cert.go b/pkg/modules/gen_cert.go index 25191830..3ea26ee1 100644 --- a/pkg/modules/gen_cert.go +++ b/pkg/modules/gen_cert.go @@ -9,6 +9,7 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/pem" + "errors" "fmt" "math" "math/big" @@ -16,7 +17,7 @@ import ( "os" "time" - "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" cgutilcert "k8s.io/client-go/util/cert" @@ -41,116 +42,154 @@ const ( policyIfNotPresent = "IfNotPresent" ) -// ModuleGenCert generate cert file. -// if root_key and root_cert is empty, generate Self-signed certificate. 
-func ModuleGenCert(ctx context.Context, options ExecOptions) (stdout string, stderr string) { - // get host variable - ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) +var defaultAltName = &cgutilcert.AltNames{ + DNSNames: []string{"localhost"}, + IPs: []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback}, +} + +type genCertArgs struct { + rootKey string + rootCert string + date time.Duration + policy string + sans []string + cn string + outKey string + outCert string +} + +// signedCertificate generate certificate signed by root certificate +func (gca genCertArgs) signedCertificate(cfg *cgutilcert.Config) (string, string) { + parentKey, err := TryLoadKeyFromDisk(gca.rootKey) if err != nil { - return "", fmt.Sprintf("failed to get host variable: %v", err) + return "", fmt.Sprintf("failed to load root key: %v", err) + } + parentCert, _, err := TryLoadCertChainFromDisk(gca.rootCert) + if err != nil { + return "", fmt.Sprintf("failed to load root certificate: %v", err) } - // args - args := variable.Extension2Variables(options.Args) - rootKeyParam, _ := variable.StringVar(ha.(map[string]any), args, "root_key") - rootCertParam, _ := variable.StringVar(ha.(map[string]any), args, "root_cert") - dateParam, _ := variable.StringVar(ha.(map[string]any), args, "date") - policyParam, _ := variable.StringVar(ha.(map[string]any), args, "policy") - sansParam, _ := variable.StringSliceVar(ha.(map[string]any), args, "sans") - cnParam, _ := variable.StringVar(ha.(map[string]any), args, "cn") - outKeyParam, _ := variable.StringVar(ha.(map[string]any), args, "out_key") - outCertParam, _ := variable.StringVar(ha.(map[string]any), args, "out_cert") - // check args - if policyParam != policyAlways && policyParam != policyIfNotPresent { - return "", "\"policy\" should be one of [Always, IfNotPresent]" + if gca.policy == policyIfNotPresent { + if _, err := TryLoadKeyFromDisk(gca.outKey); err != nil { + klog.V(4).InfoS("Failed to load out key, new it") + + goto NEW + } 
+ + existCert, intermediates, err := TryLoadCertChainFromDisk(gca.outCert) + if err != nil { + klog.V(4).InfoS("Failed to load out cert, new it") + + goto NEW + } + // check if the existing key and cert match the root key and cert + if err := ValidateCertPeriod(existCert, 0); err != nil { + return "", fmt.Sprintf("failed to ValidateCertPeriod: %v", err) + } + if err := VerifyCertChain(existCert, intermediates, parentCert); err != nil { + return "", fmt.Sprintf("failed to VerifyCertChain: %v", err) + } + if err := validateCertificateWithConfig(existCert, gca.outCert, cfg); err != nil { + return "", fmt.Sprintf("failed to validateCertificateWithConfig: %v", err) + } + + return StdoutSkip, "" } - if outKeyParam == "" || outCertParam == "" { - return "", "\"out_key\" or \"out_cert\" in args should be string" +NEW: + newKey, err := rsa.GenerateKey(cryptorand.Reader, rsaKeySize) + if err != nil { + return "", fmt.Sprintf("generate rsa key error: %v", err) } - if cnParam == "" { - return "", "\"cn\" in args should be string" + newCert, err := NewSignedCert(*cfg, gca.date, newKey, parentCert, parentKey, true) + if err != nil { + return "", fmt.Sprintf("failed to generate certificate: %v", err) } - altName := &cgutilcert.AltNames{ - DNSNames: []string{"localhost"}, - IPs: []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback}, + // write key and cert to file + if err := WriteKey(gca.outKey, newKey, gca.policy); err != nil { + return "", fmt.Sprintf("failed to write key: %v", err) } - appendSANsToAltNames(altName, sansParam, outCertParam) - cfg := &cgutilcert.Config{ - CommonName: cnParam, - Organization: []string{"kubekey"}, - AltNames: *altName, + if err := WriteCert(gca.outCert, newCert, gca.policy); err != nil { + return "", fmt.Sprintf("failed to write certificate: %v", err) } - var newKey *rsa.PrivateKey - var newCert *x509.Certificate - newKey, err = rsa.GenerateKey(cryptorand.Reader, rsaKeySize) + return StdoutSuccess, "" +} + +// selfSignedCertificate generate 
Self-signed certificate +func (gca genCertArgs) selfSignedCertificate(cfg *cgutilcert.Config) (string, string) { + newKey, err := rsa.GenerateKey(cryptorand.Reader, rsaKeySize) if err != nil { return "", fmt.Sprintf("generate rsa key error: %v", err) } - var after time.Duration - // change expiration date - if dateParam != "" { - dur, err := time.ParseDuration(dateParam) - if err != nil { - return "", fmt.Sprintf("failed to parse duration: %v", err) - } - after = dur + newCert, err := NewSelfSignedCACert(*cfg, gca.date, newKey) + if err != nil { + return "", fmt.Sprintf("failed to generate self-signed certificate: %v", err) + } + // write key and cert to file + if err := WriteKey(gca.outKey, newKey, gca.policy); err != nil { + return "", fmt.Sprintf("failed to write key: %v", err) + } + if err := WriteCert(gca.outCert, newCert, gca.policy); err != nil { + return "", fmt.Sprintf("failed to write certificate: %v", err) + } + + return StdoutSuccess, "" +} + +func newGenCertArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*genCertArgs, error) { + gca := &genCertArgs{} + // args + args := variable.Extension2Variables(raw) + gca.rootKey, _ = variable.StringVar(vars, args, "root_key") + gca.rootCert, _ = variable.StringVar(vars, args, "root_cert") + gca.date, _ = variable.DurationVar(vars, args, "date") + gca.policy, _ = variable.StringVar(vars, args, "policy") + gca.sans, _ = variable.StringSliceVar(vars, args, "sans") + gca.cn, _ = variable.StringVar(vars, args, "cn") + gca.outKey, _ = variable.StringVar(vars, args, "out_key") + gca.outCert, _ = variable.StringVar(vars, args, "out_cert") + // check args + if gca.policy != policyAlways && gca.policy != policyIfNotPresent { + return nil, errors.New("\"policy\" should be one of [Always, IfNotPresent]") + } + if gca.outKey == "" || gca.outCert == "" { + return nil, errors.New("\"out_key\" or \"out_cert\" in args should be string") + } + if gca.cn == "" { + return nil, errors.New("\"cn\" in args should 
be string") + } + + return gca, nil +} + +// ModuleGenCert generate cert file. +// if root_key and root_cert is empty, generate Self-signed certificate. +func ModuleGenCert(ctx context.Context, options ExecOptions) (string, string) { + // get host variable + ha, err := options.getAllVariables() + if err != nil { + return "", err.Error() + } + + gca, err := newGenCertArgs(ctx, options.Args, ha) + if err != nil { + return "", err.Error() + } + + cfg := &cgutilcert.Config{ + CommonName: gca.cn, + Organization: []string{"kubekey"}, + AltNames: appendSANsToAltNames(defaultAltName, gca.sans), } switch { - case rootKeyParam == "" || rootCertParam == "": // generate Self-signed certificate - newCert, err = NewSelfSignedCACert(*cfg, after, newKey) - if err != nil { - return "", fmt.Sprintf("failed to generate Self-signed certificate: %v", err) - } - default: // generate certificate signed by root certificate - parentKey, err := TryLoadKeyFromDisk(rootKeyParam) - if err != nil { - return "", fmt.Sprintf("failed to load root key: %v", err) - } - parentCert, _, err := TryLoadCertChainFromDisk(rootCertParam) - if err != nil { - return "", fmt.Sprintf("failed to load root certificate: %v", err) - } - if policyParam == policyIfNotPresent { - if _, err := TryLoadKeyFromDisk(outKeyParam); err != nil { - klog.V(4).InfoS("Failed to load out key, new it") - goto NEW - } - existCert, intermediates, err := TryLoadCertChainFromDisk(outCertParam) - if err != nil { - klog.V(4).InfoS("Failed to load out cert, new it") - goto NEW - } - // check if the existing key and cert match the root key and cert - if err := ValidateCertPeriod(existCert, 0); err != nil { - return "", fmt.Sprintf("failed to ValidateCertPeriod: %v", err) - } - if err := VerifyCertChain(existCert, intermediates, parentCert); err != nil { - return "", fmt.Sprintf("failed to VerifyCertChain: %v", err) - } - if err := validateCertificateWithConfig(existCert, outCertParam, cfg); err != nil { - return "", fmt.Sprintf("failed to 
validateCertificateWithConfig: %v", err) - } - return stdoutSkip, "" - } - NEW: - newCert, err = NewSignedCert(*cfg, after, newKey, parentCert, parentKey, true) - if err != nil { - return "", fmt.Sprintf("failed to generate certificate: %v", err) - } + case gca.rootKey == "" || gca.rootCert == "": + return gca.selfSignedCertificate(cfg) + default: + return gca.signedCertificate(cfg) } - - // write key and cert to file - if err := WriteKey(outKeyParam, newKey, policyParam); err != nil { - return "", fmt.Sprintf("failed to write key: %v", err) - } - if err := WriteCert(outCertParam, newCert, policyParam); err != nil { - return "", fmt.Sprintf("failed to write certificate: %v", err) - } - return stdoutSuccess, "" } // WriteKey stores the given key at the given location @@ -197,6 +236,7 @@ func EncodeCertPEM(cert *x509.Certificate) []byte { Type: certificateBlockType, Bytes: cert.Raw, } + return pem.EncodeToMemory(&block) } @@ -241,7 +281,7 @@ func TryLoadCertChainFromDisk(rootCert string) (*x509.Certificate, []*x509.Certi // RFC-1123 compliant DNS strings are added to altNames.DNSNames as strings // RFC-1123 compliant wildcard DNS strings are added to altNames.DNSNames as strings // certNames is used to print user facing warnings and should be the name of the cert the altNames will be used for -func appendSANsToAltNames(altNames *cgutilcert.AltNames, sans []string, certName string) { +func appendSANsToAltNames(altNames *cgutilcert.AltNames, sans []string) cgutilcert.AltNames { for _, altname := range sans { if ip := netutils.ParseIPSloppy(altname); ip != nil { altNames.IPs = append(altNames.IPs, ip) @@ -251,12 +291,13 @@ func appendSANsToAltNames(altNames *cgutilcert.AltNames, sans []string, certName altNames.DNSNames = append(altNames.DNSNames, altname) } else { klog.V(4).Infof( - "[certificates] WARNING: '%s' was not added to the '%s' SAN, because it is not a valid IP or RFC-1123 compliant DNS entry\n", + "[certificates] WARNING: Added to the '%s' SAN failed, 
because it is not a valid IP or RFC-1123 compliant DNS entry\n", altname, - certName, ) } } + + return *altNames } // NewSelfSignedCACert creates a CA certificate @@ -268,6 +309,7 @@ func NewSelfSignedCACert(cfg cgutilcert.Config, after time.Duration, key crypto. return nil, err } serial = new(big.Int).Add(serial, big.NewInt(1)) + notBefore := now.UTC() if !cfg.NotBefore.IsZero() { notBefore = cfg.NotBefore.UTC() @@ -294,6 +336,7 @@ func NewSelfSignedCACert(cfg cgutilcert.Config, after time.Duration, key crypto. if err != nil { return nil, err } + return x509.ParseCertificate(certDERBytes) } @@ -305,8 +348,9 @@ func NewSignedCert(cfg cgutilcert.Config, after time.Duration, key crypto.Signer return nil, err } serial = new(big.Int).Add(serial, big.NewInt(1)) + if cfg.CommonName == "" { - return nil, fmt.Errorf("must specify a CommonName") + return nil, errors.New("must specify a CommonName") } keyUsage := x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature @@ -335,10 +379,12 @@ func NewSignedCert(cfg cgutilcert.Config, after time.Duration, key crypto.Signer BasicConstraintsValid: true, IsCA: isCA, } + certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey) if err != nil { return nil, err } + return x509.ParseCertificate(certDERBytes) } @@ -374,6 +420,7 @@ func ValidateCertPeriod(cert *x509.Certificate, offset time.Duration) error { if now.After(cert.NotAfter) { return fmt.Errorf("the certificate has expired: %s", period) } + return nil } @@ -409,10 +456,12 @@ func validateCertificateWithConfig(cert *x509.Certificate, baseName string, cfg return fmt.Errorf("certificate %s is invalid, error: %w", baseName, err) } } + for _, ipAddress := range cfg.AltNames.IPs { if err := cert.VerifyHostname(ipAddress.String()); err != nil { return fmt.Errorf("certificate %s is invalid, error: %w", baseName, err) } } + return nil } diff --git a/pkg/modules/image.go b/pkg/modules/image.go index 56d48dbc..c396451f 100644 --- 
a/pkg/modules/image.go +++ b/pkg/modules/image.go @@ -19,7 +19,9 @@ package modules import ( "bytes" "context" + "crypto/tls" "encoding/json" + "errors" "fmt" "io" "io/fs" @@ -28,115 +30,236 @@ import ( "path/filepath" "strings" + "k8s.io/apimachinery/pkg/runtime" + imagev1 "github.com/opencontainers/image-spec/specs-go/v1" "k8s.io/klog/v2" "oras.land/oras-go/v2" "oras.land/oras-go/v2/registry" "oras.land/oras-go/v2/registry/remote" "oras.land/oras-go/v2/registry/remote/auth" - "oras.land/oras-go/v2/registry/remote/retry" _const "github.com/kubesphere/kubekey/v4/pkg/const" "github.com/kubesphere/kubekey/v4/pkg/variable" ) -func ModuleImage(ctx context.Context, options ExecOptions) (stdout string, stderr string) { - // get host variable - ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) - if err != nil { - return "", fmt.Sprintf("failed to get host variable: %v", err) - } - - // check args - args := variable.Extension2Variables(options.Args) - pullParam, _ := variable.StringSliceVar(ha.(map[string]any), args, "pull") - // if namespace_override is not empty, it will override the image manifests namespace_override. 
(namespace maybe multi sub path) - // push to private registry - pushParam := args["push"] - - // pull image manifests to local dir - for _, img := range pullParam { - src, err := remote.NewRepository(img) - if err != nil { - return "", fmt.Sprintf("failed to get remote image: %v", err) - } - dst, err := NewLocalRepository(filepath.Join(domain, src.Reference.Repository) + ":" + src.Reference.Reference) - if err != nil { - return "", fmt.Sprintf("failed to get local image: %v", err) - } - if _, err = oras.Copy(context.Background(), src, src.Reference.Reference, dst, "", oras.DefaultCopyOptions); err != nil { - return "", fmt.Sprintf("failed to copy image: %v", err) - } - } - - // push image to private registry - if pushParam != nil { - registry, _ := variable.StringVar(ha.(map[string]any), pushParam.(map[string]any), "registry") - username, _ := variable.StringVar(ha.(map[string]any), pushParam.(map[string]any), "username") - password, _ := variable.StringVar(ha.(map[string]any), pushParam.(map[string]any), "password") - namespace, _ := variable.StringVar(ha.(map[string]any), pushParam.(map[string]any), "namespace_override") - - manifests, err := findLocalImageManifests(filepath.Join(_const.GetWorkDir(), "kubekey", "images")) - if err != nil { - return "", fmt.Sprintf("failed to find local image manifests: %v", err) - } - for _, img := range manifests { - src, err := NewLocalRepository(filepath.Join(domain, img)) - if err != nil { - return "", fmt.Sprintf("failed to get local image: %v", err) - } - repo := src.Reference.Repository - if namespace != "" { - repo = filepath.Join(namespace, filepath.Base(repo)) - } - dst, err := remote.NewRepository(filepath.Join(registry, repo) + ":" + src.Reference.Reference) - if err != nil { - return "", fmt.Sprintf("failed to get local image: %v", err) - } - dst.Client = &auth.Client{ - Client: retry.DefaultClient, - Cache: auth.NewCache(), - Credential: auth.StaticCredential(registry, auth.Credential{ - Username: username, - 
Password: password, - }), - } - - if _, err = oras.Copy(context.Background(), src, src.Reference.Reference, dst, "", oras.DefaultCopyOptions); err != nil { - return "", fmt.Sprintf("failed to copy image: %v", err) - } - } - } - - return stdoutSuccess, "" +type imageArgs struct { + pull *imagePullArgs + push *imagePushArgs } +type imagePullArgs struct { + manifests []string + skipTLSVerify *bool + username string + password string +} + +func (i imagePullArgs) pull(ctx context.Context) error { + for _, img := range i.manifests { + src, err := remote.NewRepository(img) + if err != nil { + return fmt.Errorf("failed to get remote image: %w", err) + } + src.Client = &auth.Client{ + Client: &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: *i.skipTLSVerify, + }, + }, + }, + Cache: auth.NewCache(), + Credential: auth.StaticCredential(src.Reference.Registry, auth.Credential{ + Username: i.username, + Password: i.password, + }), + } + + dst, err := newLocalRepository(filepath.Join(domain, src.Reference.Repository)+":"+src.Reference.Reference, + filepath.Join(_const.GetWorkDir(), _const.ArtifactDir, _const.ArtifactImagesDir)) + if err != nil { + return fmt.Errorf("failed to get local image: %w", err) + } + + if _, err = oras.Copy(ctx, src, src.Reference.Reference, dst, "", oras.DefaultCopyOptions); err != nil { + return fmt.Errorf("failed to copy image: %w", err) + } + } + + return nil +} + +type imagePushArgs struct { + imagesDir string + skipTLSVerify *bool + registry string + username string + password string + namespace string +} + +// push local dir images to remote registry +func (i imagePushArgs) push(ctx context.Context) error { + manifests, err := findLocalImageManifests(i.imagesDir) + klog.V(5).Info("manifests found", "manifests", manifests) + if err != nil { + return fmt.Errorf("failed to find local image manifests: %w", err) + } + + for _, img := range manifests { + src, err := 
newLocalRepository(filepath.Join(domain, img), i.imagesDir) + if err != nil { + return fmt.Errorf("failed to get local image: %w", err) + } + repo := src.Reference.Repository + if i.namespace != "" { + repo = filepath.Join(i.namespace, filepath.Base(repo)) + } + + dst, err := remote.NewRepository(filepath.Join(i.registry, repo) + ":" + src.Reference.Reference) + if err != nil { + return fmt.Errorf("failed to get remote repo: %w", err) + } + dst.Client = &auth.Client{ + Client: &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: *i.skipTLSVerify, + }, + }, + }, + Cache: auth.NewCache(), + Credential: auth.StaticCredential(i.registry, auth.Credential{ + Username: i.username, + Password: i.password, + }), + } + + if _, err = oras.Copy(ctx, src, src.Reference.Reference, dst, "", oras.DefaultCopyOptions); err != nil { + return fmt.Errorf("failed to copy image: %w", err) + } + } + + return nil +} + +func newImageArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*imageArgs, error) { + ia := &imageArgs{} + // check args + args := variable.Extension2Variables(raw) + if pullArgs, ok := args["pull"]; ok { + pull, ok := pullArgs.(map[string]any) + if !ok { + return nil, errors.New("\"pull\" should be map") + } + ipl := &imagePullArgs{} + ipl.manifests, _ = variable.StringSliceVar(vars, pull, "manifests") + ipl.username, _ = variable.StringVar(vars, pull, "username") + ipl.password, _ = variable.StringVar(vars, pull, "password") + ipl.skipTLSVerify, _ = variable.BoolVar(vars, pull, "skipTLSVerify") + // check args + if len(ipl.manifests) == 0 { + return nil, errors.New("\"pull.manifests\" is required") + } + } + // if namespace_override is not empty, it will override the image manifests namespace_override. 
(namespace maybe multi sub path) + // push to private registry + if pushArgs, ok := args["push"]; ok { + push, ok := pushArgs.(map[string]any) + if !ok { + return nil, errors.New("\"push\" should be map") + } + + ips := &imagePushArgs{} + ips.registry, _ = variable.StringVar(vars, push, "registry") + ips.username, _ = variable.StringVar(vars, push, "username") + ips.password, _ = variable.StringVar(vars, push, "password") + ips.namespace, _ = variable.StringVar(vars, push, "namespace_override") + ips.imagesDir, _ = variable.StringVar(vars, push, "images_dir") + ips.skipTLSVerify, _ = variable.BoolVar(vars, push, "skipTLSVerify") + // check args + if ips.registry == "" { + return nil, errors.New("\"push.registry\" is required") + } + if ips.imagesDir == "" { + return nil, errors.New("\"push.images_dir\" is required") + } + ia.push = ips + } + + return ia, nil +} + +// ModuleImage deal "image" module +func ModuleImage(ctx context.Context, options ExecOptions) (string, string) { + // get host variable + ha, err := options.getAllVariables() + if err != nil { + return "", err.Error() + } + + ia, err := newImageArgs(ctx, options.Args, ha) + if err != nil { + return "", err.Error() + } + + // pull image manifests to local dir + if ia.pull != nil { + if err := ia.pull.pull(ctx); err != nil { + return "", fmt.Sprintf("failed to pull image: %v", err) + } + } + // push image to private registry + if ia.push != nil { + if err := ia.push.push(ctx); err != nil { + return "", fmt.Sprintf("failed to push image: %v", err) + } + } + + return StdoutSuccess, "" +} + +// findLocalImageManifests get image manifests with whole image's name. 
func findLocalImageManifests(localDir string) ([]string, error) { if _, err := os.Stat(localDir); err != nil { + klog.V(4).ErrorS(err, "failed to stat local directory", "image_dir", localDir) // images is not exist, skip - klog.V(4).ErrorS(err, "failed to stat local directory") - return nil, nil + return make([]string, 0), nil } + var manifests []string if err := filepath.WalkDir(localDir, func(path string, d fs.DirEntry, err error) error { if err != nil { return err } + if path == filepath.Join(localDir, "blobs") { return filepath.SkipDir } + if d.IsDir() || filepath.Base(path) == "manifests" { return nil } + file, err := os.ReadFile(path) if err != nil { return err } + var data map[string]any if err := json.Unmarshal(file, &data); err != nil { - return err + // skip un except file (empty) + klog.V(4).ErrorS(err, "unmarshal manifests file error", "file", path) + + return nil } - if data["mediaType"].(string) == imagev1.MediaTypeImageIndex { + + mediaType, ok := data["mediaType"].(string) + if !ok { + return errors.New("invalid mediaType") + } + if mediaType == imagev1.MediaTypeImageIndex || mediaType == "application/vnd.docker.distribution.manifest.list.v2+json" { subpath, err := filepath.Rel(localDir, path) if err != nil { return err @@ -144,6 +267,7 @@ func findLocalImageManifests(localDir string) ([]string, error) { // the last dir is manifests. 
should delete it manifests = append(manifests, filepath.Dir(filepath.Dir(subpath))+":"+filepath.Base(subpath)) } + return nil }); err != nil { return nil, err @@ -152,22 +276,24 @@ func findLocalImageManifests(localDir string) ([]string, error) { return manifests, nil } -func NewLocalRepository(reference string) (*remote.Repository, error) { +// newLocalRepository local dir images repository +func newLocalRepository(reference, localDir string) (*remote.Repository, error) { ref, err := registry.ParseReference(reference) if err != nil { return nil, err } + return &remote.Repository{ Reference: ref, - Client: &http.Client{Transport: &imageTransport{baseDir: filepath.Join(_const.GetWorkDir(), "kubekey", "images")}}, + Client: &http.Client{Transport: &imageTransport{baseDir: localDir}}, }, nil } -var ResponseNotFound = &http.Response{Proto: "Local", StatusCode: http.StatusNotFound} -var ResponseNotAllowed = &http.Response{Proto: "Local", StatusCode: http.StatusMethodNotAllowed} -var ResponseServerError = &http.Response{Proto: "Local", StatusCode: http.StatusInternalServerError} -var ResponseCreated = &http.Response{Proto: "Local", StatusCode: http.StatusCreated} -var ResponseOK = &http.Response{Proto: "Local", StatusCode: http.StatusOK} +var responseNotFound = &http.Response{Proto: "Local", StatusCode: http.StatusNotFound} +var responseNotAllowed = &http.Response{Proto: "Local", StatusCode: http.StatusMethodNotAllowed} +var responseServerError = &http.Response{Proto: "Local", StatusCode: http.StatusInternalServerError} +var responseCreated = &http.Response{Proto: "Local", StatusCode: http.StatusCreated} +var responseOK = &http.Response{Proto: "Local", StatusCode: http.StatusOK} const domain = "internal" const apiPrefix = "/v2/" @@ -176,127 +302,172 @@ type imageTransport struct { baseDir string } +// RoundTrip deal http.Request in local dir images. 
func (i imageTransport) RoundTrip(request *http.Request) (*http.Response, error) { switch request.Method { case http.MethodHead: // check if file exist - if strings.HasSuffix(filepath.Dir(request.URL.Path), "blobs") { // blobs - filename := filepath.Join(i.baseDir, "blobs", filepath.Base(request.URL.Path)) - if _, err := os.Stat(filename); err != nil { - return ResponseNotFound, nil - } - return ResponseOK, nil - } else if strings.HasSuffix(filepath.Dir(request.URL.Path), "manifests") { // manifests - filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix)) - if _, err := os.Stat(filename); err != nil { - return ResponseNotFound, nil - } - file, err := os.ReadFile(filename) - if err != nil { - return ResponseServerError, err - } - var data map[string]any - if err := json.Unmarshal(file, &data); err != nil { - return ResponseServerError, err - } - - return &http.Response{ - Proto: "Local", - StatusCode: http.StatusOK, - Header: http.Header{ - "Content-Type": []string{data["mediaType"].(string)}, - }, - ContentLength: int64(len(file)), - }, nil - } - return ResponseNotAllowed, nil + return i.head(request) case http.MethodPost: - if strings.HasSuffix(request.URL.Path, "/uploads/") { - return &http.Response{ - Proto: "Local", - StatusCode: http.StatusAccepted, - Header: http.Header{ - "Location": []string{filepath.Dir(request.URL.Path)}, - }, - Request: request, - }, nil - } - return ResponseNotAllowed, nil + return i.post(request) case http.MethodPut: - if strings.HasSuffix(request.URL.Path, "/uploads") { // blobs - body, err := io.ReadAll(request.Body) - if err != nil { - return ResponseServerError, nil - } - defer request.Body.Close() - - filename := filepath.Join(i.baseDir, "blobs", request.URL.Query().Get("digest")) - if err := os.MkdirAll(filepath.Dir(filename), os.ModePerm); err != nil { - return ResponseServerError, nil - } - if err := os.WriteFile(filename, body, os.ModePerm); err != nil { - return ResponseServerError, nil - } - 
return ResponseCreated, nil - } else if strings.HasSuffix(filepath.Dir(request.URL.Path), "/manifests") { // manifest - filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix)) - if err := os.MkdirAll(filepath.Dir(filename), os.ModePerm); err != nil { - return ResponseServerError, nil - } - body, err := io.ReadAll(request.Body) - if err != nil { - return ResponseServerError, nil - } - defer request.Body.Close() - if err := os.WriteFile(filename, body, os.ModePerm); err != nil { - return ResponseServerError, nil - } - return ResponseCreated, nil - } - - return ResponseNotAllowed, nil + return i.put(request) case http.MethodGet: - if strings.HasSuffix(filepath.Dir(request.URL.Path), "blobs") { // blobs - filename := filepath.Join(i.baseDir, "blobs", filepath.Base(request.URL.Path)) - if _, err := os.Stat(filename); err != nil { - return ResponseNotFound, nil - } - file, err := os.ReadFile(filename) - if err != nil { - return ResponseServerError, err - } - - return &http.Response{ - Proto: "Local", - StatusCode: http.StatusOK, - ContentLength: int64(len(file)), - Body: io.NopCloser(bytes.NewReader(file)), - }, nil - } else if strings.HasSuffix(filepath.Dir(request.URL.Path), "manifests") { // manifests - filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix)) - if _, err := os.Stat(filename); err != nil { - return ResponseNotFound, nil - } - file, err := os.ReadFile(filename) - if err != nil { - return ResponseServerError, err - } - var data map[string]any - if err := json.Unmarshal(file, &data); err != nil { - return ResponseServerError, err - } - - return &http.Response{ - Proto: "Local", - StatusCode: http.StatusOK, - Header: http.Header{ - "Content-Type": []string{data["mediaType"].(string)}, - }, - ContentLength: int64(len(file)), - Body: io.NopCloser(bytes.NewReader(file)), - }, nil - } - return ResponseNotAllowed, nil + return i.get(request) default: - return ResponseNotAllowed, nil + return 
responseNotAllowed, nil } } + +// head method for http.MethodHead. check if file is exist in blobs dir or manifests dir +func (i imageTransport) head(request *http.Request) (*http.Response, error) { + if strings.HasSuffix(filepath.Dir(request.URL.Path), "blobs") { // blobs + filename := filepath.Join(i.baseDir, "blobs", filepath.Base(request.URL.Path)) + if _, err := os.Stat(filename); err != nil { + return responseNotFound, err + } + + return responseOK, nil + } else if strings.HasSuffix(filepath.Dir(request.URL.Path), "manifests") { // manifests + filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix)) + if _, err := os.Stat(filename); err != nil { + return responseNotFound, err + } + + file, err := os.ReadFile(filename) + if err != nil { + return responseServerError, err + } + + var data map[string]any + if err := json.Unmarshal(file, &data); err != nil { + return responseServerError, err + } + + mediaType, ok := data["mediaType"].(string) + if !ok { + return responseServerError, nil + } + + return &http.Response{ + Proto: "Local", + StatusCode: http.StatusOK, + Header: http.Header{ + "Content-Type": []string{mediaType}, + }, + ContentLength: int64(len(file)), + }, nil + } + + return responseNotAllowed, nil +} + +// post method for http.MethodPost, accept request. 
+func (i imageTransport) post(request *http.Request) (*http.Response, error) { + if strings.HasSuffix(request.URL.Path, "/uploads/") { + return &http.Response{ + Proto: "Local", + StatusCode: http.StatusAccepted, + Header: http.Header{ + "Location": []string{filepath.Dir(request.URL.Path)}, + }, + Request: request, + }, nil + } + + return responseNotAllowed, nil +} + +// put method for http.MethodPut, create file in blobs dir or manifests dir +func (i imageTransport) put(request *http.Request) (*http.Response, error) { + if strings.HasSuffix(request.URL.Path, "/uploads") { // blobs + body, err := io.ReadAll(request.Body) + if err != nil { + return responseServerError, nil + } + defer request.Body.Close() + + filename := filepath.Join(i.baseDir, "blobs", request.URL.Query().Get("digest")) + if err := os.MkdirAll(filepath.Dir(filename), os.ModePerm); err != nil { + return responseServerError, nil + } + + if err := os.WriteFile(filename, body, os.ModePerm); err != nil { + return responseServerError, nil + } + + return responseCreated, nil + } else if strings.HasSuffix(filepath.Dir(request.URL.Path), "/manifests") { // manifest + filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix)) + if err := os.MkdirAll(filepath.Dir(filename), os.ModePerm); err != nil { + return responseServerError, nil + } + + body, err := io.ReadAll(request.Body) + if err != nil { + return responseServerError, nil + } + defer request.Body.Close() + + if err := os.WriteFile(filename, body, os.ModePerm); err != nil { + return responseServerError, nil + } + + return responseCreated, nil + } + + return responseNotAllowed, nil +} + +// get method for http.MethodGet, get file in blobs dir or manifest dir +func (i imageTransport) get(request *http.Request) (*http.Response, error) { + if strings.HasSuffix(filepath.Dir(request.URL.Path), "blobs") { // blobs + filename := filepath.Join(i.baseDir, "blobs", filepath.Base(request.URL.Path)) + if _, err := os.Stat(filename); err 
!= nil { + return responseNotFound, err + } + + file, err := os.ReadFile(filename) + if err != nil { + return responseServerError, err + } + + return &http.Response{ + Proto: "Local", + StatusCode: http.StatusOK, + ContentLength: int64(len(file)), + Body: io.NopCloser(bytes.NewReader(file)), + }, nil + } else if strings.HasSuffix(filepath.Dir(request.URL.Path), "manifests") { // manifests + filename := filepath.Join(i.baseDir, strings.TrimPrefix(request.URL.Path, apiPrefix)) + if _, err := os.Stat(filename); err != nil { + return responseNotFound, err + } + + file, err := os.ReadFile(filename) + if err != nil { + return responseServerError, err + } + + var data map[string]any + if err := json.Unmarshal(file, &data); err != nil { + return responseServerError, err + } + + mediaType, ok := data["mediaType"].(string) + if !ok { + return responseServerError, nil + } + + return &http.Response{ + Proto: "Local", + StatusCode: http.StatusOK, + Header: http.Header{ + "Content-Type": []string{mediaType}, + }, + ContentLength: int64(len(file)), + Body: io.NopCloser(bytes.NewReader(file)), + }, nil + } + + return responseNotAllowed, nil +} diff --git a/pkg/modules/image_test.go b/pkg/modules/image_test.go new file mode 100644 index 00000000..680f503b --- /dev/null +++ b/pkg/modules/image_test.go @@ -0,0 +1,75 @@ +package modules + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestModuleImage(t *testing.T) { + testcases := []struct { + name string + opt ExecOptions + exceptStdout string + exceptStderr string + }{ + { + name: "pull is not map", + opt: ExecOptions{ + Args: runtime.RawExtension{ + Raw: []byte(`{ +"pull": "" +}`), + }, + Variable: &testVariable{}, + }, + exceptStderr: "\"pull\" should be map", + }, + { + name: "pull.manifests is empty", + opt: ExecOptions{ + Args: runtime.RawExtension{ + Raw: []byte(`{ +"pull": {} +}`), + }, + Variable: &testVariable{}, + }, + exceptStderr: 
"\"pull.manifests\" is required", + }, + { + name: "push is not map", + opt: ExecOptions{ + Args: runtime.RawExtension{ + Raw: []byte(`{ +"push": "" +}`), + }, + Variable: &testVariable{}, + }, + exceptStderr: "\"push\" should be map", + }, + { + name: "push.registry is empty", + opt: ExecOptions{ + Args: runtime.RawExtension{ + Raw: []byte(`{ +"push": {} +}`), + }, + Variable: &testVariable{}, + }, + exceptStderr: "\"push.registry\" is required", + }, + } + + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + stdout, stderr := ModuleImage(context.Background(), testcase.opt) + assert.Equal(t, testcase.exceptStdout, stdout) + assert.Equal(t, testcase.exceptStderr, stderr) + }) + } +} diff --git a/pkg/modules/module.go b/pkg/modules/module.go index 1e6e18f5..0e05a185 100644 --- a/pkg/modules/module.go +++ b/pkg/modules/module.go @@ -33,18 +33,20 @@ import ( // message for stdout const ( - // stdoutSuccess message for common module - stdoutSuccess = "success" - stdoutSkip = "skip" + // StdoutSuccess message for common module + StdoutSuccess = "success" + StdoutSkip = "skip" - // stdoutTrue for bool module - stdoutTrue = "True" - // stdoutFalse for bool module - stdoutFalse = "False" + // StdoutTrue for bool module + StdoutTrue = "True" + // StdoutFalse for bool module + StdoutFalse = "False" ) +// ModuleExecFunc exec module type ModuleExecFunc func(ctx context.Context, options ExecOptions) (stdout string, stderr string) +// ExecOptions for module type ExecOptions struct { // the defined Args for module Args runtime.RawExtension @@ -58,16 +60,34 @@ type ExecOptions struct { Pipeline kkcorev1.Pipeline } +func (o ExecOptions) getAllVariables() (map[string]any, error) { + ha, err := o.Variable.Get(variable.GetAllVariable(o.Host)) + if err != nil { + return nil, fmt.Errorf("failed to get host %s variable: %w", o.Host, err) + } + + vd, ok := ha.(map[string]any) + if !ok { + return nil, fmt.Errorf("host: %s variable is not a map", o.Host) + } 
+ + return vd, nil +} + var module = make(map[string]ModuleExecFunc) +// RegisterModule register module func RegisterModule(moduleName string, exec ModuleExecFunc) error { if _, ok := module[moduleName]; ok { return fmt.Errorf("module %s is exist", moduleName) } + module[moduleName] = exec + return nil } +// FindModule by module name which has register. func FindModule(moduleName string) ModuleExecFunc { return module[moduleName] } @@ -91,23 +111,31 @@ var ConnKey = struct{}{} func getConnector(ctx context.Context, host string, data map[string]any) (connector.Connector, error) { var conn connector.Connector var err error + if v := ctx.Value(ConnKey); v != nil { - conn = v.(connector.Connector) + if vd, ok := v.(connector.Connector); ok { + conn = vd + } } else { connectorVars := make(map[string]any) + if c1, ok := data[_const.VariableConnector]; ok { if c2, ok := c1.(map[string]any); ok { connectorVars = c2 } } + conn, err = connector.NewConnector(host, connectorVars) if err != nil { return conn, err } } + if err = conn.Init(ctx); err != nil { klog.V(4).ErrorS(err, "failed to init connector") + return conn, err } + return conn, nil } diff --git a/pkg/modules/module_test.go b/pkg/modules/module_test.go index 0da2fc60..b518e557 100644 --- a/pkg/modules/module_test.go +++ b/pkg/modules/module_test.go @@ -18,7 +18,7 @@ package modules import ( "context" - "fmt" + "errors" "io" "io/fs" @@ -34,22 +34,23 @@ func (v testVariable) Key() string { return "testModule" } -func (v testVariable) Get(f variable.GetFunc) (any, error) { +func (v testVariable) Get(variable.GetFunc) (any, error) { return v.value, v.err } -func (v *testVariable) Merge(f variable.MergeFunc) error { +func (v *testVariable) Merge(variable.MergeFunc) error { v.value = map[string]any{ "k": "v", } + return nil } var successConnector = &testConnector{output: []byte("success")} var failedConnector = &testConnector{ - copyErr: fmt.Errorf("failed"), - fetchErr: fmt.Errorf("failed"), - commandErr: 
fmt.Errorf("failed"), + copyErr: errors.New("failed"), + fetchErr: errors.New("failed"), + commandErr: errors.New("failed"), } type testConnector struct { @@ -66,22 +67,22 @@ type testConnector struct { commandErr error } -func (t testConnector) Init(ctx context.Context) error { +func (t testConnector) Init(context.Context) error { return t.initErr } -func (t testConnector) Close(ctx context.Context) error { +func (t testConnector) Close(context.Context) error { return t.closeErr } -func (t testConnector) PutFile(ctx context.Context, local []byte, remoteFile string, mode fs.FileMode) error { +func (t testConnector) PutFile(context.Context, []byte, string, fs.FileMode) error { return t.copyErr } -func (t testConnector) FetchFile(ctx context.Context, remoteFile string, local io.Writer) error { +func (t testConnector) FetchFile(context.Context, string, io.Writer) error { return t.fetchErr } -func (t testConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) { +func (t testConnector) ExecuteCommand(context.Context, string) ([]byte, error) { return t.output, t.commandErr } diff --git a/pkg/modules/set_fact.go b/pkg/modules/set_fact.go index 69f460ef..a77cf06c 100644 --- a/pkg/modules/set_fact.go +++ b/pkg/modules/set_fact.go @@ -23,12 +23,14 @@ import ( "github.com/kubesphere/kubekey/v4/pkg/variable" ) -func ModuleSetFact(ctx context.Context, options ExecOptions) (string, string) { +// ModuleSetFact deal "set_fact" module +func ModuleSetFact(_ context.Context, options ExecOptions) (string, string) { // get host variable args := variable.Extension2Variables(options.Args) - if err := options.Variable.Merge(variable.MergeAllRuntimeVariable(options.Host, args)); err != nil { + if err := options.Variable.Merge(variable.MergeAllRuntimeVariable(args, options.Host)); err != nil { return "", fmt.Sprintf("set_fact error: %v", err) } - return stdoutSuccess, "" + + return StdoutSuccess, "" } diff --git a/pkg/modules/set_fact_test.go 
b/pkg/modules/set_fact_test.go index bde348b3..62107696 100644 --- a/pkg/modules/set_fact_test.go +++ b/pkg/modules/set_fact_test.go @@ -54,6 +54,7 @@ func TestSetFact(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() + stdout, stderr := ModuleSetFact(ctx, tc.opt) assert.Equal(t, tc.exceptStdout, stdout) assert.Equal(t, tc.exceptStderr, stderr) diff --git a/pkg/modules/template.go b/pkg/modules/template.go index ae9a99e3..a9b46194 100644 --- a/pkg/modules/template.go +++ b/pkg/modules/template.go @@ -18,182 +18,268 @@ package modules import ( "context" + "errors" "fmt" "io/fs" "os" "path/filepath" "strings" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kubesphere/kubekey/v4/pkg/connector" + kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1" "github.com/kubesphere/kubekey/v4/pkg/converter/tmpl" "github.com/kubesphere/kubekey/v4/pkg/project" "github.com/kubesphere/kubekey/v4/pkg/variable" ) -func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) { - // get host variable - ha, err := options.Variable.Get(variable.GetAllVariable(options.Host)) +type templateArgs struct { + src string + dest string + mode *int +} + +func newTemplateArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*templateArgs, error) { + var err error + // check args + ta := &templateArgs{} + args := variable.Extension2Variables(raw) + + ta.src, err = variable.StringVar(vars, args, "src") if err != nil { - return "", fmt.Sprintf("failed to get host variable: %v", err) + klog.V(4).ErrorS(err, "\"src\" should be string") + + return nil, errors.New("\"src\" should be string") } - // check args - args := variable.Extension2Variables(options.Args) - srcParam, err := variable.StringVar(ha.(map[string]any), args, "src") + ta.dest, err = variable.StringVar(vars, args, 
"dest") if err != nil { - return "", "\"src\" should be string" + return nil, errors.New("\"dest\" should be string") } - destParam, err := variable.StringVar(ha.(map[string]any), args, "dest") + + ta.mode, _ = variable.IntVar(vars, args, "mode") + + return ta, nil +} + +// ModuleTemplate deal "template" module +func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) { + // get host variable + ha, err := options.getAllVariables() if err != nil { - return "", "\"dest\" should be string" + return "", err.Error() + } + + ta, err := newTemplateArgs(ctx, options.Args, ha) + if err != nil { + klog.V(4).ErrorS(err, "get template args error", "task", ctrlclient.ObjectKeyFromObject(&options.Task)) + + return "", err.Error() } // get connector - conn, err := getConnector(ctx, options.Host, ha.(map[string]any)) + conn, err := getConnector(ctx, options.Host, ha) if err != nil { return "", err.Error() } defer conn.Close(ctx) - if filepath.IsAbs(srcParam) { - fileInfo, err := os.Stat(srcParam) + if filepath.IsAbs(ta.src) { + fileInfo, err := os.Stat(ta.src) if err != nil { - return "", fmt.Sprintf(" get src file %s in local path error: %v", srcParam, err) + return "", fmt.Sprintf(" get src file %s in local path error: %v", ta.src, err) } if fileInfo.IsDir() { // src is dir - if err := filepath.WalkDir(srcParam, func(path string, d fs.DirEntry, err error) error { - if d.IsDir() { // only copy file - return nil - } - if err != nil { - return fmt.Errorf("walk dir %s error: %w", srcParam, err) - } - - // get file old mode - info, err := d.Info() - if err != nil { - return fmt.Errorf("get file info error: %w", err) - } - mode := info.Mode() - if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil { - mode = os.FileMode(modeParam) - } - // read file - data, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("read file error: %w", err) - } - result, err := tmpl.ParseString(ha.(map[string]any), string(data)) - if err != nil 
{ - return fmt.Errorf("parse file error: %w", err) - } - // copy file to remote - var destFilename = destParam - if strings.HasSuffix(destParam, "/") { - rel, err := filepath.Rel(srcParam, path) - if err != nil { - return fmt.Errorf("get relative file path error: %w", err) - } - destFilename = filepath.Join(destParam, rel) - } - if err := conn.PutFile(ctx, []byte(result), destFilename, mode); err != nil { - return fmt.Errorf("copy file error: %w", err) - } - return nil - }); err != nil { - return "", fmt.Sprintf(" walk dir %s in local path error: %v", srcParam, err) + if err := ta.absDir(ctx, conn, ha); err != nil { + return "", fmt.Sprintf("sync template absolute dir error %s", err) } } else { // src is file - data, err := os.ReadFile(srcParam) - if err != nil { - return "", fmt.Sprintf("read file error: %v", err) - } - result, err := tmpl.ParseString(ha.(map[string]any), string(data)) - if err != nil { - return "", fmt.Sprintf("parse file error: %v", err) - } - if strings.HasSuffix(destParam, "/") { - destParam += filepath.Base(srcParam) - } - mode := fileInfo.Mode() - if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil { - mode = os.FileMode(modeParam) - } - if err := conn.PutFile(ctx, []byte(result), destParam, mode); err != nil { - return "", fmt.Sprintf("copy file error: %v", err) + if err := ta.absFile(ctx, fileInfo.Mode(), conn, ha); err != nil { + return "", fmt.Sprintf("sync template absolute file error %s", err) } } } else { - pj, err := project.New(options.Pipeline, false) + pj, err := project.New(ctx, options.Pipeline, false) if err != nil { return "", fmt.Sprintf("get project error: %v", err) } - fileInfo, err := pj.Stat(srcParam, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]}) + + fileInfo, err := pj.Stat(ta.src, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]}) if err != nil { - return "", 
fmt.Sprintf("get file %s from project error %v", srcParam, err) + return "", fmt.Sprintf("get file %s from project error: %v", ta.src, err) } if fileInfo.IsDir() { - if err := pj.WalkDir(srcParam, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]}, func(path string, d fs.DirEntry, err error) error { - if d.IsDir() { // only copy file - return nil - } - if err != nil { - return fmt.Errorf("walk dir %s error: %w", srcParam, err) - } - - info, err := d.Info() - if err != nil { - return fmt.Errorf("get file info error: %w", err) - } - mode := info.Mode() - if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil { - mode = os.FileMode(modeParam) - } - data, err := pj.ReadFile(path, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]}) - if err != nil { - return fmt.Errorf("read file error: %w", err) - } - result, err := tmpl.ParseString(ha.(map[string]any), string(data)) - if err != nil { - return fmt.Errorf("parse file error: %w", err) - } - var destFilename = destParam - if strings.HasSuffix(destParam, "/") { - rel, err := pj.Rel(srcParam, path, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]}) - if err != nil { - return fmt.Errorf("get relative file path error: %w", err) - } - destFilename = filepath.Join(destParam, rel) - } - if err := conn.PutFile(ctx, []byte(result), destFilename, mode); err != nil { - return fmt.Errorf("copy file error: %w", err) - } - return nil - }); err != nil { - return "", fmt.Sprintf("copy file error: %v", err) + if err := ta.relDir(ctx, pj, options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole], conn, ha); err != nil { + return "", fmt.Sprintf("sync template relative dir error: %s", err) } } else { - data, err := pj.ReadFile(srcParam, project.GetFileOption{IsTemplate: true, Role: options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole]}) - 
if err != nil { - return "", fmt.Sprintf("read file error: %v", err) - } - result, err := tmpl.ParseString(ha.(map[string]any), string(data)) - if err != nil { - return "", fmt.Sprintf("parse file error: %v", err) - } - if strings.HasSuffix(destParam, "/") { - destParam += filepath.Base(srcParam) - } - mode := fileInfo.Mode() - if modeParam, err := variable.IntVar(ha.(map[string]any), args, "mode"); err == nil { - mode = os.FileMode(modeParam) - } - if err := conn.PutFile(ctx, []byte(result), destParam, mode); err != nil { - return "", fmt.Sprintf("copy file error: %v", err) + if err := ta.relFile(ctx, pj, options.Task.Annotations[kkcorev1alpha1.TaskAnnotationRole], fileInfo.Mode(), conn, ha); err != nil { + return "", fmt.Sprintf("sync template relative dir error: %s", err) } } } - return stdoutSuccess, "" + + return StdoutSuccess, "" +} + +// relFile when template.src is relative file, get file from project, parse it, and copy to remote. +func (ta templateArgs) relFile(ctx context.Context, pj project.Project, role string, mode fs.FileMode, conn connector.Connector, vars map[string]any) any { + data, err := pj.ReadFile(ta.src, project.GetFileOption{IsTemplate: true, Role: role}) + if err != nil { + return fmt.Errorf("read file error: %w", err) + } + + result, err := tmpl.ParseString(vars, string(data)) + if err != nil { + return fmt.Errorf("parse file error: %w", err) + } + + dest := ta.dest + if strings.HasSuffix(ta.dest, "/") { + dest = filepath.Join(ta.dest, filepath.Base(ta.src)) + } + + if ta.mode != nil { + mode = os.FileMode(*ta.mode) + } + + if err := conn.PutFile(ctx, []byte(result), dest, mode); err != nil { + return fmt.Errorf("copy file error: %w", err) + } + + return nil +} + +// relDir when template.src is relative dir, get all files from project, parse it, and copy to remote. 
+func (ta templateArgs) relDir(ctx context.Context, pj project.Project, role string, conn connector.Connector, vars map[string]any) error { + if err := pj.WalkDir(ta.src, project.GetFileOption{IsTemplate: true, Role: role}, func(path string, d fs.DirEntry, err error) error { + if d.IsDir() { // only copy file + return nil + } + if err != nil { + return fmt.Errorf("walk dir %s error: %w", ta.src, err) + } + + info, err := d.Info() + if err != nil { + return fmt.Errorf("get file info error: %w", err) + } + + mode := info.Mode() + if ta.mode != nil { + mode = os.FileMode(*ta.mode) + } + + data, err := pj.ReadFile(path, project.GetFileOption{IsTemplate: true, Role: role}) + if err != nil { + return fmt.Errorf("read file error: %w", err) + } + result, err := tmpl.ParseString(vars, string(data)) + if err != nil { + return fmt.Errorf("parse file error: %w", err) + } + + dest := ta.dest + if strings.HasSuffix(ta.dest, "/") { + rel, err := pj.Rel(ta.src, path, project.GetFileOption{IsTemplate: true, Role: role}) + if err != nil { + return fmt.Errorf("get relative file path error: %w", err) + } + dest = filepath.Join(ta.dest, rel) + } + + if err := conn.PutFile(ctx, []byte(result), dest, mode); err != nil { + return fmt.Errorf("copy file error: %w", err) + } + + return nil + }); err != nil { + return err + } + + return nil +} + +// absFile when template.src is absolute file, get file by os, parse it, and copy to remote. 
+func (ta templateArgs) absFile(ctx context.Context, mode fs.FileMode, conn connector.Connector, vars map[string]any) error { + data, err := os.ReadFile(ta.src) + if err != nil { + return fmt.Errorf("read file error: %w", err) + } + + result, err := tmpl.ParseString(vars, string(data)) + if err != nil { + return fmt.Errorf("parse file error: %w", err) + } + + dest := ta.dest + if strings.HasSuffix(ta.dest, "/") { + dest = filepath.Join(ta.dest, filepath.Base(ta.src)) + } + + if ta.mode != nil { + mode = os.FileMode(*ta.mode) + } + + if err := conn.PutFile(ctx, []byte(result), dest, mode); err != nil { + return fmt.Errorf("copy file error: %w", err) + } + + return nil +} + +// absDir when template.src is absolute dir, get all files by os, parse it, and copy to remote. +func (ta templateArgs) absDir(ctx context.Context, conn connector.Connector, vars map[string]any) error { + if err := filepath.WalkDir(ta.src, func(path string, d fs.DirEntry, err error) error { + if d.IsDir() { // only copy file + return nil + } + if err != nil { + return fmt.Errorf("walk dir %s error: %w", ta.src, err) + } + + // get file old mode + info, err := d.Info() + if err != nil { + return fmt.Errorf("get file info error: %w", err) + } + mode := info.Mode() + if ta.mode != nil { + mode = os.FileMode(*ta.mode) + } + // read file + data, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("read file error: %w", err) + } + result, err := tmpl.ParseString(vars, string(data)) + if err != nil { + return fmt.Errorf("parse file error: %w", err) + } + // copy file to remote + dest := ta.dest + if strings.HasSuffix(ta.dest, "/") { + rel, err := filepath.Rel(ta.src, path) + if err != nil { + return fmt.Errorf("get relative file path error: %w", err) + } + dest = filepath.Join(ta.dest, rel) + } + + if err := conn.PutFile(ctx, []byte(result), dest, mode); err != nil { + return fmt.Errorf("copy file error: %w", err) + } + + return nil + }); err != nil { + return err + } + + return nil } diff 
--git a/pkg/modules/template_test.go b/pkg/modules/template_test.go index 9b7d1d32..95f5c727 100644 --- a/pkg/modules/template_test.go +++ b/pkg/modules/template_test.go @@ -32,6 +32,7 @@ func TestTemplate(t *testing.T) { absPath, err := filepath.Abs(os.Args[0]) if err != nil { fmt.Println("Error getting absolute path:", err) + return } @@ -76,6 +77,7 @@ func TestTemplate(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithTimeout(tc.ctxFunc(), time.Second*5) defer cancel() + acStdout, acStderr := ModuleTemplate(ctx, tc.opt) assert.Equal(t, tc.exceptStdout, acStdout) assert.Equal(t, tc.exceptStderr, acStderr) diff --git a/pkg/project/builtin.go b/pkg/project/builtin.go index 0943d974..a9a201f7 100644 --- a/pkg/project/builtin.go +++ b/pkg/project/builtin.go @@ -20,25 +20,27 @@ limitations under the License. package project import ( - "fmt" + "errors" "io/fs" "os" "path/filepath" "github.com/kubesphere/kubekey/v4/builtin" kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" - projectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" + kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" _const "github.com/kubesphere/kubekey/v4/pkg/const" ) func init() { builtinProjectFunc = func(pipeline kkcorev1.Pipeline) (Project, error) { if pipeline.Spec.Playbook == "" { - return nil, fmt.Errorf("playbook should not be empty") + return nil, errors.New("playbook should not be empty") } + if filepath.IsAbs(pipeline.Spec.Playbook) { - return nil, fmt.Errorf("playbook should be relative path base on project.addr") + return nil, errors.New("playbook should be relative path base on project.addr") } + return &builtinProject{Pipeline: pipeline, FS: builtin.BuiltinPipeline, playbook: pipeline.Spec.Playbook}, nil } } @@ -78,25 +80,31 @@ func (p builtinProject) getFilePath(path string, o GetFileOption) string { return s } } + return "" } +// MarshalPlaybook project file to playbook. 
+func (p builtinProject) MarshalPlaybook() (*kkprojectv1.Playbook, error) { + return marshalPlaybook(p.FS, p.playbook) +} + +// Stat role/file/template file or dir in project func (p builtinProject) Stat(path string, option GetFileOption) (os.FileInfo, error) { return fs.Stat(p.FS, p.getFilePath(path, option)) } +// WalkDir role/file/template dir in project func (p builtinProject) WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error { return fs.WalkDir(p.FS, p.getFilePath(path, option), f) } +// ReadFile role/file/template file or dir in project func (p builtinProject) ReadFile(path string, option GetFileOption) ([]byte, error) { return fs.ReadFile(p.FS, p.getFilePath(path, option)) } -func (p builtinProject) MarshalPlaybook() (*projectv1.Playbook, error) { - return marshalPlaybook(p.FS, p.playbook) -} - +// Rel path for role/file/template file or dir in project func (p builtinProject) Rel(root string, path string, option GetFileOption) (string, error) { return filepath.Rel(p.getFilePath(root, option), path) } diff --git a/pkg/project/git.go b/pkg/project/git.go index 7011ab11..2aa7d593 100644 --- a/pkg/project/git.go +++ b/pkg/project/git.go @@ -31,38 +31,42 @@ import ( "k8s.io/klog/v2" kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" - projectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" + kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" _const "github.com/kubesphere/kubekey/v4/pkg/const" ) -func newGitProject(pipeline kkcorev1.Pipeline, update bool) (Project, error) { +func newGitProject(ctx context.Context, pipeline kkcorev1.Pipeline, update bool) (Project, error) { if pipeline.Spec.Playbook == "" || pipeline.Spec.Project.Addr == "" { - return nil, fmt.Errorf("playbook and project.addr should not be empty") + return nil, errors.New("playbook and project.addr should not be empty") } + if filepath.IsAbs(pipeline.Spec.Playbook) { - return nil, fmt.Errorf("playbook should be relative path base on 
project.addr") + return nil, errors.New("playbook should be relative path base on project.addr") } // git clone to project dir if pipeline.Spec.Project.Name == "" { pipeline.Spec.Project.Name = strings.TrimSuffix(pipeline.Spec.Project.Addr[strings.LastIndex(pipeline.Spec.Project.Addr, "/")+1:], ".git") } + p := &gitProject{ Pipeline: pipeline, projectDir: filepath.Join(_const.GetWorkDir(), _const.ProjectDir, pipeline.Spec.Project.Name), playbook: pipeline.Spec.Playbook, } + if _, err := os.Stat(p.projectDir); os.IsNotExist(err) { // git clone - if err := p.gitClone(context.Background()); err != nil { + if err := p.gitClone(ctx); err != nil { return nil, fmt.Errorf("clone git project error: %w", err) } } else if update { // git pull - if err := p.gitPull(context.Background()); err != nil { + if err := p.gitPull(ctx); err != nil { return nil, fmt.Errorf("pull git project error: %w", err) } } + return p, nil } @@ -70,6 +74,7 @@ func newGitProject(pipeline kkcorev1.Pipeline, update bool) (Project, error) { type gitProject struct { kkcorev1.Pipeline + //location projectDir string // playbook relpath base on projectDir playbook string @@ -102,25 +107,10 @@ func (p gitProject) getFilePath(path string, o GetFileOption) string { return s } } + return "" } -func (p gitProject) Stat(path string, option GetFileOption) (os.FileInfo, error) { - return os.Stat(p.getFilePath(path, option)) -} - -func (p gitProject) WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error { - return filepath.WalkDir(p.getFilePath(path, option), f) -} - -func (p gitProject) ReadFile(path string, option GetFileOption) ([]byte, error) { - return os.ReadFile(p.getFilePath(path, option)) -} - -func (p gitProject) MarshalPlaybook() (*projectv1.Playbook, error) { - return marshalPlaybook(os.DirFS(p.projectDir), p.Pipeline.Spec.Playbook) -} - func (p gitProject) gitClone(ctx context.Context) error { if _, err := git.PlainCloneContext(ctx, p.projectDir, false, &git.CloneOptions{ URL: 
p.Pipeline.Spec.Project.Addr, @@ -131,8 +121,10 @@ func (p gitProject) gitClone(ctx context.Context) error { InsecureSkipTLS: false, }); err != nil { klog.Errorf("clone project %s failed: %v", p.Pipeline.Spec.Project.Addr, err) + return err } + return nil } @@ -140,13 +132,17 @@ func (p gitProject) gitPull(ctx context.Context) error { open, err := git.PlainOpen(p.projectDir) if err != nil { klog.V(4).ErrorS(err, "git open error", "local_dir", p.projectDir) + return err } + wt, err := open.Worktree() if err != nil { klog.V(4).ErrorS(err, "git open worktree error", "local_dir", p.projectDir) + return err } + if err := wt.PullContext(ctx, &git.PullOptions{ RemoteURL: p.Pipeline.Spec.Project.Addr, ReferenceName: plumbing.NewBranchReferenceName(p.Pipeline.Spec.Project.Branch), @@ -155,12 +151,34 @@ func (p gitProject) gitPull(ctx context.Context) error { InsecureSkipTLS: false, }); err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) { klog.V(4).ErrorS(err, "git pull error", "local_dir", p.projectDir) + return err } return nil } +// MarshalPlaybook project file to playbook. 
+func (p gitProject) MarshalPlaybook() (*kkprojectv1.Playbook, error) { + return marshalPlaybook(os.DirFS(p.projectDir), p.Pipeline.Spec.Playbook) +} + +// Stat role/file/template file or dir in project +func (p gitProject) Stat(path string, option GetFileOption) (os.FileInfo, error) { + return os.Stat(p.getFilePath(path, option)) +} + +// WalkDir role/file/template dir in project +func (p gitProject) WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error { + return filepath.WalkDir(p.getFilePath(path, option), f) +} + +// ReadFile role/file/template file or dir in project +func (p gitProject) ReadFile(path string, option GetFileOption) ([]byte, error) { + return os.ReadFile(p.getFilePath(path, option)) +} + +// Rel path for role/file/template file or dir in project func (p gitProject) Rel(root string, path string, option GetFileOption) (string, error) { return filepath.Rel(p.getFilePath(root, option), path) } diff --git a/pkg/project/helper.go b/pkg/project/helper.go index c28386ae..60e4ec16 100644 --- a/pkg/project/helper.go +++ b/pkg/project/helper.go @@ -24,110 +24,138 @@ import ( "gopkg.in/yaml.v3" - projectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" + kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" _const "github.com/kubesphere/kubekey/v4/pkg/const" ) -// marshalPlaybook projectv1.Playbook from a playbook file -func marshalPlaybook(baseFS fs.FS, pbPath string) (*projectv1.Playbook, error) { - // convert playbook to projectv1.Playbook - pb := &projectv1.Playbook{} +// marshalPlaybook kkprojectv1.Playbook from a playbook file +func marshalPlaybook(baseFS fs.FS, pbPath string) (*kkprojectv1.Playbook, error) { + // convert playbook to kkprojectv1.Playbook + pb := &kkprojectv1.Playbook{} if err := loadPlaybook(baseFS, pbPath, pb); err != nil { return nil, fmt.Errorf("load playbook failed: %w", err) } - - // convertRoles + // convertRoles. 
if err := convertRoles(baseFS, pbPath, pb); err != nil { return nil, fmt.Errorf("convert roles failed: %w", err) } - + // convertIncludeTasks if err := convertIncludeTasks(baseFS, pbPath, pb); err != nil { return nil, fmt.Errorf("convert include tasks failed: %w", err) } - + // validate playbook if err := pb.Validate(); err != nil { return nil, fmt.Errorf("validate playbook failed: %w", err) } + return pb, nil } // loadPlaybook with include_playbook. Join all playbooks into one playbook -func loadPlaybook(baseFS fs.FS, pbPath string, pb *projectv1.Playbook) error { +func loadPlaybook(baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook) error { // baseDir is the local ansible project dir which playbook belong to pbData, err := fs.ReadFile(baseFS, pbPath) if err != nil { return fmt.Errorf("read playbook failed: %w", err) } - var plays []projectv1.Play + var plays []kkprojectv1.Play if err := yaml.Unmarshal(pbData, &plays); err != nil { return fmt.Errorf("unmarshal playbook failed: %w", err) } for _, p := range plays { - if p.ImportPlaybook != "" { - importPlaybook := getPlaybookBaseFromPlaybook(baseFS, pbPath, p.ImportPlaybook) - if importPlaybook == "" { - return fmt.Errorf("import playbook %s failed", p.ImportPlaybook) - } - if err := loadPlaybook(baseFS, importPlaybook, pb); err != nil { - return fmt.Errorf("load playbook failed: %w", err) - } + if err := dealImportPlaybook(p, baseFS, pbPath, pb); err != nil { + return fmt.Errorf("load import_playbook failed: %w", err) } - // load var_files (optional) - for _, file := range p.VarsFiles { - if _, err := fs.Stat(baseFS, filepath.Join(filepath.Dir(pbPath), file)); err != nil { - return fmt.Errorf("file %s not exists", file) - } - mainData, err := fs.ReadFile(baseFS, filepath.Join(filepath.Dir(pbPath), file)) - if err != nil { - return fmt.Errorf("read file %s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err) - } - - var vars map[string]any - var node yaml.Node // marshal file on defined order - if err := 
yaml.Unmarshal(mainData, &node); err != nil {
-			return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
-		}
-		if err := node.Decode(&vars); err != nil {
-			return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
-		}
-
-		p.Vars, err = combineMaps(p.Vars, vars)
-		if err != nil {
-			return fmt.Errorf("combine maps file:%s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
-		}
+		if err := dealVarsFiles(&p, baseFS, pbPath); err != nil {
+			return fmt.Errorf("load vars_files failed: %w", err)
 		}
-		// fill block in roles
-		for i, r := range p.Roles {
-			roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
-			if roleBase == "" {
-				return fmt.Errorf("cannot found Role %s", r.Role)
-			}
-			mainTask := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
-			if mainTask == "" {
-				return fmt.Errorf("cannot found main task for Role %s", r.Role)
-			}
-
-			rdata, err := fs.ReadFile(baseFS, mainTask)
-			if err != nil {
-				return fmt.Errorf("read file %s failed: %w", mainTask, err)
-			}
-			var blocks []projectv1.Block
-			if err := yaml.Unmarshal(rdata, &blocks); err != nil {
-				return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), mainTask), err)
-			}
-			p.Roles[i].Block = blocks
+		if err := dealRoles(p, baseFS, pbPath); err != nil {
+			return fmt.Errorf("load roles failed: %w", err)
 		}
+
 		pb.Play = append(pb.Play, p)
 	}
 
 	return nil
 }
 
+// dealImportPlaybook "import_playbook" argument in play.
+// p is read-only here, so passing the Play by value is safe.
+func dealImportPlaybook(p kkprojectv1.Play, baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook) error {
+	if p.ImportPlaybook != "" {
+		importPlaybook := getPlaybookBaseFromPlaybook(baseFS, pbPath, p.ImportPlaybook)
+		if importPlaybook == "" {
+			return fmt.Errorf("import_playbook %s path is empty, it's maybe [project-dir/playbooks/import_playbook_file, playbook-dir/playbooks/import_playbook-file, playbook-dir/import_playbook-file]", p.ImportPlaybook)
+		}
+		if err := loadPlaybook(baseFS, importPlaybook, pb); err != nil {
+			return fmt.Errorf("load playbook failed: %w", err)
+		}
+	}
+
+	return nil
+}
+
+// dealVarsFiles "var_files" argument in play.
+// p must be a pointer: the vars loaded from the files are stored back into
+// p.Vars (combineMaps returns a new map), and a by-value Play would silently
+// drop them before the caller appends the play to the playbook.
+func dealVarsFiles(p *kkprojectv1.Play, baseFS fs.FS, pbPath string) error {
+	for _, file := range p.VarsFiles {
+		// load vars from vars_files
+		if _, err := fs.Stat(baseFS, filepath.Join(filepath.Dir(pbPath), file)); err != nil {
+			return fmt.Errorf("file %s not exists", file)
+		}
+		data, err := fs.ReadFile(baseFS, filepath.Join(filepath.Dir(pbPath), file))
+		if err != nil {
+			return fmt.Errorf("read file %s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
+		}
+
+		var vars map[string]any
+		var node yaml.Node // marshal file on defined order
+		if err := yaml.Unmarshal(data, &node); err != nil {
+			return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
+		}
+		if err := node.Decode(&vars); err != nil {
+			return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
+		}
+		// store vars in play. the vars defined in file should not be repeated.
+		p.Vars, err = combineMaps(p.Vars, vars)
+		if err != nil {
+			return fmt.Errorf("combine maps file:%s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
+		}
+	}
+
+	return nil
+}
+
+// dealRoles "roles" argument in play.
+// Passing p by value is safe here: the role blocks are written through the
+// shared backing array of p.Roles, which the caller's copy also references.
+func dealRoles(p kkprojectv1.Play, baseFS fs.FS, pbPath string) error {
+	for i, r := range p.Roles {
+		roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
+		if roleBase == "" {
+			return fmt.Errorf("cannot found Role %s", r.Role)
+		}
+
+		mainTask := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
+		if mainTask == "" {
+			return fmt.Errorf("cannot found main task for Role %s", r.Role)
+		}
+
+		rdata, err := fs.ReadFile(baseFS, mainTask)
+		if err != nil {
+			return fmt.Errorf("read file %s failed: %w", mainTask, err)
+		}
+		var blocks []kkprojectv1.Block
+		if err := yaml.Unmarshal(rdata, &blocks); err != nil {
+			return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), mainTask), err)
+		}
+		p.Roles[i].Block = blocks
+	}
+
+	return nil
+}
+
 // convertRoles convert roleName to block
-func convertRoles(baseFS fs.FS, pbPath string, pb *projectv1.Playbook) error {
+func convertRoles(baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook) error {
 	for i, p := range pb.Play {
 		for i, r := range p.Roles {
 			roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
@@ -135,60 +163,76 @@ func convertRoles(baseFS fs.FS, pbPath string, pb *projectv1.Playbook) error {
 				return fmt.Errorf("cannot found Role %s", r.Role)
 			}
 
-			// load block
-			mainTask := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile))
-			if mainTask == "" {
-				return fmt.Errorf("cannot found main task for Role %s", r.Role)
+			var err error
+			if p.Roles[i].Block, err = convertRoleBlocks(baseFS, pbPath, roleBase); err != nil {
+				return fmt.Errorf("convert role %s tasks failed: %w", r.Role, err)
 			}
-			rdata, err := fs.ReadFile(baseFS, mainTask)
-
if err != nil { - return fmt.Errorf("read file %s failed: %w", mainTask, err) - } - var blocks []projectv1.Block - if err := yaml.Unmarshal(rdata, &blocks); err != nil { - return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), mainTask), err) - } - p.Roles[i].Block = blocks - - // load defaults (optional) - mainDefault := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesDefaultsDir, _const.ProjectRolesDefaultsMainFile)) - if mainDefault != "" { - mainData, err := fs.ReadFile(baseFS, mainDefault) - if err != nil { - return fmt.Errorf("read defaults variable file %s failed: %w", mainDefault, err) - } - - var vars map[string]any - var node yaml.Node // marshal file on defined order - if err := yaml.Unmarshal(mainData, &node); err != nil { - return fmt.Errorf("unmarshal defaults variable yaml file: %s failed: %w", mainDefault, err) - } - if err := node.Decode(&vars); err != nil { - return fmt.Errorf("decode defaults variable yaml file: %s failed: %w", mainDefault, err) - } - - p.Roles[i].Vars, err = combineMaps(p.Roles[i].Vars, vars) - if err != nil { - return fmt.Errorf("combine defaults variable failed: %w", err) - } + if p.Roles[i].Vars, err = convertRoleVars(baseFS, roleBase, p.Roles[i].Vars); err != nil { + return fmt.Errorf("convert role %s defaults failed: %w", r.Role, err) } } pb.Play[i] = p } + return nil } +func convertRoleVars(baseFS fs.FS, roleBase string, roleVars map[string]any) (map[string]any, error) { + // load defaults (optional) + mainDefault := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesDefaultsDir, _const.ProjectRolesDefaultsMainFile)) + if mainDefault != "" { + mainData, err := fs.ReadFile(baseFS, mainDefault) + if err != nil { + return nil, fmt.Errorf("read defaults variable file %s failed: %w", mainDefault, err) + } + + var vars map[string]any + var node yaml.Node // marshal file on defined order + if err := yaml.Unmarshal(mainData, &node); err != nil { + return nil, 
fmt.Errorf("unmarshal defaults variable yaml file: %s failed: %w", mainDefault, err) + } + if err := node.Decode(&vars); err != nil { + return nil, fmt.Errorf("decode defaults variable yaml file: %s failed: %w", mainDefault, err) + } + + return combineMaps(roleVars, vars) + } + + return roleVars, nil +} + +// convertRoleBlocks roles/task/main.yaml to []kkprojectv1.Block +func convertRoleBlocks(baseFS fs.FS, pbPath string, roleBase string) ([]kkprojectv1.Block, error) { + mainTask := getYamlFile(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir, _const.ProjectRolesTasksMainFile)) + if mainTask == "" { + return nil, fmt.Errorf("cannot found main task for Role %s", roleBase) + } + + rdata, err := fs.ReadFile(baseFS, mainTask) + if err != nil { + return nil, fmt.Errorf("read file %s failed: %w", mainTask, err) + } + var blocks []kkprojectv1.Block + if err := yaml.Unmarshal(rdata, &blocks); err != nil { + return nil, fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), mainTask), err) + } + + return blocks, nil +} + // convertIncludeTasks from file to blocks -func convertIncludeTasks(baseFS fs.FS, pbPath string, pb *projectv1.Playbook) error { +func convertIncludeTasks(baseFS fs.FS, pbPath string, pb *kkprojectv1.Playbook) error { var pbBase = filepath.Dir(filepath.Dir(pbPath)) for _, play := range pb.Play { if err := fileToBlock(baseFS, pbBase, play.PreTasks); err != nil { return fmt.Errorf("convert pre_tasks file %s failed: %w", pbPath, err) } + if err := fileToBlock(baseFS, pbBase, play.Tasks); err != nil { return fmt.Errorf("convert tasks file %s failed: %w", pbPath, err) } + if err := fileToBlock(baseFS, pbBase, play.PostTasks); err != nil { return fmt.Errorf("convert post_tasks file %s failed: %w", pbPath, err) } @@ -200,37 +244,43 @@ func convertIncludeTasks(baseFS fs.FS, pbPath string, pb *projectv1.Playbook) er } } } + return nil } -func fileToBlock(baseFS fs.FS, baseDir string, blocks []projectv1.Block) error { +func 
fileToBlock(baseFS fs.FS, baseDir string, blocks []kkprojectv1.Block) error { for i, b := range blocks { if b.IncludeTasks != "" { data, err := fs.ReadFile(baseFS, filepath.Join(baseDir, b.IncludeTasks)) if err != nil { return fmt.Errorf("read includeTask file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err) } - var bs []projectv1.Block + var bs []kkprojectv1.Block if err := yaml.Unmarshal(data, &bs); err != nil { return fmt.Errorf("unmarshal includeTask file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err) } + b.Block = bs blocks[i] = b } + if err := fileToBlock(baseFS, baseDir, b.Block); err != nil { return fmt.Errorf("convert block file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err) } + if err := fileToBlock(baseFS, baseDir, b.Rescue); err != nil { return fmt.Errorf("convert rescue file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err) } + if err := fileToBlock(baseFS, baseDir, b.Always); err != nil { return fmt.Errorf("convert always file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err) } } + return nil } -// getPlaybookBaseFromPlaybook +// getPlaybookBaseFromPlaybook find import_playbook path base on the current_playbook // find from project/playbooks/playbook if exists. // find from current_playbook/playbooks/playbook if exists. // find current_playbook/playbook @@ -248,14 +298,8 @@ func getPlaybookBaseFromPlaybook(baseFS fs.FS, pbPath string, playbook string) s return s } } else { - if baseFS != nil { - if _, err := fs.Stat(baseFS, s); err == nil { - return s - } - } else { - if _, err := os.Stat(s); err == nil { - return s - } + if _, err := os.Stat(s); err == nil { + return s } } } @@ -294,9 +338,7 @@ func getRoleBaseFromPlaybook(baseFS fs.FS, pbPath string, roleName string) strin // return *.yml if exists. 
func getYamlFile(baseFS fs.FS, base string) string { var find []string - find = append(find, - fmt.Sprintf("%s.yaml", base), - fmt.Sprintf("%s.yml", base)) + find = append(find, base+".yaml", base+".yml") for _, s := range find { if baseFS != nil { @@ -329,5 +371,6 @@ func combineMaps(v1, v2 map[string]any) (map[string]any, error) { } mv[k] = v } + return mv, nil } diff --git a/pkg/project/helper_test.go b/pkg/project/helper_test.go index a2082e50..d5074e0e 100644 --- a/pkg/project/helper_test.go +++ b/pkg/project/helper_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" - projectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" + kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" ) func TestGetPlaybookBaseFromAbsPlaybook(t *testing.T) { @@ -128,23 +128,23 @@ func TestMarshalPlaybook(t *testing.T) { testcases := []struct { name string file string - except *projectv1.Playbook + except *kkprojectv1.Playbook }{ { name: "marshal playbook", file: "playbooks/playbook1.yaml", - except: &projectv1.Playbook{Play: []projectv1.Play{ + except: &kkprojectv1.Playbook{Play: []kkprojectv1.Play{ { - Base: projectv1.Base{Name: "play1"}, - PlayHost: projectv1.PlayHost{Hosts: []string{"localhost"}}, - Roles: []projectv1.Role{ + Base: kkprojectv1.Base{Name: "play1"}, + PlayHost: kkprojectv1.PlayHost{Hosts: []string{"localhost"}}, + Roles: []kkprojectv1.Role{ { - RoleInfo: projectv1.RoleInfo{ + RoleInfo: kkprojectv1.RoleInfo{ Role: "role1", - Block: []projectv1.Block{ + Block: []kkprojectv1.Block{ { - BlockBase: projectv1.BlockBase{Base: projectv1.Base{Name: "role1 | block1"}}, - Task: projectv1.Task{UnknownFiled: map[string]any{ + BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "role1 | block1"}}, + Task: kkprojectv1.Task{UnknownField: map[string]any{ "debug": map[string]any{ "msg": "echo \"hello world\"", }, @@ -155,41 +155,41 @@ func TestMarshalPlaybook(t *testing.T) { }, }, Handlers: nil, - PreTasks: []projectv1.Block{ + 
PreTasks: []kkprojectv1.Block{ { - BlockBase: projectv1.BlockBase{Base: projectv1.Base{Name: "play1 | pre_block1"}}, - Task: projectv1.Task{UnknownFiled: map[string]any{ + BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play1 | pre_block1"}}, + Task: kkprojectv1.Task{UnknownField: map[string]any{ "debug": map[string]any{ "msg": "echo \"hello world\"", }, }}, }, }, - PostTasks: []projectv1.Block{ + PostTasks: []kkprojectv1.Block{ { - BlockBase: projectv1.BlockBase{Base: projectv1.Base{Name: "play1 | post_block1"}}, - Task: projectv1.Task{UnknownFiled: map[string]any{ + BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play1 | post_block1"}}, + Task: kkprojectv1.Task{UnknownField: map[string]any{ "debug": map[string]any{ "msg": "echo \"hello world\"", }, }}, }, }, - Tasks: []projectv1.Block{ + Tasks: []kkprojectv1.Block{ { - BlockBase: projectv1.BlockBase{Base: projectv1.Base{Name: "play1 | block1"}}, - BlockInfo: projectv1.BlockInfo{Block: []projectv1.Block{ + BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play1 | block1"}}, + BlockInfo: kkprojectv1.BlockInfo{Block: []kkprojectv1.Block{ { - BlockBase: projectv1.BlockBase{Base: projectv1.Base{Name: "play1 | block1 | block1"}}, - Task: projectv1.Task{UnknownFiled: map[string]any{ + BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play1 | block1 | block1"}}, + Task: kkprojectv1.Task{UnknownField: map[string]any{ "debug": map[string]any{ "msg": "echo \"hello world\"", }, }}, }, { - BlockBase: projectv1.BlockBase{Base: projectv1.Base{Name: "play1 | block1 | block2"}}, - Task: projectv1.Task{UnknownFiled: map[string]any{ + BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play1 | block1 | block2"}}, + Task: kkprojectv1.Task{UnknownField: map[string]any{ "debug": map[string]any{ "msg": "echo \"hello world\"", }, @@ -198,8 +198,8 @@ func TestMarshalPlaybook(t *testing.T) { }}, }, { - BlockBase: projectv1.BlockBase{Base: projectv1.Base{Name: "play1 | 
block2"}}, - Task: projectv1.Task{UnknownFiled: map[string]any{ + BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play1 | block2"}}, + Task: kkprojectv1.Task{UnknownField: map[string]any{ "debug": map[string]any{ "msg": "echo \"hello world\"", }, @@ -208,12 +208,12 @@ func TestMarshalPlaybook(t *testing.T) { }, }, { - Base: projectv1.Base{Name: "play2"}, - PlayHost: projectv1.PlayHost{Hosts: []string{"localhost"}}, - Tasks: []projectv1.Block{ + Base: kkprojectv1.Base{Name: "play2"}, + PlayHost: kkprojectv1.PlayHost{Hosts: []string{"localhost"}}, + Tasks: []kkprojectv1.Block{ { - BlockBase: projectv1.BlockBase{Base: projectv1.Base{Name: "play2 | block1"}}, - Task: projectv1.Task{UnknownFiled: map[string]any{ + BlockBase: kkprojectv1.BlockBase{Base: kkprojectv1.Base{Name: "play2 | block1"}}, + Task: kkprojectv1.Task{UnknownField: map[string]any{ "debug": map[string]any{ "msg": "echo \"hello world\"", }, @@ -227,7 +227,9 @@ func TestMarshalPlaybook(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { pb, err := marshalPlaybook(os.DirFS("testdata"), tc.file) - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } assert.Equal(t, tc.except, pb) }) } diff --git a/pkg/project/local.go b/pkg/project/local.go index 13418aba..f1ccc01f 100644 --- a/pkg/project/local.go +++ b/pkg/project/local.go @@ -17,13 +17,14 @@ limitations under the License. 
package project import ( + "errors" "fmt" "io/fs" "os" "path/filepath" kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" - projectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" + kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" _const "github.com/kubesphere/kubekey/v4/pkg/const" ) @@ -42,15 +43,18 @@ func newLocalProject(pipeline kkcorev1.Pipeline) (Project, error) { if _, err := os.Stat(pipeline.Spec.Playbook); err != nil { return nil, fmt.Errorf("cannot find playbook %s", pipeline.Spec.Playbook) } + if filepath.Base(filepath.Dir(pipeline.Spec.Playbook)) != _const.ProjectPlaybooksDir { // the format of playbook is not correct - return nil, fmt.Errorf("playbook should be projectDir/playbooks/playbookfile") + return nil, errors.New("playbook should be projectDir/playbooks/playbookfile") } + projectDir := filepath.Dir(filepath.Dir(pipeline.Spec.Playbook)) playbook, err := filepath.Rel(projectDir, pipeline.Spec.Playbook) if err != nil { return nil, err } + return &localProject{Pipeline: pipeline, projectDir: projectDir, playbook: playbook}, nil } @@ -92,25 +96,31 @@ func (p localProject) getFilePath(path string, o GetFileOption) string { return s } } + return "" } +// MarshalPlaybook project file to playbook. 
+func (p localProject) MarshalPlaybook() (*kkprojectv1.Playbook, error) { + return marshalPlaybook(os.DirFS(p.projectDir), p.playbook) +} + +// Stat role/file/template file or dir in project func (p localProject) Stat(path string, option GetFileOption) (os.FileInfo, error) { return os.Stat(p.getFilePath(path, option)) } +// WalkDir role/file/template dir in project func (p localProject) WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error { return filepath.WalkDir(p.getFilePath(path, option), f) } +// ReadFile role/file/template file or dir in project func (p localProject) ReadFile(path string, option GetFileOption) ([]byte, error) { return os.ReadFile(p.getFilePath(path, option)) } -func (p localProject) MarshalPlaybook() (*projectv1.Playbook, error) { - return marshalPlaybook(os.DirFS(p.projectDir), p.playbook) -} - +// Rel path for role/file/template file or dir in project func (p localProject) Rel(root string, path string, option GetFileOption) (string, error) { return filepath.Rel(p.getFilePath(root, option), path) } diff --git a/pkg/project/project.go b/pkg/project/project.go index fa4b5aa4..70b1e0bd 100644 --- a/pkg/project/project.go +++ b/pkg/project/project.go @@ -17,12 +17,13 @@ limitations under the License. package project import ( + "context" "io/fs" "os" "strings" kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" - projectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" + kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1" ) var builtinProjectFunc func(kkcorev1.Pipeline) (Project, error) @@ -30,24 +31,29 @@ var builtinProjectFunc func(kkcorev1.Pipeline) (Project, error) // Project represent location of actual project. 
// get project file should base on it type Project interface { - MarshalPlaybook() (*projectv1.Playbook, error) + MarshalPlaybook() (*kkprojectv1.Playbook, error) Stat(path string, option GetFileOption) (os.FileInfo, error) WalkDir(path string, option GetFileOption, f fs.WalkDirFunc) error ReadFile(path string, option GetFileOption) ([]byte, error) Rel(root string, path string, option GetFileOption) (string, error) } +// GetFileOption for file. type GetFileOption struct { Role string IsTemplate bool IsFile bool } -func New(pipeline kkcorev1.Pipeline, update bool) (Project, error) { +// New project. +// If project address is git format. newGitProject +// If pipeline has BuiltinsProjectAnnotation. builtinProjectFunc +// Default newLocalProject +func New(ctx context.Context, pipeline kkcorev1.Pipeline, update bool) (Project, error) { if strings.HasPrefix(pipeline.Spec.Project.Addr, "https://") || strings.HasPrefix(pipeline.Spec.Project.Addr, "http://") || strings.HasPrefix(pipeline.Spec.Project.Addr, "git@") { - return newGitProject(pipeline, update) + return newGitProject(ctx, pipeline, update) } if _, ok := pipeline.Annotations[kkcorev1.BuiltinsProjectAnnotation]; ok { diff --git a/pkg/proxy/admit.go b/pkg/proxy/admit.go index 8750d5a2..22d34897 100644 --- a/pkg/proxy/admit.go +++ b/pkg/proxy/admit.go @@ -26,20 +26,23 @@ func newAlwaysAdmit() admission.Interface { return &admit{} } -type admit struct { -} +type admit struct{} -func (a admit) Validate(ctx context.Context, attr admission.Attributes, obj admission.ObjectInterfaces) (err error) { +// Validate always pass +func (a admit) Validate(context.Context, admission.Attributes, admission.ObjectInterfaces) error { return nil } -func (a admit) Admit(ctx context.Context, attr admission.Attributes, obj admission.ObjectInterfaces) (err error) { +// Admit always pass +func (a admit) Admit(context.Context, admission.Attributes, admission.ObjectInterfaces) error { return nil } -func (a admit) Handles(operation 
admission.Operation) bool { +// Handles always true +func (a admit) Handles(admission.Operation) bool { return true } var _ admission.MutationInterface = admit{} + var _ admission.ValidationInterface = admit{} diff --git a/pkg/proxy/api_resources.go b/pkg/proxy/api_resources.go index e7a42c58..626e29e2 100644 --- a/pkg/proxy/api_resources.go +++ b/pkg/proxy/api_resources.go @@ -17,9 +17,11 @@ limitations under the License. package proxy import ( + "errors" "fmt" "net/http" "reflect" + "strings" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -51,12 +53,39 @@ type apiResources struct { } type resourceOptions struct { - path string - storage apirest.Storage - admit admission.Interface + path string + resource string // generate by path + subresource string // generate by path + resourcePath string // generate by path + itemPath string // generate by path + storage apirest.Storage + admit admission.Interface } -func newApiIResources(gv schema.GroupVersion) *apiResources { +func (o *resourceOptions) init() error { + // checks if the given storage path is the path of a subresource + switch parts := strings.Split(o.path, "/"); len(parts) { + case 2: + o.resource, o.subresource = parts[0], parts[1] + o.resourcePath = "/namespaces/{namespace}/" + o.resource + o.itemPath = "/namespaces/{namespace}/" + o.resource + "/{name}" + case 1: + o.resource = parts[0] + o.resourcePath = "/namespaces/{namespace}/" + o.resource + "/{name}/" + o.subresource + o.itemPath = "/namespaces/{namespace}/" + o.resource + "/{name}/" + o.subresource + default: + return errors.New("api_installer allows only one or two segment paths (resource or resource/subresource)") + } + + if o.admit == nil { + // set default admit + o.admit = newAlwaysAdmit() + } + + return nil +} + +func newAPIIResources(gv schema.GroupVersion) *apiResources { return &apiResources{ gv: gv, prefix: "/apis/" + gv.String(), @@ -67,21 +96,24 @@ func newApiIResources(gv schema.GroupVersion) *apiResources { } } +// 
AddResource add a api-resources func (r *apiResources) AddResource(o resourceOptions) error { - if o.admit == nil { - // set default admit - o.admit = newAlwaysAdmit() + if err := o.init(); err != nil { + klog.V(6).ErrorS(err, "Failed to initialize resourceOptions") + + return err } r.resourceOptions = append(r.resourceOptions, o) storageVersionProvider, isStorageVersionProvider := o.storage.(apirest.StorageVersionProvider) var apiResource metav1.APIResource - if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionHash) && - isStorageVersionProvider && + if isStorageVersionProvider && + utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionHash) && storageVersionProvider.StorageVersion() != nil { versioner := storageVersionProvider.StorageVersion() gvk, err := getStorageVersionKind(versioner, o.storage, r.typer) if err != nil { - klog.V(4).ErrorS(err, "failed to get storage version kind", "storage", reflect.TypeOf(o.storage)) + klog.V(6).ErrorS(err, "failed to get storage version kind", "storage", reflect.TypeOf(o.storage)) + return err } apiResource.Group = gvk.Group @@ -98,11 +130,8 @@ func (r *apiResources) AddResource(o resourceOptions) error { if categoriesProvider, ok := o.storage.(apirest.CategoriesProvider); ok { apiResource.Categories = categoriesProvider.Categories() } - _, subResource, err := splitSubresource(o.path) - if err != nil { - return err - } - if subResource == "" { + + if o.subresource == "" { singularNameProvider, ok := o.storage.(apirest.SingularNameProvider) if !ok { return fmt.Errorf("resource %s must implement SingularNameProvider", o.path) @@ -110,10 +139,11 @@ func (r *apiResources) AddResource(o resourceOptions) error { apiResource.SingularName = singularNameProvider.GetSingularName() } r.list = append(r.list, apiResource) + return nil } -func (r *apiResources) handlerApiResources() http.HandlerFunc { +func (r *apiResources) handlerAPIResources() http.HandlerFunc { return func(writer http.ResponseWriter, request 
*http.Request) { responsewriters.WriteObjectNegotiated(r.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, writer, request, http.StatusOK, &metav1.APIResourceList{GroupVersion: r.gv.String(), APIResources: r.list}, false) @@ -131,5 +161,6 @@ func getStorageVersionKind(storageVersioner runtime.GroupVersioner, storage apir if !ok { return schema.GroupVersionKind{}, fmt.Errorf("cannot find the storage version kind for %v", reflect.TypeOf(object)) } + return gvk, nil } diff --git a/pkg/proxy/internal/file_storage.go b/pkg/proxy/internal/file_storage.go index bda9d910..1997f613 100644 --- a/pkg/proxy/internal/file_storage.go +++ b/pkg/proxy/internal/file_storage.go @@ -45,7 +45,7 @@ const ( yamlSuffix = ".yaml" ) -func newFileStorage(prefix string, resource schema.GroupResource, codec runtime.Codec, newFunc func() runtime.Object) (apistorage.Interface, factory.DestroyFunc, error) { +func newFileStorage(prefix string, resource schema.GroupResource, codec runtime.Codec, newFunc func() runtime.Object) (apistorage.Interface, factory.DestroyFunc) { return &fileStorage{ prefix: prefix, versioner: apistorage.APIObjectVersioner{}, @@ -54,7 +54,7 @@ func newFileStorage(prefix string, resource schema.GroupResource, codec runtime. newFunc: newFunc, }, func() { // do nothing - }, nil + } } type fileStorage struct { @@ -68,99 +68,119 @@ type fileStorage struct { var _ apistorage.Interface = &fileStorage{} +// Versioner of local resource files. func (s fileStorage) Versioner() apistorage.Versioner { return s.versioner } -func (s fileStorage) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { +// Create local resource files. 
+func (s fileStorage) Create(_ context.Context, key string, obj, out runtime.Object, _ uint64) error { // set resourceVersion to obj metaObj, err := meta.Accessor(obj) if err != nil { - klog.V(4).ErrorS(err, "failed to get meta object", "path", filepath.Dir(key)) + klog.V(6).ErrorS(err, "failed to get meta object", "path", filepath.Dir(key)) + return err } metaObj.SetResourceVersion("1") // create file to local disk if _, err := os.Stat(filepath.Dir(key)); err != nil { - if os.IsNotExist(err) { - if err := os.MkdirAll(filepath.Dir(key), os.ModePerm); err != nil { - klog.V(4).ErrorS(err, "failed to create dir", "path", filepath.Dir(key)) - return err - } - } else { - klog.V(4).ErrorS(err, "failed to check dir", "path", filepath.Dir(key)) + if !os.IsNotExist(err) { + klog.V(6).ErrorS(err, "failed to check dir", "path", filepath.Dir(key)) + + return err + } + if err := os.MkdirAll(filepath.Dir(key), os.ModePerm); err != nil { + klog.V(6).ErrorS(err, "failed to create dir", "path", filepath.Dir(key)) + return err } } data, err := runtime.Encode(s.codec, obj) if err != nil { - klog.V(4).ErrorS(err, "failed to encode resource file", "path", key) + klog.V(6).ErrorS(err, "failed to encode resource file", "path", key) + return err } // render to out if out != nil { err = decode(s.codec, data, out) if err != nil { - klog.V(4).ErrorS(err, "failed to decode resource file", "path", key) + klog.V(6).ErrorS(err, "failed to decode resource file", "path", key) + return err } } // render to file if err := os.WriteFile(key+yamlSuffix, data, os.ModePerm); err != nil { - klog.V(4).ErrorS(err, "failed to create resource file", "path", key) + klog.V(6).ErrorS(err, "failed to create resource file", "path", key) + return err } + return nil } +// Delete local resource files. 
func (s fileStorage) Delete(ctx context.Context, key string, out runtime.Object, preconditions *apistorage.Preconditions, validateDeletion apistorage.ValidateObjectFunc, cachedExistingObject runtime.Object) error { if cachedExistingObject != nil { out = cachedExistingObject } else { if err := s.Get(ctx, key, apistorage.GetOptions{}, out); err != nil { - klog.V(4).ErrorS(err, "failed to get resource", "path", key) + klog.V(6).ErrorS(err, "failed to get resource", "path", key) + return err } } if err := preconditions.Check(key, out); err != nil { - klog.V(4).ErrorS(err, "failed to check preconditions", "path", key) + klog.V(6).ErrorS(err, "failed to check preconditions", "path", key) + return err } if err := validateDeletion(ctx, out); err != nil { - klog.V(4).ErrorS(err, "failed to validate deletion", "path", key) + klog.V(6).ErrorS(err, "failed to validate deletion", "path", key) + return err } // delete object // rename file to trigger watcher if err := os.Rename(key+yamlSuffix, key+yamlSuffix+deleteTagSuffix); err != nil { - klog.V(4).ErrorS(err, "failed to rename resource file", "path", key) + klog.V(6).ErrorS(err, "failed to rename resource file", "path", key) + return err } + return nil } -func (s fileStorage) Watch(ctx context.Context, key string, opts apistorage.ListOptions) (watch.Interface, error) { +// Watch local resource files. +func (s fileStorage) Watch(_ context.Context, key string, _ apistorage.ListOptions) (watch.Interface, error) { return newFileWatcher(s.prefix, key, s.codec, s.newFunc) } -func (s fileStorage) Get(ctx context.Context, key string, opts apistorage.GetOptions, out runtime.Object) error { +// Get local resource files. 
+func (s fileStorage) Get(_ context.Context, key string, _ apistorage.GetOptions, out runtime.Object) error { data, err := os.ReadFile(key + yamlSuffix) if err != nil { - klog.V(4).ErrorS(err, "failed to read resource file", "path", key) + klog.V(6).ErrorS(err, "failed to read resource file", "path", key) + return err } if err := decode(s.codec, data, out); err != nil { - klog.V(4).ErrorS(err, "failed to decode resource file", "path", key) + klog.V(6).ErrorS(err, "failed to decode resource file", "path", key) + return err } + return nil } -func (s fileStorage) GetList(ctx context.Context, key string, opts apistorage.ListOptions, listObj runtime.Object) error { +// GetList local resource files. +func (s fileStorage) GetList(_ context.Context, key string, opts apistorage.ListOptions, listObj runtime.Object) error { listPtr, err := meta.GetItemsPtr(listObj) if err != nil { return err @@ -170,215 +190,222 @@ func (s fileStorage) GetList(ctx context.Context, key string, opts apistorage.Li return fmt.Errorf("need ptr to slice: %w", err) } - // lastKey in result. + // Build matching rules for resource version and continue key. + resourceVersionMatchRule, continueKeyMatchRule, err := s.buildMatchRules(key, opts, &sync.Once{}) + if err != nil { + return err + } + + // Get the root entries in the directory corresponding to 'key'. + rootEntries, isAllNamespace, err := s.getRootEntries(key) + if err != nil { + return err + } + var lastKey string var hasMore bool - // resourceVersionMatchRule is a function that returns true if the resource version matches the rule. - var resourceVersionMatchRule = func(uint64) bool { - // default rule is to match all resource versions - return true - } - var continueKeyMatchRule = func(key string) bool { - // default rule - return strings.HasSuffix(key, yamlSuffix) + // Iterate over root entries, processing either directories or files. + for i, entry := range rootEntries { + if isAllNamespace { + // Process namespace directory. 
+ err = s.processNamespaceDirectory(key, entry, v, continueKeyMatchRule, resourceVersionMatchRule, &lastKey, &hasMore, opts, listObj) + } else { + // Process individual resource file. + err = s.processResourceFile(key, entry, v, continueKeyMatchRule, resourceVersionMatchRule, &lastKey, opts, listObj) + } + if err != nil { + return err + } + // Check if we have reached the limit of results requested by the client. + if opts.Predicate.Limit != 0 && int64(v.Len()) >= opts.Predicate.Limit { + hasMore = i != len(rootEntries)-1 + + break + } } + // Handle the final result after all entries have been processed. + return s.handleResult(listObj, v, lastKey, hasMore) +} + +// buildMatchRules creates the match rules for resource version and continue key based on the given options. +func (s fileStorage) buildMatchRules(key string, opts apistorage.ListOptions, startReadOnce *sync.Once) (func(uint64) bool, func(string) bool, error) { + resourceVersionMatchRule := func(uint64) bool { return true } + continueKeyMatchRule := func(key string) bool { return strings.HasSuffix(key, yamlSuffix) } switch { case opts.Recursive && opts.Predicate.Continue != "": - // The format of continueKey is: namespace/resourceName/name.yaml - // continueKey is localPath which resources store. + // If continue token is present, set up a rule to start reading after the continueKey. 
continueKey, _, err := apistorage.DecodeContinue(opts.Predicate.Continue, key) if err != nil { - klog.V(4).ErrorS(err, "failed to parse continueKey", "continueKey", opts.Predicate.Continue) - return fmt.Errorf("invalid continue token: %w", err) + klog.V(6).ErrorS(err, "failed to parse continueKey", "continueKey", opts.Predicate.Continue) + + return nil, nil, fmt.Errorf("invalid continue token: %w", err) } - startReadOnce := sync.Once{} + continueKeyMatchRule = func(key string) bool { - var startRead bool + startRead := false if key == continueKey { - startReadOnce.Do(func() { - startRead = true - }) + startReadOnce.Do(func() { startRead = true }) } - // start read after continueKey (not contain). Because it has read in last result. + return startRead && key != continueKey } case opts.ResourceVersion != "": + // Handle resource version matching based on the provided match rule. parsedRV, err := s.versioner.ParseResourceVersion(opts.ResourceVersion) if err != nil { - return fmt.Errorf("invalid resource version: %w", err) + return nil, nil, fmt.Errorf("invalid resource version: %w", err) } switch opts.ResourceVersionMatch { case metav1.ResourceVersionMatchNotOlderThan: - resourceVersionMatchRule = func(u uint64) bool { - return u >= parsedRV - } + resourceVersionMatchRule = func(u uint64) bool { return u >= parsedRV } case metav1.ResourceVersionMatchExact: - resourceVersionMatchRule = func(u uint64) bool { - return u == parsedRV - } - case "": // legacy case - // use default rule. match all resource versions. + resourceVersionMatchRule = func(u uint64) bool { return u == parsedRV } + case "": + // Legacy case: match all resource versions. 
default: - return fmt.Errorf("unknown ResourceVersionMatch value: %v", opts.ResourceVersionMatch) + return nil, nil, fmt.Errorf("unknown ResourceVersionMatch value: %v", opts.ResourceVersionMatch) } } + return resourceVersionMatchRule, continueKeyMatchRule, nil +} + +// getRootEntries reads the directory entries at the given key path. +func (s fileStorage) getRootEntries(key string) ([]os.DirEntry, bool, error) { + var allNamespace bool switch len(filepath.SplitList(strings.TrimPrefix(key, s.prefix))) { case 0: // read all namespace's resources - // Traverse the resource storage directory. startRead after continueKey. // Traverse the resource storage directory. startRead after continueKey. // get all resources from key. key is runtimeDir - rootEntries, err := os.ReadDir(key) - if err != nil && !os.IsNotExist(err) { - klog.V(4).ErrorS(err, "failed to read runtime dir", "path", key) - return err - } - for _, ns := range rootEntries { - if !ns.IsDir() { - continue - } - // the next dir is namespace. - nsDir := filepath.Join(key, ns.Name()) - entries, err := os.ReadDir(nsDir) - if err != nil { - if os.IsNotExist(err) { - continue - } - klog.V(4).ErrorS(err, "failed to read namespaces dir", "path", nsDir) - return err - } - - for _, e := range entries { - if e.IsDir() { - continue - } - // the next file is resource name. 
- currentKey := filepath.Join(nsDir, e.Name()) - if !continueKeyMatchRule(currentKey) { - continue - } - data, err := os.ReadFile(currentKey) - if err != nil { - if os.IsNotExist(err) { - continue - } - klog.V(4).ErrorS(err, "failed to read resource file", "path", currentKey) - return err - } - - obj, _, err := s.codec.Decode(data, nil, getNewItem(listObj, v)) - if err != nil { - klog.V(4).ErrorS(err, "failed to decode resource file", "path", currentKey) - return err - } - metaObj, err := meta.Accessor(obj) - if err != nil { - klog.V(4).ErrorS(err, "failed to get meta object", "path", currentKey) - return err - } - rv, err := s.versioner.ParseResourceVersion(metaObj.GetResourceVersion()) - if err != nil { - klog.V(4).ErrorS(err, "failed to parse resource version", "resourceVersion", obj.(metav1.Object).GetResourceVersion()) - return err - } - if !resourceVersionMatchRule(rv) { - continue - } - if matched, err := opts.Predicate.Matches(obj); err == nil && matched { - v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) - lastKey = currentKey - } - - if opts.Predicate.Limit != 0 && int64(v.Len()) >= opts.Predicate.Limit { - // got enough results. Stop the loop. - goto RESULT - } - } - } - hasMore = false + allNamespace = true case 1: // read a namespace's resources // Traverse the resource storage directory. startRead after continueKey. // get all resources from key. key is runtimeDir - rootEntries, err := os.ReadDir(key) - if err != nil && !os.IsNotExist(err) { - klog.V(4).ErrorS(err, "failed to read runtime dir", "path", key) - return err - } - for _, rf := range rootEntries { - if rf.IsDir() { - continue - } - // the next file is resource name. 
- currentKey := filepath.Join(key, rf.Name()) - if !continueKeyMatchRule(currentKey) { - continue - } - data, err := os.ReadFile(currentKey) - if err != nil { - klog.V(4).ErrorS(err, "failed to read resource file", "path", currentKey) - return err - } - - obj, _, err := s.codec.Decode(data, nil, getNewItem(listObj, v)) - if err != nil { - klog.V(4).ErrorS(err, "failed to decode resource file", "path", currentKey) - return err - } - metaObj, err := meta.Accessor(obj) - if err != nil { - klog.V(4).ErrorS(err, "failed to get meta object", "path", currentKey) - return err - } - rv, err := s.versioner.ParseResourceVersion(metaObj.GetResourceVersion()) - if err != nil { - klog.V(4).ErrorS(err, "failed to parse resource version", "resourceVersion", obj.(metav1.Object).GetResourceVersion()) - return err - } - if !resourceVersionMatchRule(rv) { - continue - } - if matched, err := opts.Predicate.Matches(obj); err == nil && matched { - v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) - lastKey = currentKey - } - - if opts.Predicate.Limit != 0 && int64(v.Len()) >= opts.Predicate.Limit { - // got enough results. Stop the loop. - goto RESULT - } - } - hasMore = false + allNamespace = false default: - klog.V(4).ErrorS(nil, "key is invalid", "key", key) - return fmt.Errorf("key is invalid: %s", key) + klog.V(6).ErrorS(nil, "key is invalid", "key", key) + + return nil, false, fmt.Errorf("key is invalid: %s", key) } -RESULT: - if v.IsNil() { - // Ensure that we never return a nil Items pointer in the result for consistency. 
- v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + rootEntries, err := os.ReadDir(key) + if err != nil && !os.IsNotExist(err) { + klog.V(6).ErrorS(err, "failed to read runtime dir", "path", key) + + return nil, allNamespace, err } - // instruct the client to begin querying from immediately after the last key we returned - // we never return a key that the client wouldn't be allowed to see - if hasMore { - // we want to start immediately after the last key - next, err := apistorage.EncodeContinue(lastKey+"\x00", key, 0) + return rootEntries, allNamespace, nil +} + +// processNamespaceDirectory handles the traversal and processing of a namespace directory. +func (s fileStorage) processNamespaceDirectory(key string, ns os.DirEntry, v reflect.Value, continueKeyMatchRule func(string) bool, resourceVersionMatchRule func(uint64) bool, lastKey *string, hasMore *bool, opts apistorage.ListOptions, listObj runtime.Object) error { + if !ns.IsDir() { + // only need dir. skip + return nil + } + nsDir := filepath.Join(key, ns.Name()) + entries, err := os.ReadDir(nsDir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + klog.V(6).ErrorS(err, "failed to read namespaces dir", "path", nsDir) + + return err + } + + for _, entry := range entries { + err := s.processResourceFile(nsDir, entry, v, continueKeyMatchRule, resourceVersionMatchRule, lastKey, opts, listObj) if err != nil { return err } - // Unable to calculate remainingItemCount currently. - // todo Store the resourceVersion in the file data. No resourceVersion strategy for List Object currently. - // resourceVersion default set 1 + // Check if we have reached the limit of results requested by the client. + if opts.Predicate.Limit != 0 && int64(v.Len()) >= opts.Predicate.Limit { + *hasMore = true + + return nil + } + } + + return nil +} + +// processResourceFile handles reading, decoding, and processing a single resource file. 
+func (s fileStorage) processResourceFile(parentDir string, entry os.DirEntry, v reflect.Value, continueKeyMatchRule func(string) bool, resourceVersionMatchRule func(uint64) bool, lastKey *string, opts apistorage.ListOptions, listObj runtime.Object) error { + if entry.IsDir() { + // only need file. skip + return nil + } + currentKey := filepath.Join(parentDir, entry.Name()) + if !continueKeyMatchRule(currentKey) { + return nil + } + + data, err := os.ReadFile(currentKey) + if err != nil { + klog.V(6).ErrorS(err, "failed to read resource file", "path", currentKey) + + return err + } + + obj, _, err := s.codec.Decode(data, nil, getNewItem(listObj, v)) + if err != nil { + klog.V(6).ErrorS(err, "failed to decode resource file", "path", currentKey) + + return err + } + + metaObj, err := meta.Accessor(obj) + if err != nil { + klog.V(6).ErrorS(err, "failed to get meta object", "path", currentKey) + + return err + } + + rv, err := s.versioner.ParseResourceVersion(metaObj.GetResourceVersion()) + if err != nil { + klog.V(6).ErrorS(err, "failed to parse resource version", "resourceVersion", metaObj.GetResourceVersion()) + + return err + } + + // Apply the resource version match rule. + if !resourceVersionMatchRule(rv) { + return nil + } + + // Check if the object matches the given predicate. + if matched, err := opts.Predicate.Matches(obj); err == nil && matched { + v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) + *lastKey = currentKey + } + + return nil +} + +// handleResult processes and finalizes the result before returning it. +func (s fileStorage) handleResult(listObj runtime.Object, v reflect.Value, lastKey string, hasMore bool) error { + if v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + + if hasMore { + // If there are more results, set the continuation token for the next query. 
+ next, err := apistorage.EncodeContinue(lastKey+"\x00", "", 0) + if err != nil { + return err + } + return s.versioner.UpdateList(listObj, 1, next, nil) } - // no continuation - // resourceVersion default set 1 + // If no more results, return the final list without continuation. return s.versioner.UpdateList(listObj, 1, "", nil) } +// GuaranteedUpdate local resource file. func (s fileStorage) GuaranteedUpdate(ctx context.Context, key string, destination runtime.Object, ignoreNotFound bool, preconditions *apistorage.Preconditions, tryUpdate apistorage.UpdateFunc, cachedExistingObject runtime.Object) error { var oldObj runtime.Object if cachedExistingObject != nil { @@ -386,59 +413,89 @@ func (s fileStorage) GuaranteedUpdate(ctx context.Context, key string, destinati } else { oldObj = s.newFunc() if err := s.Get(ctx, key, apistorage.GetOptions{IgnoreNotFound: ignoreNotFound}, oldObj); err != nil { - klog.V(4).ErrorS(err, "failed to get resource", "path", key) + klog.V(6).ErrorS(err, "failed to get resource", "path", key) + return err } } if err := preconditions.Check(key, oldObj); err != nil { - klog.V(4).ErrorS(err, "failed to check preconditions", "path", key) + klog.V(6).ErrorS(err, "failed to check preconditions", "path", key) + return err } // set resourceVersion to obj metaObj, err := meta.Accessor(oldObj) if err != nil { - klog.V(4).ErrorS(err, "failed to get meta object", "path", filepath.Dir(key)) + klog.V(6).ErrorS(err, "failed to get meta object", "path", filepath.Dir(key)) + return err } oldVersion, err := s.versioner.ParseResourceVersion(metaObj.GetResourceVersion()) if err != nil { - klog.V(4).ErrorS(err, "failed to parse resource version", "resourceVersion", metaObj.GetResourceVersion()) + klog.V(6).ErrorS(err, "failed to parse resource version", "resourceVersion", metaObj.GetResourceVersion()) + return err } out, _, err := tryUpdate(oldObj, apistorage.ResponseMeta{ResourceVersion: oldVersion + 1}) if err != nil { - klog.V(4).ErrorS(err, "failed to 
try update", "path", key) + klog.V(6).ErrorS(err, "failed to try update", "path", key) + return err } data, err := runtime.Encode(s.codec, out) if err != nil { - klog.V(4).ErrorS(err, "failed to encode resource file", "path", key) + klog.V(6).ErrorS(err, "failed to encode resource file", "path", key) + return err } // render to destination if destination != nil { err = decode(s.codec, data, destination) if err != nil { - klog.V(4).ErrorS(err, "failed to decode resource file", "path", key) + klog.V(6).ErrorS(err, "failed to decode resource file", "path", key) + return err } } // render to file if err := os.WriteFile(key+yamlSuffix, data, os.ModePerm); err != nil { - klog.V(4).ErrorS(err, "failed to create resource file", "path", key) + klog.V(6).ErrorS(err, "failed to create resource file", "path", key) + return err } + return nil } +// Count local resource file func (s fileStorage) Count(key string) (int64, error) { + // countByNSDir count the crd files by namespace dir. + countByNSDir := func(dir string) (int64, error) { + var count int64 + entries, err := os.ReadDir(dir) + if err != nil { + klog.V(6).ErrorS(err, "failed to read namespaces dir", "path", dir) + // cannot read namespace dir + return 0, err + } + // count the file + for _, entry := range entries { + if !entry.IsDir() && strings.HasSuffix(entry.Name(), yamlSuffix) { + count++ + } + } + + return count, nil + } + switch len(filepath.SplitList(strings.TrimPrefix(key, s.prefix))) { case 0: // count all namespace's resources var count int64 rootEntries, err := os.ReadDir(key) if err != nil && !os.IsNotExist(err) { - klog.V(4).ErrorS(err, "failed to read runtime dir", "path", key) + klog.V(6).ErrorS(err, "failed to read runtime dir", "path", key) + return 0, err } for _, ns := range rootEntries { @@ -446,40 +503,25 @@ func (s fileStorage) Count(key string) (int64, error) { continue } // the next dir is namespace. 
- nsDir := filepath.Join(key, ns.Name()) - entries, err := os.ReadDir(nsDir) + c, err := countByNSDir(filepath.Join(key, ns.Name())) if err != nil { - klog.V(4).ErrorS(err, "failed to read namespaces dir", "path", nsDir) return 0, err } - // count the file - for _, entry := range entries { - if !entry.IsDir() && strings.HasSuffix(entry.Name(), yamlSuffix) { - count++ - } - } + count += c } + return count, nil case 1: // count a namespace's resources - var count int64 - rootEntries, err := os.ReadDir(key) - if err != nil && !os.IsNotExist(err) { - klog.V(4).ErrorS(err, "failed to read runtime dir", "path", key) - return 0, err - } - for _, entry := range rootEntries { - if !entry.IsDir() && strings.HasSuffix(entry.Name(), yamlSuffix) { - count++ - } - } - return count, nil + return countByNSDir(key) default: - klog.V(4).ErrorS(nil, "key is invalid", "key", key) + klog.V(6).ErrorS(nil, "key is invalid", "key", key) + // not support key return 0, fmt.Errorf("key is invalid: %s", key) } } -func (s fileStorage) RequestWatchProgress(ctx context.Context) error { +// RequestWatchProgress do nothing. 
+func (s fileStorage) RequestWatchProgress(context.Context) error { return nil } @@ -493,6 +535,7 @@ func decode(codec runtime.Codec, value []byte, objPtr runtime.Object) error { if err != nil { return err } + return nil } @@ -500,10 +543,15 @@ func getNewItem(listObj runtime.Object, v reflect.Value) runtime.Object { // For unstructured lists with a target group/version, preserve the group/version in the instantiated list items if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured { if apiVersion := unstructuredList.GetAPIVersion(); apiVersion != "" { - return &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": apiVersion}} + return &unstructured.Unstructured{Object: map[string]any{"apiVersion": apiVersion}} } } // Otherwise just instantiate an empty item elem := v.Type().Elem() - return reflect.New(elem).Interface().(runtime.Object) + if obj, ok := reflect.New(elem).Interface().(runtime.Object); ok { + return obj + } + klog.V(6).Info("elem is not runtime.Object") + + return nil } diff --git a/pkg/proxy/internal/rest_option.go b/pkg/proxy/internal/rest_option.go index e16b0971..5a28574c 100644 --- a/pkg/proxy/internal/rest_option.go +++ b/pkg/proxy/internal/rest_option.go @@ -34,6 +34,7 @@ import ( _const "github.com/kubesphere/kubekey/v4/pkg/const" ) +// NewFileRESTOptionsGetter return fileRESTOptionsGetter func NewFileRESTOptionsGetter(gv schema.GroupVersion) apigeneric.RESTOptionsGetter { return &fileRESTOptionsGetter{ gv: gv, @@ -49,6 +50,7 @@ func NewFileRESTOptionsGetter(gv schema.GroupVersion) apigeneric.RESTOptionsGett func newYamlCodec(gv schema.GroupVersion) runtime.Codec { yamlSerializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, _const.Scheme, _const.Scheme, json.SerializerOptions{Yaml: true}) + return versioning.NewDefaultingCodecForScheme( _const.Scheme, yamlSerializer, @@ -58,13 +60,16 @@ func newYamlCodec(gv schema.GroupVersion) runtime.Codec { ) } +// fileRESTOptionsGetter 
local rest info type fileRESTOptionsGetter struct { gv schema.GroupVersion storageConfig *storagebackend.Config } +// GetRESTOptions return apigeneric.RESTOptions func (f fileRESTOptionsGetter) GetRESTOptions(resource schema.GroupResource) (apigeneric.RESTOptions, error) { prefix := filepath.Join(_const.GetRuntimeDir(), f.gv.Group, f.gv.Version, resource.Resource) + return apigeneric.RESTOptions{ StorageConfig: f.storageConfig.ForResource(resource), Decorator: func(storageConfig *storagebackend.ConfigForResource, resourcePrefix string, @@ -74,10 +79,7 @@ func (f fileRESTOptionsGetter) GetRESTOptions(resource schema.GroupResource) (ap getAttrsFunc apistorage.AttrFunc, triggerFuncs apistorage.IndexerFuncs, indexers *cgtoolscache.Indexers) (apistorage.Interface, factory.DestroyFunc, error) { - s, d, err := newFileStorage(prefix, resource, storageConfig.Codec, newFunc) - if err != nil { - return s, d, err - } + s, d := newFileStorage(prefix, resource, storageConfig.Codec, newFunc) cacherConfig := cacherstorage.Config{ Storage: s, diff --git a/pkg/proxy/internal/watcher.go b/pkg/proxy/internal/watcher.go index 1d86c017..90e1cd20 100644 --- a/pkg/proxy/internal/watcher.go +++ b/pkg/proxy/internal/watcher.go @@ -21,13 +21,15 @@ import ( "path/filepath" "strings" - "github.com/fsnotify/fsnotify" "k8s.io/apimachinery/pkg/api/meta" + + "github.com/fsnotify/fsnotify" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/klog/v2" ) +// fileWatcher watcher local dir resource files. 
type fileWatcher struct { prefix string codec runtime.Codec @@ -36,38 +38,43 @@ type fileWatcher struct { watchEvents chan watch.Event } +// newFileWatcher get fileWatcher func newFileWatcher(prefix, path string, codec runtime.Codec, newFunc func() runtime.Object) (watch.Interface, error) { if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if err := os.MkdirAll(path, os.ModePerm); err != nil { - return nil, err - } - } else { - klog.V(4).ErrorS(err, "failed to stat path", "path", path) + if !os.IsNotExist(err) { + klog.V(6).ErrorS(err, "failed to stat path", "path", path) + + return nil, err + } + if err := os.MkdirAll(path, os.ModePerm); err != nil { return nil, err } } watcher, err := fsnotify.NewWatcher() if err != nil { - klog.V(4).ErrorS(err, "failed to create file watcher", "path", path) + klog.V(6).ErrorS(err, "failed to create file watcher", "path", path) + return nil, err } if err := watcher.Add(path); err != nil { - klog.V(4).ErrorS(err, "failed to add path to file watcher", "path", path) + klog.V(6).ErrorS(err, "failed to add path to file watcher", "path", path) + return nil, err } // add namespace dir to watcher if prefix == path { entry, err := os.ReadDir(prefix) if err != nil { - klog.V(4).ErrorS(err, "failed to read dir", "dir", path) + klog.V(6).ErrorS(err, "failed to read dir", "dir", path) + return nil, err } for _, e := range entry { if e.IsDir() { if err := watcher.Add(filepath.Join(prefix, e.Name())); err != nil { - klog.V(4).ErrorS(err, "failed to add namespace dir to file watcher", "dir", e.Name()) + klog.V(6).ErrorS(err, "failed to add namespace dir to file watcher", "dir", e.Name()) + return nil, err } } @@ -83,22 +90,23 @@ func newFileWatcher(prefix, path string, codec runtime.Codec, newFunc func() run } go w.watch() + return w, nil } +// Stop watch func (w *fileWatcher) Stop() { if err := w.watcher.Close(); err != nil { - klog.V(4).ErrorS(err, "failed to close file watcher") + klog.V(6).ErrorS(err, "failed to close file 
watcher") } } +// ResultChan get watch event func (w *fileWatcher) ResultChan() <-chan watch.Event { return w.watchEvents } func (w *fileWatcher) watch() { - // stop the watcher - //defer f.Stop() for { select { case event := <-w.watcher.Events: @@ -108,6 +116,7 @@ func (w *fileWatcher) watch() { entry, err := os.Stat(event.Name) if err != nil { klog.V(6).ErrorS(err, "failed to stat resource file", "event", event) + continue } if entry.IsDir() && len(filepath.SplitList(strings.TrimPrefix(event.Name, w.prefix))) == 1 { @@ -121,74 +130,78 @@ func (w *fileWatcher) watch() { if err := w.watcher.Remove(event.Name); err != nil { klog.V(6).ErrorS(err, "failed to remove namespace dir to file watcher", "event", event) } + default: + // do nothing } + continue } - // change is resource file - if strings.HasSuffix(event.Name, yamlSuffix) { - data, err := os.ReadFile(event.Name) - if err != nil { - klog.V(6).ErrorS(err, "failed to read resource file", "event", event) - continue - } - - switch event.Op { - case fsnotify.Create: - obj, _, err := w.codec.Decode(data, nil, w.newFunc()) - if err != nil { - klog.V(6).ErrorS(err, "failed to decode resource file", "event", event) - continue - } - metaObj, err := meta.Accessor(obj) - if err != nil { - klog.V(6).ErrorS(err, "failed to convert to metaObject", "event", event) - continue - } - if metaObj.GetName() == "" && metaObj.GetGenerateName() == "" { // ignore unknown file - klog.V(6).InfoS("name is empty. 
ignore", "event", event) - continue - } - w.watchEvents <- watch.Event{ - Type: watch.Added, - Object: obj, - } - case fsnotify.Write: - obj, _, err := w.codec.Decode(data, nil, w.newFunc()) - if err != nil { - klog.V(6).ErrorS(err, "failed to decode resource file", "event", event) - continue - } - metaObj, err := meta.Accessor(obj) - if err != nil { - klog.V(6).ErrorS(err, "failed to convert to metaObject", "event", event) - continue - } - if metaObj.GetName() == "" && metaObj.GetGenerateName() == "" { // ignore unknown file - klog.V(6).InfoS("name is empty. ignore", "event", event) - continue - } - if strings.HasSuffix(filepath.Base(event.Name), deleteTagSuffix) { - // delete event - w.watchEvents <- watch.Event{ - Type: watch.Deleted, - Object: obj, - } - if err := os.Remove(event.Name); err != nil { - klog.ErrorS(err, "failed to remove file", "event", event) - } - } else { - // update event - w.watchEvents <- watch.Event{ - Type: watch.Modified, - Object: obj, - } - } - } + if err := w.watchFile(event); err != nil { + klog.V(6).ErrorS(err, "watch resource file error") } + case err := <-w.watcher.Errors: - klog.V(4).ErrorS(err, "file watcher error") + klog.V(6).ErrorS(err, "file watcher error") + return } } } + +// watchFile for resource. +func (w *fileWatcher) watchFile(event fsnotify.Event) error { + if !strings.HasSuffix(event.Name, yamlSuffix) { + return nil + } + data, err := os.ReadFile(event.Name) + if err != nil { + klog.V(6).ErrorS(err, "failed to read resource file", "event", event) + + return err + } + obj, _, err := w.codec.Decode(data, nil, w.newFunc()) + if err != nil { + klog.V(6).ErrorS(err, "failed to decode resource file", "event", event) + + return err + } + metaObj, err := meta.Accessor(obj) + if err != nil { + klog.V(6).ErrorS(err, "failed to convert to metaObject", "event", event) + + return err + } + if metaObj.GetName() == "" && metaObj.GetGenerateName() == "" { // ignore unknown file + klog.V(6).InfoS("name is empty. 
ignore", "event", event) + + return nil + } + + switch event.Op { + case fsnotify.Create: + w.watchEvents <- watch.Event{ + Type: watch.Added, + Object: obj, + } + case fsnotify.Write: + if strings.HasSuffix(filepath.Base(event.Name), deleteTagSuffix) { + // delete event + w.watchEvents <- watch.Event{ + Type: watch.Deleted, + Object: obj, + } + if err := os.Remove(event.Name); err != nil { + klog.ErrorS(err, "failed to remove file", "event", event) + } + } else { + // update event + w.watchEvents <- watch.Event{ + Type: watch.Modified, + Object: obj, + } + } + } + + return nil +} diff --git a/pkg/proxy/path_expression.go b/pkg/proxy/path_expression.go index 70e2fc21..bf51ff64 100644 --- a/pkg/proxy/path_expression.go +++ b/pkg/proxy/path_expression.go @@ -46,14 +46,20 @@ func newPathExpression(path string) (*pathExpression, error) { if err != nil { return nil, err } + return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil } // http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3 -func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) { +func templateToRegularExpression(template string) (string, int, []string, int, []string) { + var ( + literalCount int + varNames []string + varCount int + ) var buffer bytes.Buffer buffer.WriteString("^") - tokens = tokenizePath(template) + tokens := tokenizePath(template) for _, each := range tokens { if each == "" { continue @@ -78,13 +84,14 @@ func templateToRegularExpression(template string) (expression string, literalCou buffer.WriteString("([^/]+?)") } varNames = append(varNames, varName) - varCount += 1 + varCount++ } else { literalCount += len(each) - encoded := each // TODO URI encode + encoded := each buffer.WriteString(regexp.QuoteMeta(encoded)) } } + return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens } @@ -93,17 +100,6 @@ func 
tokenizePath(path string) []string { if path == "/" { return nil } - if TrimRightSlashEnabled { - // 3.9.0 - return strings.Split(strings.Trim(path, "/"), "/") - } else { - // 3.10.2 - return strings.Split(strings.TrimLeft(path, "/"), "/") - } + // 3.9.0 + return strings.Split(strings.Trim(path, "/"), "/") } - -// TrimRightSlashEnabled controls whether -// - path on route building is using path.Join -// - the path of the incoming request is trimmed of its slash suffux. -// Value of true matches the behavior of <= 3.9.0 -var TrimRightSlashEnabled = true diff --git a/pkg/proxy/resources/config/storage.go b/pkg/proxy/resources/config/storage.go index 4e9287c8..21124b39 100644 --- a/pkg/proxy/resources/config/storage.go +++ b/pkg/proxy/resources/config/storage.go @@ -25,14 +25,17 @@ import ( kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" ) +// ConfigStorage storage for Config type ConfigStorage struct { Config *REST } +// REST resource for Config type REST struct { *apiregistry.Store } +// NewStorage for Config func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (ConfigStorage, error) { store := &apiregistry.Store{ NewFunc: func() runtime.Object { return &kkcorev1.Config{} }, @@ -47,9 +50,11 @@ func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (ConfigStorage, error) TableConvertor: apirest.NewDefaultTableConvertor(kkcorev1.SchemeGroupVersion.WithResource("configs").GroupResource()), } + options := &apigeneric.StoreOptions{ RESTOptions: optsGetter, } + if err := store.CompleteWithOptions(options); err != nil { return ConfigStorage{}, err } diff --git a/pkg/proxy/resources/config/strategy.go b/pkg/proxy/resources/config/strategy.go index d7863c26..daa2ee45 100644 --- a/pkg/proxy/resources/config/strategy.go +++ b/pkg/proxy/resources/config/strategy.go @@ -39,54 +39,65 @@ var Strategy = ConfigStrategy{_const.Scheme, apinames.SimpleNameGenerator} // ===CreateStrategy=== +// NamespaceScoped always true func (t ConfigStrategy) NamespaceScoped() bool 
{ return true } -func (t ConfigStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { +// PrepareForCreate do no-thing +func (t ConfigStrategy) PrepareForCreate(context.Context, runtime.Object) { // do nothing } -func (t ConfigStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { +// Validate always pass +func (t ConfigStrategy) Validate(context.Context, runtime.Object) field.ErrorList { // do nothing return nil } -func (t ConfigStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string { +// WarningsOnCreate do no-thing +func (t ConfigStrategy) WarningsOnCreate(context.Context, runtime.Object) []string { // do nothing return nil } -func (t ConfigStrategy) Canonicalize(obj runtime.Object) { +// Canonicalize do no-thing +func (t ConfigStrategy) Canonicalize(runtime.Object) { // do nothing } // ===UpdateStrategy=== +// AllowCreateOnUpdate always false func (t ConfigStrategy) AllowCreateOnUpdate() bool { return false } -func (t ConfigStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { +// PrepareForUpdate do no-thing +func (t ConfigStrategy) PrepareForUpdate(context.Context, runtime.Object, runtime.Object) { // do nothing } -func (t ConfigStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { +// ValidateUpdate do nothing +func (t ConfigStrategy) ValidateUpdate(context.Context, runtime.Object, runtime.Object) field.ErrorList { // do nothing return nil } -func (t ConfigStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string { +// WarningsOnUpdate always nil +func (t ConfigStrategy) WarningsOnUpdate(context.Context, runtime.Object, runtime.Object) []string { // do nothing return nil } +// AllowUnconditionalUpdate always true func (t ConfigStrategy) AllowUnconditionalUpdate() bool { return true } // ===ResetFieldsStrategy=== +// GetResetFields always nil func (t ConfigStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set 
{ return nil } diff --git a/pkg/proxy/resources/inventory/storage.go b/pkg/proxy/resources/inventory/storage.go index 95d6a6fd..1c37b4ad 100644 --- a/pkg/proxy/resources/inventory/storage.go +++ b/pkg/proxy/resources/inventory/storage.go @@ -25,14 +25,17 @@ import ( kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" ) +// InventoryStorage storage for Inventory type InventoryStorage struct { Inventory *REST } +// REST resource for Inventory type REST struct { *apiregistry.Store } +// NewStorage for Inventory func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (InventoryStorage, error) { store := &apiregistry.Store{ NewFunc: func() runtime.Object { return &kkcorev1.Inventory{} }, diff --git a/pkg/proxy/resources/inventory/strategy.go b/pkg/proxy/resources/inventory/strategy.go index 8e5844aa..97be9587 100644 --- a/pkg/proxy/resources/inventory/strategy.go +++ b/pkg/proxy/resources/inventory/strategy.go @@ -27,66 +27,75 @@ import ( _const "github.com/kubesphere/kubekey/v4/pkg/const" ) -// pipelineStrategy implements behavior for Pods -type pipelineStrategy struct { +// inventoryStrategy implements behavior for Pods +type inventoryStrategy struct { runtime.ObjectTyper apinames.NameGenerator } // Strategy is the default logic that applies when creating and updating Pod // objects via the REST API. 
-var Strategy = pipelineStrategy{_const.Scheme, apinames.SimpleNameGenerator} +var Strategy = inventoryStrategy{_const.Scheme, apinames.SimpleNameGenerator} // ===CreateStrategy=== -func (t pipelineStrategy) NamespaceScoped() bool { +// NamespaceScoped always true +func (t inventoryStrategy) NamespaceScoped() bool { return true } -func (t pipelineStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { +// PrepareForCreate do no-thing +func (t inventoryStrategy) PrepareForCreate(context.Context, runtime.Object) { // do nothing } -func (t pipelineStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { +// Validate always pass +func (t inventoryStrategy) Validate(context.Context, runtime.Object) field.ErrorList { // do nothing return nil } -func (t pipelineStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string { +// WarningsOnCreate do no-thing +func (t inventoryStrategy) WarningsOnCreate(context.Context, runtime.Object) []string { // do nothing return nil } -func (t pipelineStrategy) Canonicalize(obj runtime.Object) { +// Canonicalize do no-thing +func (t inventoryStrategy) Canonicalize(runtime.Object) { // do nothing } // ===UpdateStrategy=== -func (t pipelineStrategy) AllowCreateOnUpdate() bool { +// AllowCreateOnUpdate always false +func (t inventoryStrategy) AllowCreateOnUpdate() bool { return false } -func (t pipelineStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { - // do nothing -} +// PrepareForUpdate do no-thing +func (t inventoryStrategy) PrepareForUpdate(context.Context, runtime.Object, runtime.Object) {} -func (t pipelineStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { +// ValidateUpdate do nothing +func (t inventoryStrategy) ValidateUpdate(context.Context, runtime.Object, runtime.Object) field.ErrorList { // do nothing return nil } -func (t pipelineStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string { 
+// WarningsOnUpdate always nil +func (t inventoryStrategy) WarningsOnUpdate(context.Context, runtime.Object, runtime.Object) []string { // do nothing return nil } -func (t pipelineStrategy) AllowUnconditionalUpdate() bool { +// AllowUnconditionalUpdate always true +func (t inventoryStrategy) AllowUnconditionalUpdate() bool { return true } // ===ResetFieldsStrategy=== -func (t pipelineStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { +// GetResetFields always nil +func (t inventoryStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { return nil } diff --git a/pkg/proxy/resources/pipeline/storage.go b/pkg/proxy/resources/pipeline/storage.go index 792a93e1..67e225df 100644 --- a/pkg/proxy/resources/pipeline/storage.go +++ b/pkg/proxy/resources/pipeline/storage.go @@ -29,19 +29,23 @@ import ( kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" ) +// PipelineStorage storage for Pipeline type PipelineStorage struct { Pipeline *REST PipelineStatus *StatusREST } +// REST resource for Pipeline type REST struct { *apiregistry.Store } +// StatusREST status subresource for Pipeline type StatusREST struct { store *apiregistry.Store } +// NamespaceScoped is true for Pipeline func (r *StatusREST) NamespaceScoped() bool { return true } @@ -63,7 +67,7 @@ func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOp } // Update alters the status subset of an object. 
-func (r *StatusREST) Update(ctx context.Context, name string, objInfo apirest.UpdatedObjectInfo, createValidation apirest.ValidateObjectFunc, updateValidation apirest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { +func (r *StatusREST) Update(ctx context.Context, name string, objInfo apirest.UpdatedObjectInfo, createValidation apirest.ValidateObjectFunc, updateValidation apirest.ValidateObjectUpdateFunc, _ bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { // We are explicitly setting forceAllowCreate to false in the call to the underlying storage because // subresources should never allow create on update. return r.store.Update(ctx, name, objInfo, createValidation, updateValidation, false, options) @@ -74,9 +78,12 @@ func (r *StatusREST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { return r.store.GetResetFields() } +// ConvertToTable print table view func (r *StatusREST) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) { return r.store.ConvertToTable(ctx, object, tableOptions) } + +// NewStorage for Pipeline storage func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (PipelineStorage, error) { store := &apiregistry.Store{ NewFunc: func() runtime.Object { return &kkcorev1.Pipeline{} }, diff --git a/pkg/proxy/resources/pipeline/strategy.go b/pkg/proxy/resources/pipeline/strategy.go index 0c04a049..b7d01d14 100644 --- a/pkg/proxy/resources/pipeline/strategy.go +++ b/pkg/proxy/resources/pipeline/strategy.go @@ -18,6 +18,7 @@ package pipeline import ( "context" + "errors" "reflect" "k8s.io/apimachinery/pkg/runtime" @@ -41,59 +42,75 @@ var Strategy = pipelineStrategy{_const.Scheme, apinames.SimpleNameGenerator} // ===CreateStrategy=== +// NamespaceScoped always true func (t pipelineStrategy) NamespaceScoped() bool { return true } -func (t pipelineStrategy) PrepareForCreate(ctx context.Context, obj 
runtime.Object) { - // do nothing -} +// PrepareForCreate do no-thing +func (t pipelineStrategy) PrepareForCreate(context.Context, runtime.Object) {} -func (t pipelineStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { +// Validate always pass +func (t pipelineStrategy) Validate(context.Context, runtime.Object) field.ErrorList { // do nothing return nil } -func (t pipelineStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string { +// WarningsOnCreate do no-thing +func (t pipelineStrategy) WarningsOnCreate(context.Context, runtime.Object) []string { // do nothing return nil } -func (t pipelineStrategy) Canonicalize(obj runtime.Object) { +// Canonicalize do no-thing +func (t pipelineStrategy) Canonicalize(runtime.Object) { // do nothing } // ===UpdateStrategy=== +// AllowCreateOnUpdate always false func (t pipelineStrategy) AllowCreateOnUpdate() bool { return false } -func (t pipelineStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { +// PrepareForUpdate do no-thing +func (t pipelineStrategy) PrepareForUpdate(context.Context, runtime.Object, runtime.Object) { // do nothing } -func (t pipelineStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { +// ValidateUpdate spec is immutable +func (t pipelineStrategy) ValidateUpdate(_ context.Context, obj, old runtime.Object) field.ErrorList { // only support update status - task := obj.(*kkcorev1.Pipeline) - oldTask := old.(*kkcorev1.Pipeline) - if !reflect.DeepEqual(task.Spec, oldTask.Spec) { + pipeline, ok := obj.(*kkcorev1.Pipeline) + if !ok { + return field.ErrorList{field.InternalError(field.NewPath("spec"), errors.New("the object is not Task"))} + } + oldPipeline, ok := old.(*kkcorev1.Pipeline) + if !ok { + return field.ErrorList{field.InternalError(field.NewPath("spec"), errors.New("the object is not Task"))} + } + if !reflect.DeepEqual(pipeline.Spec, oldPipeline.Spec) { return 
field.ErrorList{field.Forbidden(field.NewPath("spec"), "spec is immutable")} } + return nil } -func (t pipelineStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string { +// WarningsOnUpdate always nil +func (t pipelineStrategy) WarningsOnUpdate(context.Context, runtime.Object, runtime.Object) []string { // do nothing return nil } +// AllowUnconditionalUpdate always true func (t pipelineStrategy) AllowUnconditionalUpdate() bool { return true } // ===ResetFieldsStrategy=== +// GetResetFields always nil func (t pipelineStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { return nil } diff --git a/pkg/proxy/resources/task/storage.go b/pkg/proxy/resources/task/storage.go index 54155fb4..7e098fbd 100644 --- a/pkg/proxy/resources/task/storage.go +++ b/pkg/proxy/resources/task/storage.go @@ -30,19 +30,23 @@ import ( kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1" ) +// TaskStorage storage for Task type TaskStorage struct { Task *REST TaskStatus *StatusREST } +// REST resource for Task type REST struct { *apiregistry.Store } +// StatusREST status subresource for Task type StatusREST struct { store *apiregistry.Store } +// NamespaceScoped is true for Task func (r *StatusREST) NamespaceScoped() bool { return true } @@ -64,7 +68,7 @@ func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOp } // Update alters the status subset of an object. 
-func (r *StatusREST) Update(ctx context.Context, name string, objInfo apirest.UpdatedObjectInfo, createValidation apirest.ValidateObjectFunc, updateValidation apirest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { +func (r *StatusREST) Update(ctx context.Context, name string, objInfo apirest.UpdatedObjectInfo, createValidation apirest.ValidateObjectFunc, updateValidation apirest.ValidateObjectUpdateFunc, _ bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { // We are explicitly setting forceAllowCreate to false in the call to the underlying storage because // subresources should never allow create on update. return r.store.Update(ctx, name, objInfo, createValidation, updateValidation, false, options) @@ -75,10 +79,12 @@ func (r *StatusREST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { return r.store.GetResetFields() } +// ConvertToTable print table view func (r *StatusREST) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) { return r.store.ConvertToTable(ctx, object, tableOptions) } +// NewStorage for Task storage func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (TaskStorage, error) { store := &apiregistry.Store{ NewFunc: func() runtime.Object { return &kkcorev1alpha1.Task{} }, @@ -87,10 +93,9 @@ func NewStorage(optsGetter apigeneric.RESTOptionsGetter) (TaskStorage, error) { DefaultQualifiedResource: kkcorev1alpha1.SchemeGroupVersion.WithResource("tasks").GroupResource(), SingularQualifiedResource: kkcorev1alpha1.SchemeGroupVersion.WithResource("task").GroupResource(), - CreateStrategy: Strategy, - UpdateStrategy: Strategy, - DeleteStrategy: Strategy, - //ResetFieldsStrategy: Strategy, + CreateStrategy: Strategy, + UpdateStrategy: Strategy, + DeleteStrategy: Strategy, ReturnDeletedObject: true, TableConvertor: 
apirest.NewDefaultTableConvertor(kkcorev1alpha1.SchemeGroupVersion.WithResource("tasks").GroupResource()), diff --git a/pkg/proxy/resources/task/strategy.go b/pkg/proxy/resources/task/strategy.go index 33dbb19d..0f0c8bb8 100644 --- a/pkg/proxy/resources/task/strategy.go +++ b/pkg/proxy/resources/task/strategy.go @@ -18,7 +18,7 @@ package task import ( "context" - "fmt" + "errors" "reflect" "k8s.io/apimachinery/pkg/fields" @@ -50,72 +50,84 @@ var Strategy = taskStrategy{_const.Scheme, apinames.SimpleNameGenerator} // ===CreateStrategy=== +// NamespaceScoped always true func (t taskStrategy) NamespaceScoped() bool { return true } -func (t taskStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { +// PrepareForCreate set tasks status to pending +func (t taskStrategy) PrepareForCreate(_ context.Context, obj runtime.Object) { // init status when create - task := obj.(*kkcorev1alpha1.Task) - task.Status = kkcorev1alpha1.TaskStatus{ - Phase: kkcorev1alpha1.TaskPhasePending, + if task, ok := obj.(*kkcorev1alpha1.Task); ok { + task.Status = kkcorev1alpha1.TaskStatus{ + Phase: kkcorev1alpha1.TaskPhasePending, + } } } -func (t taskStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { - // do nothing +// Validate always pass +func (t taskStrategy) Validate(context.Context, runtime.Object) field.ErrorList { return nil } -func (t taskStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string { - // do nothing +// WarningsOnCreate do no-thing +func (t taskStrategy) WarningsOnCreate(context.Context, runtime.Object) []string { return nil } -func (t taskStrategy) Canonicalize(obj runtime.Object) { - // do nothing -} +// Canonicalize do no-thing +func (t taskStrategy) Canonicalize(runtime.Object) {} // ===UpdateStrategy=== +// AllowCreateOnUpdate always false func (t taskStrategy) AllowCreateOnUpdate() bool { return false } -func (t taskStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { - // do nothing 
-} +// PrepareForUpdate do no-thing +func (t taskStrategy) PrepareForUpdate(context.Context, runtime.Object, runtime.Object) {} -func (t taskStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { +// ValidateUpdate spec is immutable +func (t taskStrategy) ValidateUpdate(_ context.Context, obj, old runtime.Object) field.ErrorList { // only support update status - task := obj.(*kkcorev1alpha1.Task) - oldTask := old.(*kkcorev1alpha1.Task) + task, ok := obj.(*kkcorev1alpha1.Task) + if !ok { + return field.ErrorList{field.InternalError(field.NewPath("spec"), errors.New("the object is not Task"))} + } + oldTask, ok := old.(*kkcorev1alpha1.Task) + if !ok { + return field.ErrorList{field.InternalError(field.NewPath("spec"), errors.New("the object is not Task"))} + } if !reflect.DeepEqual(task.Spec, oldTask.Spec) { return field.ErrorList{field.Forbidden(field.NewPath("spec"), "spec is immutable")} } + return nil } -func (t taskStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string { - // do nothing +// WarningsOnUpdate always nil +func (t taskStrategy) WarningsOnUpdate(context.Context, runtime.Object, runtime.Object) []string { return nil } +// AllowUnconditionalUpdate always true func (t taskStrategy) AllowUnconditionalUpdate() bool { return true } // ===ResetFieldsStrategy=== +// GetResetFields always nil func (t taskStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { return nil } // OwnerPipelineIndexFunc return value ownerReference.object is pipeline. 
-func OwnerPipelineIndexFunc(obj interface{}) ([]string, error) { +func OwnerPipelineIndexFunc(obj any) ([]string, error) { task, ok := obj.(*kkcorev1alpha1.Task) if !ok { - return nil, fmt.Errorf("not a task") + return nil, errors.New("not Task") } var index string @@ -125,11 +137,12 @@ func OwnerPipelineIndexFunc(obj interface{}) ([]string, error) { Namespace: task.Namespace, Name: reference.Name, }.String() + break } } if index == "" { - return nil, fmt.Errorf("task has no ownerReference.pipeline") + return nil, errors.New("task has no ownerReference.pipeline") } return []string{index}, nil @@ -143,10 +156,10 @@ func Indexers() *cgtoolscache.Indexers { } // MatchTask returns a generic matcher for a given label and field selector. -func MatchTask(label labels.Selector, field fields.Selector) apistorage.SelectionPredicate { +func MatchTask(label labels.Selector, fd fields.Selector) apistorage.SelectionPredicate { return apistorage.SelectionPredicate{ Label: label, - Field: field, + Field: fd, GetAttrs: GetAttrs, IndexFields: []string{kkcorev1alpha1.TaskOwnerField}, } @@ -156,41 +169,45 @@ func MatchTask(label labels.Selector, field fields.Selector) apistorage.Selectio func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) { task, ok := obj.(*kkcorev1alpha1.Task) if !ok { - return nil, nil, fmt.Errorf("not a Task") + return nil, nil, errors.New("not Task") } - return labels.Set(task.ObjectMeta.Labels), ToSelectableFields(task), nil + + return task.ObjectMeta.Labels, ToSelectableFields(task), nil } // ToSelectableFields returns a field set that represents the object -// TODO: fields are not labels, and the validation rules for them do not apply. func ToSelectableFields(task *kkcorev1alpha1.Task) fields.Set { // The purpose of allocation with a given number of elements is to reduce // amount of allocations needed to create the fields.Set. If you add any // field here or the number of object-meta related fields changes, this should // be adjusted. 
- taskSpecificFieldsSet := make(fields.Set, 10) + taskSpecificFieldsSet := make(fields.Set) for _, reference := range task.OwnerReferences { if reference.Kind == pipelineKind { taskSpecificFieldsSet[kkcorev1alpha1.TaskOwnerField] = types.NamespacedName{ Namespace: task.Namespace, Name: reference.Name, }.String() + break } } + return apigeneric.AddObjectMetaFieldsSet(taskSpecificFieldsSet, &task.ObjectMeta, true) } // OwnerPipelineTriggerFunc returns value ownerReference is pipeline of given object. func OwnerPipelineTriggerFunc(obj runtime.Object) string { - task := obj.(*kkcorev1alpha1.Task) - for _, reference := range task.OwnerReferences { - if reference.Kind == pipelineKind { - return types.NamespacedName{ - Namespace: task.Namespace, - Name: reference.Name, - }.String() + if task, ok := obj.(*kkcorev1alpha1.Task); ok { + for _, reference := range task.OwnerReferences { + if reference.Kind == pipelineKind { + return types.NamespacedName{ + Namespace: task.Namespace, + Name: reference.Name, + }.String() + } } } + return "" } diff --git a/pkg/proxy/router.go b/pkg/proxy/router.go index 5ac89a2d..281254d4 100644 --- a/pkg/proxy/router.go +++ b/pkg/proxy/router.go @@ -40,9 +40,11 @@ type sortableDispatcherCandidates struct { func (dc *sortableDispatcherCandidates) Len() int { return len(dc.candidates) } + func (dc *sortableDispatcherCandidates) Swap(i, j int) { dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i] } + func (dc *sortableDispatcherCandidates) Less(i, j int) bool { ci := dc.candidates[i] cj := dc.candidates[j] diff --git a/pkg/proxy/transport.go b/pkg/proxy/transport.go index 0bd9f7e5..e8f76a6a 100644 --- a/pkg/proxy/transport.go +++ b/pkg/proxy/transport.go @@ -18,6 +18,7 @@ package proxy import ( "bytes" + "errors" "fmt" "io" "net/http" @@ -52,6 +53,7 @@ import ( "github.com/kubesphere/kubekey/v4/pkg/proxy/resources/task" ) +// NewConfig replace the restconfig transport to proxy transport func NewConfig(restconfig *rest.Config) 
(*rest.Config, error) { var err error restconfig.Transport, err = newProxyTransport(restconfig) @@ -59,20 +61,25 @@ func NewConfig(restconfig *rest.Config) (*rest.Config, error) { return nil, fmt.Errorf("create proxy transport error: %w", err) } restconfig.TLSClientConfig = rest.TLSClientConfig{} + return restconfig, nil } // NewProxyTransport return a new http.RoundTripper use in ctrl.client. -// when restConfig is not empty: should connect a kubernetes cluster and store some resources in there. -// such as: pipeline.kubekey.kubesphere.io/v1, inventory.kubekey.kubesphere.io/v1, config.kubekey.kubesphere.io/v1 +// When restConfig is not empty: should connect a kubernetes cluster and store some resources in there. +// Such as: pipeline.kubekey.kubesphere.io/v1, inventory.kubekey.kubesphere.io/v1, config.kubekey.kubesphere.io/v1 // when restConfig is empty: store all resource in local. // // SPECIFICALLY: since tasks is running data, which is reentrant and large in quantity, // they should always store in local. 
func newProxyTransport(restConfig *rest.Config) (http.RoundTripper, error) { lt := &transport{ - authz: authorizerfactory.NewAlwaysAllowAuthorizer(), - handlerChainFunc: defaultHandlerChain, + authz: authorizerfactory.NewAlwaysAllowAuthorizer(), + handlerChainFunc: func(handler http.Handler) http.Handler { + return genericapifilters.WithRequestInfo(handler, &apirequest.RequestInfoFactory{ + APIPrefixes: sets.NewString("apis"), + }) + }, } if restConfig.Host != "" { clientFor, err := rest.HTTPClientFor(restConfig) @@ -83,83 +90,94 @@ func newProxyTransport(restConfig *rest.Config) (http.RoundTripper, error) { } // register kkcorev1alpha1 resources - kkv1alpha1 := newApiIResources(kkcorev1alpha1.SchemeGroupVersion) + kkv1alpha1 := newAPIIResources(kkcorev1alpha1.SchemeGroupVersion) storage, err := task.NewStorage(internal.NewFileRESTOptionsGetter(kkcorev1alpha1.SchemeGroupVersion)) if err != nil { - klog.V(4).ErrorS(err, "failed to create storage") + klog.V(6).ErrorS(err, "failed to create storage") + return nil, err } if err := kkv1alpha1.AddResource(resourceOptions{ path: "tasks", storage: storage.Task, }); err != nil { - klog.V(4).ErrorS(err, "failed to add resource") + klog.V(6).ErrorS(err, "failed to add resource") + return nil, err } if err := kkv1alpha1.AddResource(resourceOptions{ path: "tasks/status", storage: storage.TaskStatus, }); err != nil { - klog.V(4).ErrorS(err, "failed to add resource") + klog.V(6).ErrorS(err, "failed to add resource") + return nil, err } if err := lt.registerResources(kkv1alpha1); err != nil { - klog.V(4).ErrorS(err, "failed to register resources") + klog.V(6).ErrorS(err, "failed to register resources") } // when restConfig is null. 
should store all resource local if restConfig.Host == "" { // register kkcorev1 resources - kkv1 := newApiIResources(kkcorev1.SchemeGroupVersion) + kkv1 := newAPIIResources(kkcorev1.SchemeGroupVersion) // add config configStorage, err := config.NewStorage(internal.NewFileRESTOptionsGetter(kkcorev1.SchemeGroupVersion)) if err != nil { - klog.V(4).ErrorS(err, "failed to create storage") + klog.V(6).ErrorS(err, "failed to create storage") + return nil, err } if err := kkv1.AddResource(resourceOptions{ path: "configs", storage: configStorage.Config, }); err != nil { - klog.V(4).ErrorS(err, "failed to add resource") + klog.V(6).ErrorS(err, "failed to add resource") + return nil, err } // add inventory inventoryStorage, err := inventory.NewStorage(internal.NewFileRESTOptionsGetter(kkcorev1.SchemeGroupVersion)) if err != nil { - klog.V(4).ErrorS(err, "failed to create storage") + klog.V(6).ErrorS(err, "failed to create storage") + return nil, err } if err := kkv1.AddResource(resourceOptions{ path: "inventories", storage: inventoryStorage.Inventory, }); err != nil { - klog.V(4).ErrorS(err, "failed to add resource") + klog.V(6).ErrorS(err, "failed to add resource") + return nil, err } // add pipeline pipelineStorage, err := pipeline.NewStorage(internal.NewFileRESTOptionsGetter(kkcorev1.SchemeGroupVersion)) if err != nil { - klog.V(4).ErrorS(err, "failed to create storage") + klog.V(6).ErrorS(err, "failed to create storage") + return nil, err } if err := kkv1.AddResource(resourceOptions{ path: "pipelines", storage: pipelineStorage.Pipeline, }); err != nil { - klog.V(4).ErrorS(err, "failed to add resource") + klog.V(6).ErrorS(err, "failed to add resource") + return nil, err } if err := kkv1.AddResource(resourceOptions{ path: "pipelines/status", storage: pipelineStorage.PipelineStatus, }); err != nil { - klog.V(4).ErrorS(err, "failed to add resource") + klog.V(6).ErrorS(err, "failed to add resource") + return nil, err } if err := lt.registerResources(kkv1); err != nil { - 
klog.V(4).ErrorS(err, "failed to register resources") + klog.V(6).ErrorS(err, "failed to register resources") + return nil, err } } @@ -171,15 +189,19 @@ type responseWriter struct { *http.Response } +// Header get header for responseWriter func (r *responseWriter) Header() http.Header { return r.Response.Header } +// Write body for responseWriter func (r *responseWriter) Write(bs []byte) (int, error) { r.Response.Body = io.NopCloser(bytes.NewBuffer(bs)) + return 0, nil } +// WriteHeader writer header for responseWriter func (r *responseWriter) WriteHeader(statusCode int) { r.Response.StatusCode = statusCode } @@ -196,6 +218,7 @@ type transport struct { handlerChainFunc func(handler http.Handler) http.Handler } +// RoundTrip deal proxy transport http.Request. func (l *transport) RoundTrip(request *http.Request) (*http.Response, error) { if l.restClient != nil && !strings.HasPrefix(request.URL.Path, "/apis/"+kkcorev1alpha1.SchemeGroupVersion.String()) { return l.restClient.Transport.RoundTrip(request) @@ -212,6 +235,7 @@ func (l *transport) RoundTrip(request *http.Request) (*http.Response, error) { } // call handler l.handlerChainFunc(handler).ServeHTTP(&responseWriter{response}, request) + return response, nil } @@ -226,61 +250,32 @@ func (l transport) detectDispatcher(request *http.Request) (http.HandlerFunc, er } } if len(filtered.candidates) == 0 { - return nil, fmt.Errorf("not found") + return nil, errors.New("not found") } sort.Sort(sort.Reverse(filtered)) handler, ok := filtered.candidates[0].router.handlers[request.Method] if !ok { - return nil, fmt.Errorf("not found") + return nil, errors.New("not found") } + return handler, nil } func (l *transport) registerResources(resources *apiResources) error { // register apiResources router - l.registerRouter(http.MethodGet, resources.prefix, resources.handlerApiResources(), true) + l.registerRouter(http.MethodGet, resources.prefix, resources.handlerAPIResources(), true) // register resources router for _, o := 
range resources.resourceOptions { // what verbs are supported by the storage, used to know what verbs we support per path - creater, isCreater := o.storage.(apirest.Creater) - namedCreater, isNamedCreater := o.storage.(apirest.NamedCreater) - lister, isLister := o.storage.(apirest.Lister) - getter, isGetter := o.storage.(apirest.Getter) - getterWithOptions, isGetterWithOptions := o.storage.(apirest.GetterWithOptions) - gracefulDeleter, isGracefulDeleter := o.storage.(apirest.GracefulDeleter) - collectionDeleter, isCollectionDeleter := o.storage.(apirest.CollectionDeleter) - updater, isUpdater := o.storage.(apirest.Updater) - patcher, isPatcher := o.storage.(apirest.Patcher) - watcher, isWatcher := o.storage.(apirest.Watcher) - connecter, isConnecter := o.storage.(apirest.Connecter) - tableProvider, isTableProvider := o.storage.(apirest.TableConvertor) + + _, isLister := o.storage.(apirest.Lister) + _, isTableProvider := o.storage.(apirest.TableConvertor) if isLister && !isTableProvider { // All listers must implement TableProvider return fmt.Errorf("%q must implement TableConvertor", o.path) } - gvAcceptor, _ := o.storage.(apirest.GroupVersionAcceptor) - if isNamedCreater { - isCreater = true - } - - allowWatchList := isWatcher && isLister - var ( - connectSubpath bool - getSubpath bool - ) - if isConnecter { - _, connectSubpath, _ = connecter.NewConnectOptions() - } - if isGetterWithOptions { - _, getSubpath, _ = getterWithOptions.NewGetOptions() - } - resource, subresource, err := splitSubresource(o.path) - if err != nil { - return err - } - isSubresource := subresource != "" scoper, ok := o.storage.(apirest.Scoper) if !ok { return fmt.Errorf("%q must implement scoper", o.path) @@ -288,119 +283,93 @@ func (l *transport) registerResources(resources *apiResources) error { // Get the list of actions for the given scope. switch { - case !scoper.NamespaceScoped(): + case !scoper.NamespaceScoped(): // cluster // do nothing. 
The current managed resources are all namespace scope. - default: - resourcePath := "/namespaces/{namespace}/" + resource - itemPath := resourcePath + "/{name}" - if isSubresource { - itemPath = itemPath + "/" + subresource - resourcePath = itemPath - } - // request scope - fqKindToRegister, err := apiendpoints.GetResourceKind(resources.gv, o.storage, _const.Scheme) + default: // namespace + reqScope, err := newReqScope(resources, o, l.authz) if err != nil { return err } - reqScope := apihandlers.RequestScope{ - Namer: apihandlers.ContextBasedNaming{ - Namer: meta.NewAccessor(), - ClusterScoped: false, - }, - Serializer: _const.Codecs, - ParameterCodec: _const.ParameterCodec, - Creater: _const.Scheme, - Convertor: _const.Scheme, - Defaulter: _const.Scheme, - Typer: _const.Scheme, - UnsafeConvertor: _const.Scheme, - Authorizer: l.authz, - - EquivalentResourceMapper: runtime.NewEquivalentResourceRegistry(), - - // TODO: Check for the interface on storage - TableConvertor: tableProvider, - - // TODO: This seems wrong for cross-group subresources. It makes an assumption that a subresource and its parent are in the same group version. Revisit this. 
- Resource: resources.gv.WithResource(resource), - Subresource: subresource, - Kind: fqKindToRegister, - - AcceptsGroupVersionDelegate: gvAcceptor, - - HubGroupVersion: schema.GroupVersion{Group: fqKindToRegister.Group, Version: runtime.APIVersionInternal}, - - MetaGroupVersion: metav1.SchemeGroupVersion, - - MaxRequestBodyBytes: 0, - } - var resetFields map[fieldpath.APIVersion]*fieldpath.Set - if resetFieldsStrategy, isResetFieldsStrategy := o.storage.(apirest.ResetFieldsStrategy); isResetFieldsStrategy { - resetFields = resetFieldsStrategy.GetResetFields() - } - reqScope.FieldManager, err = managedfields.NewDefaultFieldManager( - managedfields.NewDeducedTypeConverter(), - _const.Scheme, - _const.Scheme, - _const.Scheme, - fqKindToRegister, - reqScope.HubGroupVersion, - subresource, - resetFields, - ) - if err != nil { - return err - } - // LIST - l.registerRouter(http.MethodGet, resources.prefix+resourcePath, apihandlers.ListResource(lister, watcher, &reqScope, false, resources.minRequestTimeout), isLister) + l.registerList(resources, reqScope, o) // POST - if isNamedCreater { - l.registerRouter(http.MethodPost, resources.prefix+resourcePath, apihandlers.CreateNamedResource(namedCreater, &reqScope, o.admit), isCreater) - } else { - l.registerRouter(http.MethodPost, resources.prefix+resourcePath, apihandlers.CreateResource(creater, &reqScope, o.admit), isCreater) - } + l.registerPost(resources, reqScope, o) // DELETECOLLECTION - l.registerRouter(http.MethodDelete, resources.prefix+resourcePath, apihandlers.DeleteCollection(collectionDeleter, isCollectionDeleter, &reqScope, o.admit), isCollectionDeleter) + l.registerDeleteCollection(resources, reqScope, o) // DEPRECATED in 1.11 WATCHLIST - l.registerRouter(http.MethodGet, resources.prefix+"/watch"+resourcePath, apihandlers.ListResource(lister, watcher, &reqScope, true, resources.minRequestTimeout), allowWatchList) + l.registerWatchList(resources, reqScope, o) // GET - if isGetterWithOptions { - 
l.registerRouter(http.MethodGet, resources.prefix+itemPath, apihandlers.GetResourceWithOptions(getterWithOptions, &reqScope, isSubresource), isGetter) - l.registerRouter(http.MethodGet, resources.prefix+itemPath+"/{path:*}", apihandlers.GetResourceWithOptions(getterWithOptions, &reqScope, isSubresource), isGetter && getSubpath) - } else { - l.registerRouter(http.MethodGet, resources.prefix+itemPath, apihandlers.GetResource(getter, &reqScope), isGetter) - l.registerRouter(http.MethodGet, resources.prefix+itemPath+"/{path:*}", apihandlers.GetResource(getter, &reqScope), isGetter && getSubpath) - } + l.registerGet(resources, reqScope, o) // PUT - l.registerRouter(http.MethodPut, resources.prefix+itemPath, apihandlers.UpdateResource(updater, &reqScope, o.admit), isUpdater) + l.registerPut(resources, reqScope, o) // PATCH - supportedTypes := []string{ - string(types.JSONPatchType), - string(types.MergePatchType), - string(types.StrategicMergePatchType), - string(types.ApplyPatchType), - } - l.registerRouter(http.MethodPatch, resources.prefix+itemPath, apihandlers.PatchResource(patcher, &reqScope, o.admit, supportedTypes), isPatcher) + l.registerPatch(resources, reqScope, o) // DELETE - l.registerRouter(http.MethodDelete, resources.prefix+itemPath, apihandlers.DeleteResource(gracefulDeleter, isGracefulDeleter, &reqScope, o.admit), isGracefulDeleter) + l.registerDelete(resources, reqScope, o) // DEPRECATED in 1.11 WATCH - l.registerRouter(http.MethodGet, resources.prefix+"/watch"+itemPath, apihandlers.ListResource(lister, watcher, &reqScope, true, resources.minRequestTimeout), isWatcher) + l.registerWatch(resources, reqScope, o) // CONNECT - l.registerRouter(http.MethodConnect, resources.prefix+itemPath, apihandlers.ConnectResource(connecter, &reqScope, o.admit, o.path, isSubresource), isConnecter) - l.registerRouter(http.MethodConnect, resources.prefix+itemPath+"/{path:*}", apihandlers.ConnectResource(connecter, &reqScope, o.admit, o.path, isSubresource), isConnecter && 
connectSubpath) - // list or post across namespace. - // For ex: LIST all pods in all namespaces by sending a LIST request at /api/apiVersion/pods. - // LIST - l.registerRouter(http.MethodGet, resources.prefix+"/"+resource, apihandlers.ListResource(lister, watcher, &reqScope, false, resources.minRequestTimeout), !isSubresource && isLister) - // WATCHLIST - l.registerRouter(http.MethodGet, resources.prefix+"/watch/"+resource, apihandlers.ListResource(lister, watcher, &reqScope, true, resources.minRequestTimeout), !isSubresource && allowWatchList) + l.registerConnect(resources, reqScope, o) } } + return nil } -func (l *transport) registerRouter(verb string, path string, handler http.HandlerFunc, shouldAdd bool) { +// newReqScope for resource. +func newReqScope(resources *apiResources, o resourceOptions, authz authorizer.Authorizer) (apihandlers.RequestScope, error) { + tableProvider, _ := o.storage.(apirest.TableConvertor) + gvAcceptor, _ := o.storage.(apirest.GroupVersionAcceptor) + // request scope + fqKindToRegister, err := apiendpoints.GetResourceKind(resources.gv, o.storage, _const.Scheme) + if err != nil { + return apihandlers.RequestScope{}, err + } + reqScope := apihandlers.RequestScope{ + Namer: apihandlers.ContextBasedNaming{ + Namer: meta.NewAccessor(), + ClusterScoped: false, + }, + Serializer: _const.Codecs, + ParameterCodec: _const.ParameterCodec, + Creater: _const.Scheme, + Convertor: _const.Scheme, + Defaulter: _const.Scheme, + Typer: _const.Scheme, + UnsafeConvertor: _const.Scheme, + Authorizer: authz, + EquivalentResourceMapper: runtime.NewEquivalentResourceRegistry(), + TableConvertor: tableProvider, + Resource: resources.gv.WithResource(o.resource), + Subresource: o.subresource, + Kind: fqKindToRegister, + AcceptsGroupVersionDelegate: gvAcceptor, + HubGroupVersion: schema.GroupVersion{Group: fqKindToRegister.Group, Version: runtime.APIVersionInternal}, + MetaGroupVersion: metav1.SchemeGroupVersion, + MaxRequestBodyBytes: 0, + } + var resetFields 
map[fieldpath.APIVersion]*fieldpath.Set + if resetFieldsStrategy, isResetFieldsStrategy := o.storage.(apirest.ResetFieldsStrategy); isResetFieldsStrategy { + resetFields = resetFieldsStrategy.GetResetFields() + } + reqScope.FieldManager, err = managedfields.NewDefaultFieldManager( + managedfields.NewDeducedTypeConverter(), + _const.Scheme, + _const.Scheme, + _const.Scheme, + fqKindToRegister, + reqScope.HubGroupVersion, + o.subresource, + resetFields, + ) + if err != nil { + return apihandlers.RequestScope{}, err + } + + return reqScope, nil +} + +func (l *transport) registerRouter(verb, path string, handler http.HandlerFunc, shouldAdd bool) { if !shouldAdd { // if the router should not be added. return return @@ -412,17 +381,20 @@ func (l *transport) registerRouter(verb string, path string, handler http.Handle // add handler to router if _, ok := r.handlers[verb]; ok { // if handler is exists. throw error - klog.V(4).ErrorS(fmt.Errorf("handler has already register"), "failed to register router", "path", path, "verb", verb) + klog.V(6).ErrorS(errors.New("handler has already register"), "failed to register router", "path", path, "verb", verb) + return } l.routers[i].handlers[verb] = handler + return } // add new router expression, err := newPathExpression(path) if err != nil { - klog.V(4).ErrorS(err, "failed to register router", "path", path, "verb", verb) + klog.V(6).ErrorS(err, "failed to register router", "path", path, "verb", verb) + return } l.routers = append(l.routers, router{ @@ -434,25 +406,84 @@ func (l *transport) registerRouter(verb string, path string, handler http.Handle }) } -// splitSubresource checks if the given storage path is the path of a subresource and returns -// the resource and subresource components. 
-func splitSubresource(path string) (string, string, error) { - var resource, subresource string - switch parts := strings.Split(path, "/"); len(parts) { - case 2: - resource, subresource = parts[0], parts[1] - case 1: - resource = parts[0] - default: - return "", "", fmt.Errorf("api_installer allows only one or two segment paths (resource or resource/subresource)") +func (l *transport) registerList(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) { + lister, isLister := o.storage.(apirest.Lister) + watcher, isWatcher := o.storage.(apirest.Watcher) + l.registerRouter(http.MethodGet, resources.prefix+o.resourcePath, apihandlers.ListResource(lister, watcher, &reqScope, false, resources.minRequestTimeout), isLister) + // list or post across namespace. + // For ex: LIST all pods in all namespaces by sending a LIST request at /api/apiVersion/pods. + // LIST + l.registerRouter(http.MethodGet, resources.prefix+"/"+o.resource, apihandlers.ListResource(lister, watcher, &reqScope, false, resources.minRequestTimeout), o.subresource == "" && isLister) + // WATCHLIST + l.registerRouter(http.MethodGet, resources.prefix+"/watch/"+o.resource, apihandlers.ListResource(lister, watcher, &reqScope, true, resources.minRequestTimeout), o.subresource == "" && isWatcher && isLister) +} + +func (l *transport) registerPost(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) { + creater, isCreater := o.storage.(apirest.Creater) + namedCreater, isNamedCreater := o.storage.(apirest.NamedCreater) + if isNamedCreater { + l.registerRouter(http.MethodPost, resources.prefix+o.resourcePath, apihandlers.CreateNamedResource(namedCreater, &reqScope, o.admit), isCreater) + } else { + l.registerRouter(http.MethodPost, resources.prefix+o.resourcePath, apihandlers.CreateResource(creater, &reqScope, o.admit), isCreater) } - return resource, subresource, nil } -var defaultRequestInfoResolver = &apirequest.RequestInfoFactory{ - APIPrefixes: 
sets.NewString("apis"), +func (l *transport) registerDeleteCollection(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) { + collectionDeleter, isCollectionDeleter := o.storage.(apirest.CollectionDeleter) + l.registerRouter(http.MethodDelete, resources.prefix+o.resourcePath, apihandlers.DeleteCollection(collectionDeleter, isCollectionDeleter, &reqScope, o.admit), isCollectionDeleter) } -func defaultHandlerChain(handler http.Handler) http.Handler { - return genericapifilters.WithRequestInfo(handler, defaultRequestInfoResolver) +func (l *transport) registerWatchList(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) { + lister, isLister := o.storage.(apirest.Lister) + watcher, isWatcher := o.storage.(apirest.Watcher) + l.registerRouter(http.MethodGet, resources.prefix+"/watch"+o.resourcePath, apihandlers.ListResource(lister, watcher, &reqScope, true, resources.minRequestTimeout), isWatcher && isLister) +} + +func (l *transport) registerGet(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) { + getterWithOptions, isGetterWithOptions := o.storage.(apirest.GetterWithOptions) + getter, isGetter := o.storage.(apirest.Getter) + if isGetterWithOptions { + _, getSubpath, _ := getterWithOptions.NewGetOptions() + l.registerRouter(http.MethodGet, resources.prefix+o.itemPath, apihandlers.GetResourceWithOptions(getterWithOptions, &reqScope, o.subresource != ""), isGetter) + l.registerRouter(http.MethodGet, resources.prefix+o.itemPath+"/{path:*}", apihandlers.GetResourceWithOptions(getterWithOptions, &reqScope, o.subresource != ""), isGetter && getSubpath) + } else { + l.registerRouter(http.MethodGet, resources.prefix+o.itemPath, apihandlers.GetResource(getter, &reqScope), isGetter) + l.registerRouter(http.MethodGet, resources.prefix+o.itemPath+"/{path:*}", apihandlers.GetResource(getter, &reqScope), false) + } +} + +func (l *transport) registerPut(resources *apiResources, reqScope 
apihandlers.RequestScope, o resourceOptions) { + updater, isUpdater := o.storage.(apirest.Updater) + l.registerRouter(http.MethodPut, resources.prefix+o.itemPath, apihandlers.UpdateResource(updater, &reqScope, o.admit), isUpdater) +} + +func (l *transport) registerPatch(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) { + patcher, isPatcher := o.storage.(apirest.Patcher) + l.registerRouter(http.MethodPatch, resources.prefix+o.itemPath, apihandlers.PatchResource(patcher, &reqScope, o.admit, []string{ + string(types.JSONPatchType), + string(types.MergePatchType), + string(types.StrategicMergePatchType), + string(types.ApplyPatchType), + }), isPatcher) +} + +func (l *transport) registerDelete(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) { + gracefulDeleter, isGracefulDeleter := o.storage.(apirest.GracefulDeleter) + l.registerRouter(http.MethodDelete, resources.prefix+o.itemPath, apihandlers.DeleteResource(gracefulDeleter, isGracefulDeleter, &reqScope, o.admit), isGracefulDeleter) +} + +func (l *transport) registerWatch(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) { + lister, _ := o.storage.(apirest.Lister) + watcher, isWatcher := o.storage.(apirest.Watcher) + l.registerRouter(http.MethodGet, resources.prefix+"/watch"+o.itemPath, apihandlers.ListResource(lister, watcher, &reqScope, true, resources.minRequestTimeout), isWatcher) +} + +func (l *transport) registerConnect(resources *apiResources, reqScope apihandlers.RequestScope, o resourceOptions) { + var connectSubpath bool + connecter, isConnecter := o.storage.(apirest.Connecter) + if isConnecter { + _, connectSubpath, _ = connecter.NewConnectOptions() + } + l.registerRouter(http.MethodConnect, resources.prefix+o.itemPath, apihandlers.ConnectResource(connecter, &reqScope, o.admit, o.path, o.subresource != ""), isConnecter) + l.registerRouter(http.MethodConnect, resources.prefix+o.itemPath+"/{path:*}", 
apihandlers.ConnectResource(connecter, &reqScope, o.admit, o.path, o.subresource != ""), isConnecter && connectSubpath) } diff --git a/pkg/variable/helper.go b/pkg/variable/helper.go index 05fe9000..f82c30b7 100644 --- a/pkg/variable/helper.go +++ b/pkg/variable/helper.go @@ -23,10 +23,12 @@ import ( "slices" "strconv" "strings" + "time" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/json" "k8s.io/klog/v2" + "k8s.io/utils/ptr" kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" _const "github.com/kubesphere/kubekey/v4/pkg/const" @@ -35,45 +37,56 @@ import ( // combineVariables merge multiple variables into one variable // v2 will override v1 if variable is repeated -func combineVariables(v1, v2 map[string]any) map[string]any { +func combineVariables(m1, m2 map[string]any) map[string]any { var f func(val1, val2 any) any f = func(val1, val2 any) any { - if val1 != nil && reflect.TypeOf(val1).Kind() == reflect.Map && - val2 != nil && reflect.TypeOf(val2).Kind() == reflect.Map { + if val1 != nil && val2 != nil && + reflect.TypeOf(val1).Kind() == reflect.Map && reflect.TypeOf(val2).Kind() == reflect.Map { mergedVars := make(map[string]any) for _, k := range reflect.ValueOf(val1).MapKeys() { mergedVars[k.String()] = reflect.ValueOf(val1).MapIndex(k).Interface() } + for _, k := range reflect.ValueOf(val2).MapKeys() { mergedVars[k.String()] = f(mergedVars[k.String()], reflect.ValueOf(val2).MapIndex(k).Interface()) } + return mergedVars } + return val2 } mv := make(map[string]any) - for k, v := range v1 { + + for k, v := range m1 { mv[k] = v } - for k, v := range v2 { + + for k, v := range m2 { mv[k] = f(mv[k], v) } + return mv } func convertGroup(inv kkcorev1.Inventory) map[string]any { groups := make(map[string]any) all := make([]string, 0) + for hn := range inv.Spec.Hosts { all = append(all, hn) } + if !slices.Contains(all, _const.VariableLocalHost) { // set default localhost all = append(all, _const.VariableLocalHost) } + 
groups[_const.VariableGroupsAll] = all + for gn := range inv.Spec.Groups { groups[gn] = hostsInGroup(inv, gn) } + return groups } @@ -85,20 +98,23 @@ func hostsInGroup(inv kkcorev1.Inventory, groupName string) []string { for _, cg := range v.Groups { hosts = mergeSlice(hostsInGroup(inv, cg), hosts) } + return mergeSlice(hosts, v.Hosts) } + return nil } // mergeSlice with skip repeat value func mergeSlice(g1, g2 []string) []string { uniqueValues := make(map[string]bool) - mg := []string{} + mg := make([]string, 0) // Add values from the first slice for _, v := range g1 { if !uniqueValues[v] { uniqueValues[v] = true + mg = append(mg, v) } } @@ -107,6 +123,7 @@ func mergeSlice(g1, g2 []string) []string { for _, v := range g2 { if !uniqueValues[v] { uniqueValues[v] = true + mg = append(mg, v) } } @@ -118,52 +135,43 @@ func mergeSlice(g1, g2 []string) []string { func parseVariable(v any, parseTmplFunc func(string) (string, error)) error { switch reflect.ValueOf(v).Kind() { case reflect.Map: - for _, kv := range reflect.ValueOf(v).MapKeys() { - val := reflect.ValueOf(v).MapIndex(kv) - if vv, ok := val.Interface().(string); ok { - if tmpl.IsTmplSyntax(vv) { - newValue, err := parseTmplFunc(vv) - if err != nil { - return err - } - switch { - case strings.EqualFold(newValue, "TRUE"): - reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(true)) - case strings.EqualFold(newValue, "FALSE"): - reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(false)) - default: - reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(newValue)) - } - } - } else { - if err := parseVariable(val.Interface(), parseTmplFunc); err != nil { - return err - } - } + if err := parseVariableFromMap(v, parseTmplFunc); err != nil { + return err } case reflect.Slice, reflect.Array: - for i := 0; i < reflect.ValueOf(v).Len(); i++ { - val := reflect.ValueOf(v).Index(i) - if vv, ok := val.Interface().(string); ok { - if tmpl.IsTmplSyntax(vv) { - newValue, err := parseTmplFunc(vv) - if err != nil { - return err - } - 
switch { - case strings.EqualFold(newValue, "TRUE"): + if err := parseVariableFromArray(v, parseTmplFunc); err != nil { + return err + } + } - val.Set(reflect.ValueOf(true)) - case strings.EqualFold(newValue, "FALSE"): - val.Set(reflect.ValueOf(false)) - default: - val.Set(reflect.ValueOf(newValue)) - } - } - } else { - if err := parseVariable(val.Interface(), parseTmplFunc); err != nil { - return err - } + return nil +} + +// parseVariableFromMap parse to variable when the v is map. +func parseVariableFromMap(v any, parseTmplFunc func(string) (string, error)) error { + for _, kv := range reflect.ValueOf(v).MapKeys() { + val := reflect.ValueOf(v).MapIndex(kv) + if vv, ok := val.Interface().(string); ok { + if !tmpl.IsTmplSyntax(vv) { + continue + } + + newValue, err := parseTmplFunc(vv) + if err != nil { + return err + } + + switch { + case strings.EqualFold(newValue, "TRUE"): + reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(true)) + case strings.EqualFold(newValue, "FALSE"): + reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(false)) + default: + reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(newValue)) + } + } else { + if err := parseVariable(val.Interface(), parseTmplFunc); err != nil { + return err } } } @@ -171,23 +179,79 @@ func parseVariable(v any, parseTmplFunc func(string) (string, error)) error { return nil } +// parseVariableFromArray parse to variable when the v is slice. 
+func parseVariableFromArray(v any, parseTmplFunc func(string) (string, error)) error { + for i := range reflect.ValueOf(v).Len() { + val := reflect.ValueOf(v).Index(i) + if vv, ok := val.Interface().(string); ok { + if !tmpl.IsTmplSyntax(vv) { + continue + } + + newValue, err := parseTmplFunc(vv) + if err != nil { + return err + } + + switch { + case strings.EqualFold(newValue, "TRUE"): + val.Set(reflect.ValueOf(true)) + case strings.EqualFold(newValue, "FALSE"): + val.Set(reflect.ValueOf(false)) + default: + val.Set(reflect.ValueOf(newValue)) + } + } else { + if err := parseVariable(val.Interface(), parseTmplFunc); err != nil { + return err + } + } + } + + return nil +} + +// setlocalhostVarialbe set default vars when hostname is "localhost" +func setlocalhostVarialbe(hostname string, v value, hostVars map[string]any) { + if hostname == _const.VariableLocalHost { + if os, ok := v.Hosts[hostname].RemoteVars[_const.VariableOS]; ok { + // try to set hostname by current actual hostname. + if osd, ok := os.(map[string]any); ok { + hostVars[_const.VariableHostName] = osd[_const.VariableOSHostName] + } + } + + if _, ok := hostVars[_const.VariableIPv4]; !ok { + hostVars[_const.VariableIPv4] = getLocalIP(_const.VariableIPv4) + } + + if _, ok := hostVars[_const.VariableIPv6]; !ok { + hostVars[_const.VariableIPv6] = getLocalIP(_const.VariableIPv6) + } + } +} + // getLocalIP get the ipv4 or ipv6 for localhost machine func getLocalIP(ipType string) string { addrs, err := net.InterfaceAddrs() if err != nil { klog.ErrorS(err, "get network address error") } + for _, addr := range addrs { if ipNet, ok := addr.(*net.IPNet); ok && !ipNet.IP.IsLoopback() { if ipType == _const.VariableIPv4 && ipNet.IP.To4() != nil { return ipNet.IP.String() } + if ipType == _const.VariableIPv6 && ipNet.IP.To16() != nil && ipNet.IP.To4() == nil { return ipNet.IP.String() } } } + klog.V(4).Infof("connot get local %s address", ipType) + return "" } @@ -195,15 +259,18 @@ func getLocalIP(ipType string) 
string { func StringVar(d map[string]any, args map[string]any, key string) (string, error) { val, ok := args[key] if !ok { - klog.V(4).ErrorS(nil, "cannot find variable", "key", key) + klog.V(4).InfoS("cannot find variable", "key", key) + return "", fmt.Errorf("cannot find variable \"%s\"", key) } // convert to string sv, ok := val.(string) if !ok { klog.V(4).ErrorS(nil, "variable is not string", "key", key) + return "", fmt.Errorf("variable \"%s\" is not string", key) } + return tmpl.ParseString(d, sv) } @@ -211,72 +278,137 @@ func StringVar(d map[string]any, args map[string]any, key string) (string, error func StringSliceVar(d map[string]any, vars map[string]any, key string) ([]string, error) { val, ok := vars[key] if !ok { - klog.V(4).ErrorS(nil, "cannot find variable", "key", key) + klog.V(4).InfoS("cannot find variable", "key", key) + return nil, fmt.Errorf("cannot find variable \"%s\"", key) } + switch valv := val.(type) { case []any: var ss []string + for _, a := range valv { av, ok := a.(string) if !ok { klog.V(6).InfoS("variable is not string", "key", key) + return nil, nil } + as, err := tmpl.ParseString(d, av) if err != nil { return nil, err } + ss = append(ss, as) } + return ss, nil case string: as, err := tmpl.ParseString(d, valv) if err != nil { klog.V(4).ErrorS(err, "parse variable error", "key", key) + return nil, err } + var ss []string if err := json.Unmarshal([]byte(as), &ss); err == nil { return ss, nil } + return []string{as}, nil default: klog.V(4).ErrorS(nil, "unsupported variable type", "key", key) + return nil, fmt.Errorf("unsupported variable \"%s\" type", key) } } // IntVar get int value by key -func IntVar(d map[string]any, vars map[string]any, key string) (int, error) { +func IntVar(d map[string]any, vars map[string]any, key string) (*int, error) { val, ok := vars[key] if !ok { - klog.V(4).ErrorS(nil, "cannot find variable", "key", key) - return 0, fmt.Errorf("cannot find variable \"%s\"", key) + klog.V(4).InfoS("cannot find variable", 
"key", key) + + return nil, fmt.Errorf("cannot find variable \"%s\"", key) } // default convert to int v := reflect.ValueOf(val) switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return int(v.Int()), nil + return ptr.To(int(v.Int())), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return int(v.Uint()), nil + return ptr.To(int(v.Uint())), nil case reflect.Float32, reflect.Float64: - return int(v.Float()), nil + return ptr.To(int(v.Float())), nil case reflect.String: vs, err := tmpl.ParseString(d, v.String()) if err != nil { klog.V(4).ErrorS(err, "parse string variable error", "key", key) - return 0, err + + return nil, err } - return strconv.Atoi(vs) + + atoi, err := strconv.Atoi(vs) + if err != nil { + klog.V(4).ErrorS(err, "parse convert string to int error", "key", key) + + return nil, err + } + + return ptr.To(atoi), nil default: klog.V(4).ErrorS(nil, "unsupported variable type", "key", key) - return 0, fmt.Errorf("unsupported variable \"%s\" type", key) + + return nil, fmt.Errorf("unsupported variable \"%s\" type", key) } } -// Extension2Variables convert extension to variables +// BoolVar get bool value by key +func BoolVar(d map[string]any, args map[string]any, key string) (*bool, error) { + val, ok := args[key] + if !ok { + klog.V(4).InfoS("cannot find variable", "key", key) + + return nil, fmt.Errorf("cannot find variable \"%s\"", key) + } + // default convert to int + v := reflect.ValueOf(val) + switch v.Kind() { + case reflect.Bool: + return ptr.To(v.Bool()), nil + case reflect.String: + vs, err := tmpl.ParseString(d, v.String()) + if err != nil { + klog.V(4).ErrorS(err, "parse string variable error", "key", key) + + return nil, err + } + + if strings.EqualFold(vs, "TRUE") { + return ptr.To(true), nil + } + + if strings.EqualFold(vs, "FALSE") { + return ptr.To(false), nil + } + } + + return nil, fmt.Errorf("unsupported variable \"%s\" type", key) +} + +// DurationVar 
get time.Duration value by key +func DurationVar(d map[string]any, args map[string]any, key string) (time.Duration, error) { + stringVar, err := StringVar(d, args, key) + if err != nil { + return 0, err + } + + return time.ParseDuration(stringVar) +} + +// Extension2Variables convert runtime.RawExtension to variables func Extension2Variables(ext runtime.RawExtension) map[string]any { if len(ext.Raw) == 0 { return make(map[string]any) @@ -286,10 +418,12 @@ func Extension2Variables(ext runtime.RawExtension) map[string]any { if err := json.Unmarshal(ext.Raw, &data); err != nil { klog.V(4).ErrorS(err, "failed to unmarshal extension to variables") } + return data } -// Extension2Slice convert extension to slice +// Extension2Slice convert runtime.RawExtension to slice +// if runtime.RawExtension contains tmpl syntax, parse it. func Extension2Slice(d map[string]any, ext runtime.RawExtension) []any { if len(ext.Raw) == 0 { return nil @@ -305,16 +439,21 @@ func Extension2Slice(d map[string]any, ext runtime.RawExtension) []any { if err != nil { klog.ErrorS(err, "extension2string error", "input", string(ext.Raw)) } + if err := json.Unmarshal([]byte(val), &data); err == nil { return data } + return []any{val} } +// Extension2String convert runtime.RawExtension to string. +// if runtime.RawExtension contains tmpl syntax, parse it. 
func Extension2String(d map[string]any, ext runtime.RawExtension) (string, error) { if len(ext.Raw) == 0 { return "", nil } + var input = string(ext.Raw) // try to escape string if ns, err := strconv.Unquote(string(ext.Raw)); err == nil { diff --git a/pkg/variable/helper_test.go b/pkg/variable/helper_test.go index a00415aa..600da3cf 100644 --- a/pkg/variable/helper_test.go +++ b/pkg/variable/helper_test.go @@ -285,6 +285,7 @@ func TestParseVariable(t *testing.T) { if err != nil { t.Fatal(err) } + assert.Equal(t, tc.except, tc.data) }) } diff --git a/pkg/variable/internal.go b/pkg/variable/internal.go index 2ec6fd5a..3d8d0d47 100644 --- a/pkg/variable/internal.go +++ b/pkg/variable/internal.go @@ -18,6 +18,7 @@ package variable import ( "encoding/json" + "errors" "fmt" "reflect" "regexp" @@ -56,10 +57,12 @@ type value struct { func (v value) deepCopy() value { nv := value{} + data, err := json.Marshal(v) if err != nil { return value{} } + if err := json.Unmarshal(data, &nv); err != nil { return value{} } @@ -70,6 +73,7 @@ func (v value) deepCopy() value { // getParameterVariable get defined variable from inventory and config func (v value) getParameterVariable() map[string]any { globalHosts := make(map[string]any) + for hostname := range v.Hosts { // get host vars hostVars := Extension2Variables(v.Inventory.Spec.Hosts[hostname]) @@ -86,25 +90,14 @@ func (v value) getParameterVariable() map[string]any { } } // set default localhost - if hostname == _const.VariableLocalHost { - if os, ok := v.Hosts[hostname].RemoteVars[_const.VariableOS]; ok { - // try to set hostname by current actual hostname. 
- hostVars[_const.VariableHostName] = os.(map[string]any)[_const.VariableOSHostName] - } - if _, ok := hostVars[_const.VariableIPv4]; !ok { - hostVars[_const.VariableIPv4] = getLocalIP(_const.VariableIPv4) - } - if _, ok := hostVars[_const.VariableIPv6]; !ok { - hostVars[_const.VariableIPv6] = getLocalIP(_const.VariableIPv6) - } - } - + setlocalhostVarialbe(hostname, v, hostVars) // merge inventory vars to host vars hostVars = combineVariables(hostVars, Extension2Variables(v.Inventory.Spec.Vars)) // merge config vars to host vars hostVars = combineVariables(hostVars, Extension2Variables(v.Config.Spec)) globalHosts[hostname] = hostVars } + var externalVal = make(map[string]any) // external vars for hostname := range globalHosts { @@ -128,70 +121,74 @@ type host struct { RuntimeVars map[string]any `json:"runtime"` } -func (v *variable) Key() string { - return v.key -} - +// Get vars func (v *variable) Get(f GetFunc) (any, error) { return f(v) } +// Merge hosts vars to variable and sync to resource func (v *variable) Merge(f MergeFunc) error { v.Lock() defer v.Unlock() old := v.value.deepCopy() + if err := f(v); err != nil { return err } + return v.syncSource(old) +} + +// syncSource sync hosts vars to source. +func (v *variable) syncSource(old value) error { for hn, hv := range v.value.Hosts { - if !reflect.DeepEqual(old.Hosts[hn], hv) { - if err := v.syncHosts(hn); err != nil { - klog.ErrorS(err, "sync host error", "hostname", hn) - } + if reflect.DeepEqual(old.Hosts[hn], hv) { + // nothing change skip. + continue + } + // write to source + data, err := json.MarshalIndent(hv, "", " ") + if err != nil { + klog.ErrorS(err, "marshal host data error", "hostname", hn) + + return err + } + + if err := v.source.Write(data, hn+".json"); err != nil { + klog.ErrorS(err, "write host data to local file error", "hostname", hn, "filename", hn+".json") } } return nil } -// syncHosts sync hosts data to local file. 
If hostname is empty, sync all hosts -func (v *variable) syncHosts(hostname ...string) error { - for _, hn := range hostname { - if hv, ok := v.value.Hosts[hn]; ok { - data, err := json.MarshalIndent(hv, "", " ") - if err != nil { - klog.ErrorS(err, "marshal host data error", "hostname", hn) - return err - } - if err := v.source.Write(data, fmt.Sprintf("%s.json", hn)); err != nil { - klog.ErrorS(err, "write host data to local file error", "hostname", hn, "filename", fmt.Sprintf("%s.json", hn)) - } - } - } - - return nil -} +// ***************************** GetFunc ***************************** // // GetHostnames get all hostnames from a group or host var GetHostnames = func(name []string) GetFunc { - return func(v Variable) (any, error) { - if _, ok := v.(*variable); !ok { - return nil, fmt.Errorf("variable type error") - } - data := v.(*variable).value + if len(name) == 0 { + return emptyGetFunc + } + return func(v Variable) (any, error) { + vv, ok := v.(*variable) + if !ok { + return nil, errors.New("variable type error") + } var hs []string for _, n := range name { // add host to hs - if _, ok := data.Hosts[n]; ok { + if _, ok := vv.value.Hosts[n]; ok { hs = append(hs, n) } // add group's host to gs - for gn, gv := range convertGroup(data.Inventory) { + for gn, gv := range convertGroup(vv.value.Inventory) { if gn == n { - hs = mergeSlice(hs, gv.([]string)) + if gvd, ok := gv.([]string); ok { + hs = mergeSlice(hs, gvd) + } + break } } @@ -202,9 +199,10 @@ var GetHostnames = func(name []string) GetFunc { index, err := strconv.Atoi(match[2]) if err != nil { klog.V(4).ErrorS(err, "convert index to int error", "index", match[2]) + return nil, err } - if group, ok := convertGroup(data.Inventory)[match[1]].([]string); ok { + if group, ok := convertGroup(vv.value.Inventory)[match[1]].([]string); ok { if index >= len(group) { return nil, fmt.Errorf("index %v out of range for group %s", index, group) } @@ -215,7 +213,7 @@ var GetHostnames = func(name []string) GetFunc { 
// add random host in group regexForRandom := regexp.MustCompile(`^(.+?)\s*\|\s*random$`) if match := regexForRandom.FindStringSubmatch(strings.TrimSpace(n)); match != nil { - if group, ok := convertGroup(data.Inventory)[match[1]].([]string); ok { + if group, ok := convertGroup(vv.value.Inventory)[match[1]].([]string); ok { hs = append(hs, group[rand.Intn(len(group))]) } } @@ -226,117 +224,38 @@ var GetHostnames = func(name []string) GetFunc { } // GetParamVariable get param variable which is combination of inventory, config. +// if hostname is empty, return all host's param variable. var GetParamVariable = func(hostname string) GetFunc { return func(v Variable) (any, error) { - if _, ok := v.(*variable); !ok { - return nil, fmt.Errorf("variable type error") + vv, ok := v.(*variable) + if !ok { + return nil, errors.New("variable type error") } - data := v.(*variable).value if hostname == "" { - return data.getParameterVariable(), nil - } - return data.getParameterVariable()[hostname], nil - } -} - -// MergeRemoteVariable merge variable to remote. -var MergeRemoteVariable = func(hostname string, data map[string]any) MergeFunc { - return func(v Variable) error { - if _, ok := v.(*variable); !ok { - return fmt.Errorf("variable type error") - } - vv := v.(*variable).value - - if hostname == "" { - return fmt.Errorf("when merge source is remote. HostName cannot be empty") - } - if _, ok := vv.Hosts[hostname]; !ok { - return fmt.Errorf("when merge source is remote. HostName %s not exist", hostname) + return vv.value.getParameterVariable(), nil } - // it should not be changed - if hv := vv.Hosts[hostname]; len(hv.RemoteVars) == 0 { - hv.RemoteVars = data - vv.Hosts[hostname] = hv - } - - return nil - } -} - -// MergeRuntimeVariable parse variable by specific host and merge to the host. 
-var MergeRuntimeVariable = func(hostName string, vd map[string]any) MergeFunc { - return func(v Variable) error { - vv := v.(*variable).value - // merge to specify host - curVariable, err := v.Get(GetAllVariable(hostName)) - if err != nil { - return err - } - // parse variable - if err := parseVariable(vd, func(s string) (string, error) { - // parse use total variable. the task variable should not contain template syntax. - return tmpl.ParseString(combineVariables(vd, curVariable.(map[string]any)), s) - }); err != nil { - return err - } - - if _, ok := v.(*variable); !ok { - return fmt.Errorf("variable type error") - } - hv := vv.Hosts[hostName] - hv.RuntimeVars = combineVariables(hv.RuntimeVars, vd) - vv.Hosts[hostName] = hv - - return nil - } -} - -// MergeAllRuntimeVariable parse variable by specific host and merge to all hosts. -var MergeAllRuntimeVariable = func(hostName string, vd map[string]any) MergeFunc { - return func(v Variable) error { - vv := v.(*variable).value - // merge to specify host - curVariable, err := v.Get(GetAllVariable(hostName)) - if err != nil { - return err - } - // parse variable - if err := parseVariable(vd, func(s string) (string, error) { - // parse use total variable. the task variable should not contain template syntax. 
- return tmpl.ParseString(combineVariables(vd, curVariable.(map[string]any)), s) - }); err != nil { - return err - } - - for h := range vv.Hosts { - if _, ok := v.(*variable); !ok { - return fmt.Errorf("variable type error") - } - hv := vv.Hosts[h] - hv.RuntimeVars = combineVariables(hv.RuntimeVars, vd) - vv.Hosts[h] = hv - } - - return nil + return vv.value.getParameterVariable()[hostname], nil } } // GetAllVariable get all variable for a given host var GetAllVariable = func(hostName string) GetFunc { return func(v Variable) (any, error) { - if _, ok := v.(*variable); !ok { - return nil, fmt.Errorf("variable type error") + vv, ok := v.(*variable) + if !ok { + return nil, errors.New("variable type error") } - data := v.(*variable).value result := make(map[string]any) // find from runtime - result = combineVariables(result, data.Hosts[hostName].RuntimeVars) + result = combineVariables(result, vv.value.Hosts[hostName].RuntimeVars) // find from remote - result = combineVariables(result, data.Hosts[hostName].RemoteVars) + result = combineVariables(result, vv.value.Hosts[hostName].RemoteVars) // find from global. 
- if vv, ok := data.getParameterVariable()[hostName]; ok { - result = combineVariables(result, vv.(map[string]any)) + if vv, ok := vv.value.getParameterVariable()[hostName]; ok { + if vvd, ok := vv.(map[string]any); ok { + result = combineVariables(result, vvd) + } } return result, nil @@ -346,14 +265,123 @@ var GetAllVariable = func(hostName string) GetFunc { // GetHostMaxLength get the max length for all hosts var GetHostMaxLength = func() GetFunc { return func(v Variable) (any, error) { - if _, ok := v.(*variable); !ok { - return nil, fmt.Errorf("variable type error") + vv, ok := v.(*variable) + if !ok { + return nil, errors.New("variable type error") } - data := v.(*variable).value var hostNameMaxLen int - for k := range data.Hosts { + for k := range vv.value.Hosts { hostNameMaxLen = max(len(k), hostNameMaxLen) } + return hostNameMaxLen, nil } } + +// ***************************** MergeFunc ***************************** // + +// MergeRemoteVariable merge variable to remote. +var MergeRemoteVariable = func(data map[string]any, hostname string) MergeFunc { + return func(v Variable) error { + vv, ok := v.(*variable) + if !ok { + return errors.New("variable type error") + } + + if hostname == "" { + return errors.New("when merge source is remote. HostName cannot be empty") + } + if _, ok := vv.value.Hosts[hostname]; !ok { + return fmt.Errorf("when merge source is remote. HostName %s not exist", hostname) + } + + // it should not be changed + if hv := vv.value.Hosts[hostname]; len(hv.RemoteVars) == 0 { + hv.RemoteVars = data + vv.value.Hosts[hostname] = hv + } + + return nil + } +} + +// MergeRuntimeVariable parse variable by specific host and merge to the host. 
+var MergeRuntimeVariable = func(data map[string]any, hosts ...string) MergeFunc { + if len(data) == 0 || len(hosts) == 0 { + // skip + return emptyMergeFunc + } + + return func(v Variable) error { + for _, hostName := range hosts { + vv, ok := v.(*variable) + if !ok { + return errors.New("variable type error") + } + // merge to specify host + curVariable, err := v.Get(GetAllVariable(hostName)) + if err != nil { + return err + } + // parse variable + if err := parseVariable(data, func(s string) (string, error) { + // parse use total variable. the task variable should not contain template syntax. + cv, ok := curVariable.(map[string]any) + if !ok { + return "", errors.New("variable type error") + } + + return tmpl.ParseString(combineVariables(data, cv), s) + }); err != nil { + return err + } + + if _, ok := v.(*variable); !ok { + return errors.New("variable type error") + } + hv := vv.value.Hosts[hostName] + hv.RuntimeVars = combineVariables(hv.RuntimeVars, data) + vv.value.Hosts[hostName] = hv + } + + return nil + } +} + +// MergeAllRuntimeVariable parse variable by specific host and merge to all hosts. +var MergeAllRuntimeVariable = func(data map[string]any, hostName string) MergeFunc { + return func(v Variable) error { + vv, ok := v.(*variable) + if !ok { + return errors.New("variable type error") + } + // merge to specify host + curVariable, err := v.Get(GetAllVariable(hostName)) + if err != nil { + return err + } + // parse variable + if err := parseVariable(data, func(s string) (string, error) { + // parse use total variable. the task variable should not contain template syntax. 
+ cv, ok := curVariable.(map[string]any) + if !ok { + return "", errors.New("variable type error") + } + + return tmpl.ParseString(combineVariables(data, cv), s) + }); err != nil { + return err + } + + for h := range vv.value.Hosts { + if _, ok := v.(*variable); !ok { + return errors.New("variable type error") + } + hv := vv.value.Hosts[h] + hv.RuntimeVars = combineVariables(hv.RuntimeVars, data) + vv.value.Hosts[h] = hv + } + + return nil + } +} diff --git a/pkg/variable/internal_test.go b/pkg/variable/internal_test.go index 321809c2..07dcd16b 100644 --- a/pkg/variable/internal_test.go +++ b/pkg/variable/internal_test.go @@ -28,12 +28,12 @@ import ( func TestGetAllVariable(t *testing.T) { testcases := []struct { name string - value value + value *value except map[string]any }{ { name: "global override runtime variable", - value: value{ + value: &value{ Config: kkcorev1.Config{ Spec: runtime.RawExtension{ Raw: []byte(`{ @@ -62,13 +62,13 @@ func TestGetAllVariable(t *testing.T) { "artifact": map[string]any{ "images": []any{"abc"}, }, - "groups": map[string]interface{}{"all": []string{"localhost"}}, - "inventory_hosts": map[string]interface{}{ - "localhost": map[string]interface{}{ + "groups": map[string]any{"all": []string{"localhost"}}, + "inventory_hosts": map[string]any{ + "localhost": map[string]any{ "internal_ipv4": "127.0.0.1", "internal_ipv6": "::1", - "artifact": map[string]interface{}{ - "images": []interface{}{"abc"}, + "artifact": map[string]any{ + "images": []any{"abc"}, }, "inventory_name": "localhost", "hostname": "localhost", @@ -82,11 +82,13 @@ func TestGetAllVariable(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - v := variable{value: &tc.value} + v := variable{value: tc.value} + result, err := v.Get(GetAllVariable("localhost")) if err != nil { t.Fatal(err) } + assert.Equal(t, tc.except, result) }) } diff --git a/pkg/variable/source/file.go b/pkg/variable/source/file.go index d4b3ee0c..953908e8 100644 --- 
a/pkg/variable/source/file.go +++ b/pkg/variable/source/file.go @@ -24,6 +24,21 @@ import ( "k8s.io/klog/v2" ) +var _ Source = &fileSource{} + +// NewFileSource returns a new fileSource. +func NewFileSource(path string) (Source, error) { + if _, err := os.Stat(path); err != nil { + if err := os.MkdirAll(path, os.ModePerm); err != nil { + klog.V(4).ErrorS(err, "create source path error", "path", path) + + return nil, err + } + } + + return &fileSource{path: path}, nil +} + type fileSource struct { path string } @@ -32,13 +47,16 @@ func (f *fileSource) Read() (map[string][]byte, error) { de, err := os.ReadDir(f.path) if err != nil { klog.V(4).ErrorS(err, "read dir error", "path", f.path) + return nil, err } + var result map[string][]byte for _, entry := range de { if entry.IsDir() { continue } + if result == nil { result = make(map[string][]byte) } @@ -47,8 +65,10 @@ func (f *fileSource) Read() (map[string][]byte, error) { data, err := os.ReadFile(filepath.Join(f.path, entry.Name())) if err != nil { klog.V(4).ErrorS(err, "read file error", "filename", entry.Name()) + return nil, err } + result[entry.Name()] = data } } @@ -60,12 +80,16 @@ func (f *fileSource) Write(data []byte, filename string) error { file, err := os.Create(filepath.Join(f.path, filename)) if err != nil { klog.V(4).ErrorS(err, "create file error", "filename", filename) + return err } defer file.Close() + if _, err := file.Write(data); err != nil { klog.V(4).ErrorS(err, "write file error", "filename", filename) + return err } + return nil } diff --git a/pkg/variable/source/memory.go b/pkg/variable/source/memory.go new file mode 100644 index 00000000..0ad72e71 --- /dev/null +++ b/pkg/variable/source/memory.go @@ -0,0 +1,24 @@ +package source + +var _ Source = &memorySource{} + +type memorySource struct { + data map[string][]byte +} + +// NewMemorySource returns a new memorySource. 
+func NewMemorySource() Source { + return &memorySource{ + data: make(map[string][]byte), + } +} + +func (m *memorySource) Read() (map[string][]byte, error) { + return m.data, nil +} + +func (m *memorySource) Write(data []byte, filename string) error { + m.data[filename] = data + + return nil +} diff --git a/pkg/variable/source/source.go b/pkg/variable/source/source.go index 82187174..600b4dd0 100644 --- a/pkg/variable/source/source.go +++ b/pkg/variable/source/source.go @@ -16,32 +16,18 @@ limitations under the License. package source -import ( - "os" +// SourceType how to store variable +type SourceType int - "k8s.io/klog/v2" +const ( + // MemorySource store variable in memory + MemorySource SourceType = iota + // FileSource store variable in file + FileSource SourceType = iota ) // Source is the source from which config is loaded. type Source interface { Read() (map[string][]byte, error) Write(data []byte, filename string) error - //Watch() (Watcher, error) -} - -// Watcher watches a source for changes. -type Watcher interface { - Next() ([]byte, error) - Stop() error -} - -// New returns a new source. 
-func New(path string) (Source, error) { - if _, err := os.Stat(path); err != nil { - if err := os.MkdirAll(path, os.ModePerm); err != nil { - klog.V(4).ErrorS(err, "create source path error", "path", path) - return nil, err - } - } - return &fileSource{path: path}, nil } diff --git a/pkg/variable/variable.go b/pkg/variable/variable.go index aba9afe1..67dfd96e 100644 --- a/pkg/variable/variable.go +++ b/pkg/variable/variable.go @@ -19,6 +19,8 @@ package variable import ( "context" "encoding/json" + "errors" + "fmt" "path/filepath" "strings" @@ -31,36 +33,67 @@ import ( "github.com/kubesphere/kubekey/v4/pkg/variable/source" ) +var ( + emptyGetFunc GetFunc = func(Variable) (any, error) { + return nil, errors.New("nil value returned") + } + emptyMergeFunc MergeFunc = func(Variable) error { + return nil + } +) + +// GetFunc get data from variable type GetFunc func(Variable) (any, error) +// MergeFunc merge data to variable type MergeFunc func(Variable) error +// Variable store all vars which pipeline used. type Variable interface { - Key() string - Get(GetFunc) (any, error) - Merge(MergeFunc) error + Get(getFunc GetFunc) (any, error) + Merge(mergeFunc MergeFunc) error } // New variable. generate value from config args. and render to source. 
-func New(client ctrlclient.Client, pipeline kkcorev1.Pipeline) (Variable, error) { +func New(ctx context.Context, client ctrlclient.Client, pipeline kkcorev1.Pipeline, st source.SourceType) (Variable, error) { + var err error // new source - s, err := source.New(filepath.Join(_const.RuntimeDirFromPipeline(pipeline), _const.RuntimePipelineVariableDir)) - if err != nil { - klog.V(4).ErrorS(err, "create file source failed", "path", filepath.Join(_const.RuntimeDirFromPipeline(pipeline), _const.RuntimePipelineVariableDir), "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) - return nil, err + var s source.Source + + switch st { + case source.MemorySource: + s = source.NewMemorySource() + case source.FileSource: + s, err = source.NewFileSource(filepath.Join(_const.RuntimeDirFromPipeline(pipeline), _const.RuntimePipelineVariableDir)) + if err != nil { + klog.V(4).ErrorS(err, "create file source failed", "path", filepath.Join(_const.RuntimeDirFromPipeline(pipeline), _const.RuntimePipelineVariableDir), "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) + + return nil, err + } + default: + return nil, fmt.Errorf("unsupported source type: %v", st) } + // get config var config = &kkcorev1.Config{} - if err := client.Get(context.Background(), types.NamespacedName{Namespace: pipeline.Spec.ConfigRef.Namespace, Name: pipeline.Spec.ConfigRef.Name}, config); err != nil { - klog.V(4).ErrorS(err, "get config from pipeline error", "config", pipeline.Spec.ConfigRef, "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) - return nil, err + if pipeline.Spec.ConfigRef != nil { + if err := client.Get(ctx, types.NamespacedName{Namespace: pipeline.Spec.ConfigRef.Namespace, Name: pipeline.Spec.ConfigRef.Name}, config); err != nil { + klog.V(4).ErrorS(err, "get config from pipeline error", "config", pipeline.Spec.ConfigRef, "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) + + return nil, err + } } + // get inventory var inventory = &kkcorev1.Inventory{} - if err := 
client.Get(context.Background(), types.NamespacedName{Namespace: pipeline.Spec.InventoryRef.Namespace, Name: pipeline.Spec.InventoryRef.Name}, inventory); err != nil { - klog.V(4).ErrorS(err, "get inventory from pipeline error", "inventory", pipeline.Spec.InventoryRef, "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) - return nil, err + if pipeline.Spec.InventoryRef != nil { + if err := client.Get(ctx, types.NamespacedName{Namespace: pipeline.Spec.InventoryRef.Namespace, Name: pipeline.Spec.InventoryRef.Name}, inventory); err != nil { + klog.V(4).ErrorS(err, "get inventory from pipeline error", "inventory", pipeline.Spec.InventoryRef, "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) + + return nil, err + } } + v := &variable{ key: string(pipeline.UID), source: s, @@ -70,10 +103,13 @@ func New(client ctrlclient.Client, pipeline kkcorev1.Pipeline) (Variable, error) Hosts: make(map[string]host), }, } - for _, hostname := range convertGroup(*inventory)["all"].([]string) { - v.value.Hosts[hostname] = host{ - RemoteVars: make(map[string]any), - RuntimeVars: make(map[string]any), + + if gd, ok := convertGroup(*inventory)["all"].([]string); ok { + for _, hostname := range gd { + v.value.Hosts[hostname] = host{ + RemoteVars: make(map[string]any), + RuntimeVars: make(map[string]any), + } } } @@ -81,15 +117,19 @@ func New(client ctrlclient.Client, pipeline kkcorev1.Pipeline) (Variable, error) data, err := v.source.Read() if err != nil { klog.V(4).ErrorS(err, "read data from source error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) + return nil, err } + for k, d := range data { // set hosts h := host{} if err := json.Unmarshal(d, &h); err != nil { klog.V(4).ErrorS(err, "unmarshal host error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline)) + return nil, err } + v.value.Hosts[strings.TrimSuffix(k, ".json")] = h } diff --git a/plugins/playbooks/upgrade_kernel.yaml b/plugins/playbooks/upgrade_kernel.yaml new file mode 100644 index 00000000..f1288d40 
--- /dev/null +++ b/plugins/playbooks/upgrade_kernel.yaml @@ -0,0 +1,16 @@ +--- +- hosts: + - localhost + vars_files: + - vars/upgrade_kernel.yaml + tags: ["always"] + roles: + - os/init-kernel + +- hosts: + - os + vars_files: + - vars/upgrade_kernel.yaml + tags: ["always"] + roles: + - os/upgrade-kernel diff --git a/plugins/playbooks/vars/upgrade_kernel.yaml b/plugins/playbooks/vars/upgrade_kernel.yaml new file mode 100644 index 00000000..13eff7e0 --- /dev/null +++ b/plugins/playbooks/vars/upgrade_kernel.yaml @@ -0,0 +1,2 @@ +kernel_version: 5.4.278-1.el7 +arch: amd64 diff --git a/plugins/roles/os/init-kernel/defaults/main.yaml b/plugins/roles/os/init-kernel/defaults/main.yaml new file mode 100644 index 00000000..c78b7217 --- /dev/null +++ b/plugins/roles/os/init-kernel/defaults/main.yaml @@ -0,0 +1,5 @@ +rpm_url: + kernel_lt: + amd64: http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/kernel-lt-{{ .kernel_version }}.elrepo.x86_64.rpm + kernel_lt_devel: + amd64: http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/kernel-lt-devel-{{ .kernel_version }}.elrepo.x86_64.rpm diff --git a/plugins/roles/os/init-kernel/tasks/centos.yaml b/plugins/roles/os/init-kernel/tasks/centos.yaml new file mode 100644 index 00000000..8faa937a --- /dev/null +++ b/plugins/roles/os/init-kernel/tasks/centos.yaml @@ -0,0 +1,30 @@ +--- +- name: Download kernel-lt rpm + command: | + artifact_name={{ get .rpm_url.kernel_lt .arch | splitList "/" | last }} + artifact_path={{ .work_dir }}/kubekey/kernel/{{ .kernel_version }}/{{ .arch }} + if [ ! 
-f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .rpm_url.kernel_lt .arch }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ get .rpm_url.kernel_lt .arch }} + fi + +- name: Download kernel-lt-devel rpm + command: | + artifact_name={{ get .rpm_url.kernel_lt_devel .arch | splitList "/" | last }} + artifact_path={{ .work_dir }}/kubekey/kernel/{{ .kernel_version }}/{{ .arch }} + if [ ! -f $artifact_path/$artifact_name ]; then + mkdir -p $artifact_path + # download online + http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .rpm_url.kernel_lt_devel .arch }}) + if [ $http_code != 200 ]; then + echo "http code is $http_code" + exit 1 + fi + curl -L -o $artifact_path/$artifact_name {{ get .rpm_url.kernel_lt_devel .arch }} + fi diff --git a/plugins/roles/os/init-kernel/tasks/main.yaml b/plugins/roles/os/init-kernel/tasks/main.yaml new file mode 100644 index 00000000..c8f997e3 --- /dev/null +++ b/plugins/roles/os/init-kernel/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- include_tasks: centos.yaml + tags: ["centos"] diff --git a/plugins/roles/os/upgrade-kernel/tasks/centos.yaml b/plugins/roles/os/upgrade-kernel/tasks/centos.yaml new file mode 100644 index 00000000..5780a1e9 --- /dev/null +++ b/plugins/roles/os/upgrade-kernel/tasks/centos.yaml @@ -0,0 +1,31 @@ +--- +- name: add aliyuns repo + command: | + now=$(date +"%Y-%m-%d %H:%M:%S") + cp /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak-$now + sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \ + -e 's|^#baseurl=http://mirror.centos.org|baseurl=https://mirrors.aliyun.com|g' \ + -i.bak \ + /etc/yum.repos.d/CentOS-Base.repo + sudo yum clean all + sudo yum makecache + +- name: install necessary dependency rpm + command: | + sudo yum install linux-firmware perl -y + +- name: copy rpm to remote + copy: + src: | + {{ .work_dir 
}}/kubekey/kernel/{{ .kernel_version }}/{{ .arch }}/ + dest: /tmp/kubekey/kernel/ + +- name: install rpm + command: | + rpm -ivh /tmp/kubekey/kernel/kernel-lt* + +- name: set kernel + command: | + grub2-set-default 0 + grub2-mkconfig -o /boot/grub2/grub.cfg +# reboot -h now diff --git a/plugins/roles/os/upgrade-kernel/tasks/main.yaml b/plugins/roles/os/upgrade-kernel/tasks/main.yaml new file mode 100644 index 00000000..c8f997e3 --- /dev/null +++ b/plugins/roles/os/upgrade-kernel/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- include_tasks: centos.yaml + tags: ["centos"] diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go new file mode 100644 index 00000000..c8ae0aaf --- /dev/null +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -0,0 +1,698 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "fmt" + "path" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Name = name + + return action +} + +func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Subresource = subresource + action.Name = name + + return action +} + +func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts interface{}) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts interface{}) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + 
action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Object = object + + return action +} + +func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Subresource = subresource + action.Name = name + action.Object = object + + return action +} + +func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Subresource = subresource + action.Name = name + action.Object = object + + return action +} + +func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Object = object + + return action +} + +func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootPatchAction(resource 
schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Namespace = namespace + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Subresource = path.Join(subresources...) + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Subresource = path.Join(subresources...) 
+ action.Namespace = namespace + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Object = object + + return action +} +func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootDeleteAction(resource schema.GroupVersionResource, name string) DeleteActionImpl { + return NewRootDeleteActionWithOptions(resource, name, metav1.DeleteOptions{}) +} + +func NewRootDeleteActionWithOptions(resource schema.GroupVersionResource, name string, opts metav1.DeleteOptions) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Name = name + action.DeleteOptions = opts + + return action +} + +func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Subresource = subresource + action.Name = name + + return action +} + +func NewDeleteAction(resource schema.GroupVersionResource, namespace, name string) DeleteActionImpl { + return NewDeleteActionWithOptions(resource, namespace, name, metav1.DeleteOptions{}) +} + +func NewDeleteActionWithOptions(resource schema.GroupVersionResource, namespace, name string, opts metav1.DeleteOptions) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Namespace = namespace + 
action.Name = name + action.DeleteOptions = opts + + return action +} + +func NewDeleteSubresourceAction(resource schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} + + return action +} + +func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fieldSelector fields.Selector, resourceVersion string) { + var err error + switch t := opts.(type) { + case metav1.ListOptions: + labelSelector, err = labels.Parse(t.LabelSelector) + if err != nil { + panic(fmt.Errorf("invalid selector %q: %v", t.LabelSelector, err)) + } + fieldSelector, err = 
fields.ParseSelector(t.FieldSelector) + if err != nil { + panic(fmt.Errorf("invalid selector %q: %v", t.FieldSelector, err)) + } + resourceVersion = t.ResourceVersion + default: + panic(fmt.Errorf("expect a ListOptions %T", opts)) + } + if labelSelector == nil { + labelSelector = labels.Everything() + } + if fieldSelector == nil { + fieldSelector = fields.Everything() + } + return labelSelector, fieldSelector, resourceVersion +} + +func NewWatchAction(resource schema.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} + + return action +} + +func NewProxyGetAction(resource schema.GroupVersionResource, namespace, scheme, name, port, path string, params map[string]string) ProxyGetActionImpl { + action := ProxyGetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Scheme = scheme + action.Name = name + action.Port = port + action.Path = path + action.Params = params + return action +} + +type ListRestrictions struct { + Labels labels.Selector + Fields fields.Selector +} +type WatchRestrictions struct { + Labels labels.Selector + Fields fields.Selector + ResourceVersion string +} + +type Action interface { + GetNamespace() string + GetVerb() string + GetResource() schema.GroupVersionResource + GetSubresource() string + Matches(verb, resource string) bool + + // DeepCopy is used to copy an action to avoid any risk of accidental mutation. Most people never need to call this + // because the invocation logic deep copies before calls to storage and reactors. 
+ DeepCopy() Action +} + +type GenericAction interface { + Action + GetValue() interface{} +} + +type GetAction interface { + Action + GetName() string +} + +type ListAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type CreateAction interface { + Action + GetObject() runtime.Object +} + +type UpdateAction interface { + Action + GetObject() runtime.Object +} + +type DeleteAction interface { + Action + GetName() string + GetDeleteOptions() metav1.DeleteOptions +} + +type DeleteCollectionAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type PatchAction interface { + Action + GetName() string + GetPatchType() types.PatchType + GetPatch() []byte +} + +type WatchAction interface { + Action + GetWatchRestrictions() WatchRestrictions +} + +type ProxyGetAction interface { + Action + GetScheme() string + GetName() string + GetPort() string + GetPath() string + GetParams() map[string]string +} + +type ActionImpl struct { + Namespace string + Verb string + Resource schema.GroupVersionResource + Subresource string +} + +func (a ActionImpl) GetNamespace() string { + return a.Namespace +} +func (a ActionImpl) GetVerb() string { + return a.Verb +} +func (a ActionImpl) GetResource() schema.GroupVersionResource { + return a.Resource +} +func (a ActionImpl) GetSubresource() string { + return a.Subresource +} +func (a ActionImpl) Matches(verb, resource string) bool { + // Stay backwards compatible. 
+ if !strings.Contains(resource, "/") { + return strings.EqualFold(verb, a.Verb) && + strings.EqualFold(resource, a.Resource.Resource) + } + + parts := strings.SplitN(resource, "/", 2) + topresource, subresource := parts[0], parts[1] + + return strings.EqualFold(verb, a.Verb) && + strings.EqualFold(topresource, a.Resource.Resource) && + strings.EqualFold(subresource, a.Subresource) +} +func (a ActionImpl) DeepCopy() Action { + ret := a + return ret +} + +type GenericActionImpl struct { + ActionImpl + Value interface{} +} + +func (a GenericActionImpl) GetValue() interface{} { + return a.Value +} + +func (a GenericActionImpl) DeepCopy() Action { + return GenericActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + // TODO this is wrong, but no worse than before + Value: a.Value, + } +} + +type GetActionImpl struct { + ActionImpl + Name string +} + +func (a GetActionImpl) GetName() string { + return a.Name +} + +func (a GetActionImpl) DeepCopy() Action { + return GetActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + } +} + +type ListActionImpl struct { + ActionImpl + Kind schema.GroupVersionKind + Name string + ListRestrictions ListRestrictions +} + +func (a ListActionImpl) GetKind() schema.GroupVersionKind { + return a.Kind +} + +func (a ListActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +func (a ListActionImpl) DeepCopy() Action { + return ListActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Kind: a.Kind, + Name: a.Name, + ListRestrictions: ListRestrictions{ + Labels: a.ListRestrictions.Labels.DeepCopySelector(), + Fields: a.ListRestrictions.Fields.DeepCopySelector(), + }, + } +} + +type CreateActionImpl struct { + ActionImpl + Name string + Object runtime.Object +} + +func (a CreateActionImpl) GetObject() runtime.Object { + return a.Object +} + +func (a CreateActionImpl) DeepCopy() Action { + return CreateActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), 
+ Name: a.Name, + Object: a.Object.DeepCopyObject(), + } +} + +type UpdateActionImpl struct { + ActionImpl + Object runtime.Object +} + +func (a UpdateActionImpl) GetObject() runtime.Object { + return a.Object +} + +func (a UpdateActionImpl) DeepCopy() Action { + return UpdateActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Object: a.Object.DeepCopyObject(), + } +} + +type PatchActionImpl struct { + ActionImpl + Name string + PatchType types.PatchType + Patch []byte +} + +func (a PatchActionImpl) GetName() string { + return a.Name +} + +func (a PatchActionImpl) GetPatch() []byte { + return a.Patch +} + +func (a PatchActionImpl) GetPatchType() types.PatchType { + return a.PatchType +} + +func (a PatchActionImpl) DeepCopy() Action { + patch := make([]byte, len(a.Patch)) + copy(patch, a.Patch) + return PatchActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + PatchType: a.PatchType, + Patch: patch, + } +} + +type DeleteActionImpl struct { + ActionImpl + Name string + DeleteOptions metav1.DeleteOptions +} + +func (a DeleteActionImpl) GetName() string { + return a.Name +} + +func (a DeleteActionImpl) GetDeleteOptions() metav1.DeleteOptions { + return a.DeleteOptions +} + +func (a DeleteActionImpl) DeepCopy() Action { + return DeleteActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + DeleteOptions: *a.DeleteOptions.DeepCopy(), + } +} + +type DeleteCollectionActionImpl struct { + ActionImpl + ListRestrictions ListRestrictions +} + +func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +func (a DeleteCollectionActionImpl) DeepCopy() Action { + return DeleteCollectionActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + ListRestrictions: ListRestrictions{ + Labels: a.ListRestrictions.Labels.DeepCopySelector(), + Fields: a.ListRestrictions.Fields.DeepCopySelector(), + }, + } +} + +type WatchActionImpl struct { + ActionImpl + 
WatchRestrictions WatchRestrictions +} + +func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { + return a.WatchRestrictions +} + +func (a WatchActionImpl) DeepCopy() Action { + return WatchActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + WatchRestrictions: WatchRestrictions{ + Labels: a.WatchRestrictions.Labels.DeepCopySelector(), + Fields: a.WatchRestrictions.Fields.DeepCopySelector(), + ResourceVersion: a.WatchRestrictions.ResourceVersion, + }, + } +} + +type ProxyGetActionImpl struct { + ActionImpl + Scheme string + Name string + Port string + Path string + Params map[string]string +} + +func (a ProxyGetActionImpl) GetScheme() string { + return a.Scheme +} + +func (a ProxyGetActionImpl) GetName() string { + return a.Name +} + +func (a ProxyGetActionImpl) GetPort() string { + return a.Port +} + +func (a ProxyGetActionImpl) GetPath() string { + return a.Path +} + +func (a ProxyGetActionImpl) GetParams() map[string]string { + return a.Params +} + +func (a ProxyGetActionImpl) DeepCopy() Action { + params := map[string]string{} + for k, v := range a.Params { + params[k] = v + } + return ProxyGetActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Scheme: a.Scheme, + Name: a.Name, + Port: a.Port, + Path: a.Path, + Params: params, + } +} diff --git a/vendor/k8s.io/client-go/testing/fake.go b/vendor/k8s.io/client-go/testing/fake.go new file mode 100644 index 00000000..3ab9c1b0 --- /dev/null +++ b/vendor/k8s.io/client-go/testing/fake.go @@ -0,0 +1,220 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "sync" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +// Fake implements client.Interface. Meant to be embedded into a struct to get +// a default implementation. This makes faking out just the method you want to +// test easier. +type Fake struct { + sync.RWMutex + actions []Action // these may be castable to other types, but "Action" is the minimum + + // ReactionChain is the list of reactors that will be attempted for every + // request in the order they are tried. + ReactionChain []Reactor + // WatchReactionChain is the list of watch reactors that will be attempted + // for every request in the order they are tried. + WatchReactionChain []WatchReactor + // ProxyReactionChain is the list of proxy reactors that will be attempted + // for every request in the order they are tried. + ProxyReactionChain []ProxyReactor + + Resources []*metav1.APIResourceList +} + +// Reactor is an interface to allow the composition of reaction functions. +type Reactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles the action and returns results. It may choose to + // delegate by indicated handled=false. + React(action Action) (handled bool, ret runtime.Object, err error) +} + +// WatchReactor is an interface to allow the composition of watch functions. +type WatchReactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles a watch action and returns results. It may choose to + // delegate by indicating handled=false. 
+ React(action Action) (handled bool, ret watch.Interface, err error) +} + +// ProxyReactor is an interface to allow the composition of proxy get +// functions. +type ProxyReactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles a watch action and returns results. It may choose to + // delegate by indicating handled=false. + React(action Action) (handled bool, ret restclient.ResponseWrapper, err error) +} + +// ReactionFunc is a function that returns an object or error for a given +// Action. If "handled" is false, then the test client will ignore the +// results and continue to the next ReactionFunc. A ReactionFunc can describe +// reactions on subresources by testing the result of the action's +// GetSubresource() method. +type ReactionFunc func(action Action) (handled bool, ret runtime.Object, err error) + +// WatchReactionFunc is a function that returns a watch interface. If +// "handled" is false, then the test client will ignore the results and +// continue to the next ReactionFunc. +type WatchReactionFunc func(action Action) (handled bool, ret watch.Interface, err error) + +// ProxyReactionFunc is a function that returns a ResponseWrapper interface +// for a given Action. If "handled" is false, then the test client will +// ignore the results and continue to the next ProxyReactionFunc. +type ProxyReactionFunc func(action Action) (handled bool, ret restclient.ResponseWrapper, err error) + +// AddReactor appends a reactor to the end of the chain. +func (c *Fake) AddReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append(c.ReactionChain, &SimpleReactor{verb, resource, reaction}) +} + +// PrependReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append([]Reactor{&SimpleReactor{verb, resource, reaction}}, c.ReactionChain...) 
+} + +// AddWatchReactor appends a reactor to the end of the chain. +func (c *Fake) AddWatchReactor(resource string, reaction WatchReactionFunc) { + c.Lock() + defer c.Unlock() + c.WatchReactionChain = append(c.WatchReactionChain, &SimpleWatchReactor{resource, reaction}) +} + +// PrependWatchReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependWatchReactor(resource string, reaction WatchReactionFunc) { + c.Lock() + defer c.Unlock() + c.WatchReactionChain = append([]WatchReactor{&SimpleWatchReactor{resource, reaction}}, c.WatchReactionChain...) +} + +// AddProxyReactor appends a reactor to the end of the chain. +func (c *Fake) AddProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append(c.ProxyReactionChain, &SimpleProxyReactor{resource, reaction}) +} + +// PrependProxyReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append([]ProxyReactor{&SimpleProxyReactor{resource, reaction}}, c.ProxyReactionChain...) +} + +// Invokes records the provided Action and then invokes the ReactionFunc that +// handles the action if one exists. defaultReturnObj is expected to be of the +// same type a normal call would return. +func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) { + c.Lock() + defer c.Unlock() + + actionCopy := action.DeepCopy() + c.actions = append(c.actions, action.DeepCopy()) + for _, reactor := range c.ReactionChain { + if !reactor.Handles(actionCopy) { + continue + } + + handled, ret, err := reactor.React(actionCopy) + if !handled { + continue + } + + return ret, err + } + + return defaultReturnObj, nil +} + +// InvokesWatch records the provided Action and then invokes the ReactionFunc +// that handles the action if one exists. 
+func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) { + c.Lock() + defer c.Unlock() + + actionCopy := action.DeepCopy() + c.actions = append(c.actions, action.DeepCopy()) + for _, reactor := range c.WatchReactionChain { + if !reactor.Handles(actionCopy) { + continue + } + + handled, ret, err := reactor.React(actionCopy) + if !handled { + continue + } + + return ret, err + } + + return nil, fmt.Errorf("unhandled watch: %#v", action) +} + +// InvokesProxy records the provided Action and then invokes the ReactionFunc +// that handles the action if one exists. +func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper { + c.Lock() + defer c.Unlock() + + actionCopy := action.DeepCopy() + c.actions = append(c.actions, action.DeepCopy()) + for _, reactor := range c.ProxyReactionChain { + if !reactor.Handles(actionCopy) { + continue + } + + handled, ret, err := reactor.React(actionCopy) + if !handled || err != nil { + continue + } + + return ret + } + + return nil +} + +// ClearActions clears the history of actions called on the fake client. +func (c *Fake) ClearActions() { + c.Lock() + defer c.Unlock() + + c.actions = make([]Action, 0) +} + +// Actions returns a chronologically ordered slice fake actions called on the +// fake client. +func (c *Fake) Actions() []Action { + c.RLock() + defer c.RUnlock() + fa := make([]Action, len(c.actions)) + copy(fa, c.actions) + return fa +} diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go new file mode 100644 index 00000000..39684067 --- /dev/null +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -0,0 +1,581 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "reflect" + "sort" + "strings" + "sync" + + jsonpatch "github.com/evanphx/json-patch" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +// ObjectTracker keeps track of objects. It is intended to be used to +// fake calls to a server by returning objects based on their kind, +// namespace and name. +type ObjectTracker interface { + // Add adds an object to the tracker. If object being added + // is a list, its items are added separately. + Add(obj runtime.Object) error + + // Get retrieves the object by its kind, namespace and name. + Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) + + // Create adds an object to the tracker in the specified namespace. + Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + + // Update updates an existing object in the tracker in the specified namespace. + Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + + // List retrieves all objects of a given kind in the given + // namespace. Only non-List kinds are accepted. 
+ List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) + + // Delete deletes an existing object from the tracker. If object + // didn't exist in the tracker prior to deletion, Delete returns + // no error. + Delete(gvr schema.GroupVersionResource, ns, name string) error + + // Watch watches objects from the tracker. Watch returns a channel + // which will push added / modified / deleted object. + Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) +} + +// ObjectScheme abstracts the implementation of common operations on objects. +type ObjectScheme interface { + runtime.ObjectCreater + runtime.ObjectTyper +} + +// ObjectReaction returns a ReactionFunc that applies core.Action to +// the given tracker. +func ObjectReaction(tracker ObjectTracker) ReactionFunc { + return func(action Action) (bool, runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + // Here and below we need to switch on implementation types, + // not on interfaces, as some interfaces are identical + // (e.g. UpdateAction and CreateAction), so if we use them, + // updates and creates end up matching the same case branch. + switch action := action.(type) { + + case ListActionImpl: + obj, err := tracker.List(gvr, action.GetKind(), ns) + return true, obj, err + + case GetActionImpl: + obj, err := tracker.Get(gvr, ns, action.GetName()) + return true, obj, err + + case CreateActionImpl: + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return true, nil, err + } + if action.GetSubresource() == "" { + err = tracker.Create(gvr, action.GetObject(), ns) + } else { + oldObj, getOldObjErr := tracker.Get(gvr, ns, objMeta.GetName()) + if getOldObjErr != nil { + return true, nil, getOldObjErr + } + // Check whether the existing historical object type is the same as the current operation object type that needs to be updated, and if it is the same, perform the update operation. 
+ if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) { + // TODO: Currently we're handling subresource creation as an update + // on the enclosing resource. This works for some subresources but + // might not be generic enough. + err = tracker.Update(gvr, action.GetObject(), ns) + } else { + // If the historical object type is different from the current object type, need to make sure we return the object submitted,don't persist the submitted object in the tracker. + return true, action.GetObject(), nil + } + } + if err != nil { + return true, nil, err + } + obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + return true, obj, err + + case UpdateActionImpl: + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return true, nil, err + } + err = tracker.Update(gvr, action.GetObject(), ns) + if err != nil { + return true, nil, err + } + obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + return true, obj, err + + case DeleteActionImpl: + err := tracker.Delete(gvr, ns, action.GetName()) + if err != nil { + return true, nil, err + } + return true, nil, nil + + case PatchActionImpl: + obj, err := tracker.Get(gvr, ns, action.GetName()) + if err != nil { + return true, nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return true, nil, err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return true, nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return true, nil, err + } + + if err = json.Unmarshal(modified, obj); err != nil { + return true, nil, err + } + case types.MergePatchType: + modified, err := jsonpatch.MergePatch(old, action.GetPatch()) + if err != nil { + 
return true, nil, err + } + + if err := json.Unmarshal(modified, obj); err != nil { + return true, nil, err + } + case types.StrategicMergePatchType, types.ApplyPatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return true, nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return true, nil, err + } + default: + return true, nil, fmt.Errorf("PatchType is not supported") + } + + if err = tracker.Update(gvr, obj, ns); err != nil { + return true, nil, err + } + + return true, obj, nil + + default: + return false, nil, fmt.Errorf("no reaction implemented for %s", action) + } + } +} + +type tracker struct { + scheme ObjectScheme + decoder runtime.Decoder + lock sync.RWMutex + objects map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object + // The value type of watchers is a map of which the key is either a namespace or + // all/non namespace aka "" and its value is list of fake watchers. + // Manipulations on resources will broadcast the notification events into the + // watchers' channel. Note that too many unhandled events (currently 100, + // see apimachinery/pkg/watch.DefaultChanSize) will cause a panic. + watchers map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher +} + +var _ ObjectTracker = &tracker{} + +// NewObjectTracker returns an ObjectTracker that can be used to keep track +// of objects for the fake clientset. Mostly useful for unit tests. 
+func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker { + return &tracker{ + scheme: scheme, + decoder: decoder, + objects: make(map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object), + watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), + } +} + +func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { + // Heuristic for list kind: original kind + List suffix. Might + // not always be true but this tracker has a pretty limited + // understanding of the actual API model. + listGVK := gvk + listGVK.Kind = listGVK.Kind + "List" + // GVK does have the concept of "internal version". The scheme recognizes + // the runtime.APIVersionInternal, but not the empty string. + if listGVK.Version == "" { + listGVK.Version = runtime.APIVersionInternal + } + + list, err := t.scheme.New(listGVK) + if err != nil { + return nil, err + } + + if !meta.IsListType(list) { + return nil, fmt.Errorf("%q is not a list type", listGVK.Kind) + } + + t.lock.RLock() + defer t.lock.RUnlock() + + objs, ok := t.objects[gvr] + if !ok { + return list, nil + } + + matchingObjs, err := filterByNamespace(objs, ns) + if err != nil { + return nil, err + } + if err := meta.SetList(list, matchingObjs); err != nil { + return nil, err + } + return list.DeepCopyObject(), nil +} + +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { + t.lock.Lock() + defer t.lock.Unlock() + + fakewatcher := watch.NewRaceFreeFake() + + if _, exists := t.watchers[gvr]; !exists { + t.watchers[gvr] = make(map[string][]*watch.RaceFreeFakeWatcher) + } + t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher) + return fakewatcher, nil +} + +func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { + errNotFound := errors.NewNotFound(gvr.GroupResource(), name) + + t.lock.RLock() + defer 
t.lock.RUnlock() + + objs, ok := t.objects[gvr] + if !ok { + return nil, errNotFound + } + + matchingObj, ok := objs[types.NamespacedName{Namespace: ns, Name: name}] + if !ok { + return nil, errNotFound + } + + // Only one object should match in the tracker if it works + // correctly, as Add/Update methods enforce kind/namespace/name + // uniqueness. + obj := matchingObj.DeepCopyObject() + if status, ok := obj.(*metav1.Status); ok { + if status.Status != metav1.StatusSuccess { + return nil, &errors.StatusError{ErrStatus: *status} + } + } + + return obj, nil +} + +func (t *tracker) Add(obj runtime.Object) error { + if meta.IsListType(obj) { + return t.addList(obj, false) + } + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + gvks, _, err := t.scheme.ObjectKinds(obj) + if err != nil { + return err + } + + if partial, ok := obj.(*metav1.PartialObjectMetadata); ok && len(partial.TypeMeta.APIVersion) > 0 { + gvks = []schema.GroupVersionKind{partial.TypeMeta.GroupVersionKind()} + } + + if len(gvks) == 0 { + return fmt.Errorf("no registered kinds for %v", obj) + } + for _, gvk := range gvks { + // NOTE: UnsafeGuessKindToResource is a heuristic and default match. The + // actual registration in apiserver can specify arbitrary route for a + // gvk. If a test uses such objects, it cannot preset the tracker with + // objects via Add(). Instead, it should trigger the Create() function + // of the tracker, where an arbitrary gvr can be specified. + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + // Resource doesn't have the concept of "__internal" version, just set it to "". 
+ if gvr.Version == runtime.APIVersionInternal { + gvr.Version = "" + } + + err := t.add(gvr, obj, objMeta.GetNamespace(), false) + if err != nil { + return err + } + } + return nil +} + +func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + return t.add(gvr, obj, ns, false) +} + +func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + return t.add(gvr, obj, ns, true) +} + +func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher { + watches := []*watch.RaceFreeFakeWatcher{} + if t.watchers[gvr] != nil { + if w := t.watchers[gvr][ns]; w != nil { + watches = append(watches, w...) + } + if ns != metav1.NamespaceAll { + if w := t.watchers[gvr][metav1.NamespaceAll]; w != nil { + watches = append(watches, w...) + } + } + } + return watches +} + +func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error { + t.lock.Lock() + defer t.lock.Unlock() + + gr := gvr.GroupResource() + + // To avoid the object from being accidentally modified by caller + // after it's been added to the tracker, we always store the deep + // copy. + obj = obj.DeepCopyObject() + + newMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + + // Propagate namespace to the new object if hasn't already been set. 
+ if len(newMeta.GetNamespace()) == 0 { + newMeta.SetNamespace(ns) + } + + if ns != newMeta.GetNamespace() { + msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) + return errors.NewBadRequest(msg) + } + + _, ok := t.objects[gvr] + if !ok { + t.objects[gvr] = make(map[types.NamespacedName]runtime.Object) + } + + namespacedName := types.NamespacedName{Namespace: newMeta.GetNamespace(), Name: newMeta.GetName()} + if _, ok = t.objects[gvr][namespacedName]; ok { + if replaceExisting { + for _, w := range t.getWatches(gvr, ns) { + // To avoid the object from being accidentally modified by watcher + w.Modify(obj.DeepCopyObject()) + } + t.objects[gvr][namespacedName] = obj + return nil + } + return errors.NewAlreadyExists(gr, newMeta.GetName()) + } + + if replaceExisting { + // Tried to update but no matching object was found. + return errors.NewNotFound(gr, newMeta.GetName()) + } + + t.objects[gvr][namespacedName] = obj + + for _, w := range t.getWatches(gvr, ns) { + // To avoid the object from being accidentally modified by watcher + w.Add(obj.DeepCopyObject()) + } + + return nil +} + +func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { + list, err := meta.ExtractList(obj) + if err != nil { + return err + } + errs := runtime.DecodeList(list, t.decoder) + if len(errs) > 0 { + return errs[0] + } + for _, obj := range list { + if err := t.Add(obj); err != nil { + return err + } + } + return nil +} + +func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { + t.lock.Lock() + defer t.lock.Unlock() + + objs, ok := t.objects[gvr] + if !ok { + return errors.NewNotFound(gvr.GroupResource(), name) + } + + namespacedName := types.NamespacedName{Namespace: ns, Name: name} + obj, ok := objs[namespacedName] + if !ok { + return errors.NewNotFound(gvr.GroupResource(), name) + } + + delete(objs, namespacedName) + for _, w := range t.getWatches(gvr, ns) { + 
w.Delete(obj.DeepCopyObject()) + } + return nil +} + +// filterByNamespace returns all objects in the collection that +// match provided namespace. Empty namespace matches +// non-namespaced objects. +func filterByNamespace(objs map[types.NamespacedName]runtime.Object, ns string) ([]runtime.Object, error) { + var res []runtime.Object + + for _, obj := range objs { + acc, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + if ns != "" && acc.GetNamespace() != ns { + continue + } + res = append(res, obj) + } + + // Sort res to get deterministic order. + sort.Slice(res, func(i, j int) bool { + acc1, _ := meta.Accessor(res[i]) + acc2, _ := meta.Accessor(res[j]) + if acc1.GetNamespace() != acc2.GetNamespace() { + return acc1.GetNamespace() < acc2.GetNamespace() + } + return acc1.GetName() < acc2.GetName() + }) + return res, nil +} + +func DefaultWatchReactor(watchInterface watch.Interface, err error) WatchReactionFunc { + return func(action Action) (bool, watch.Interface, error) { + return true, watchInterface, err + } +} + +// SimpleReactor is a Reactor. Each reaction function is attached to a given verb,resource tuple. "*" in either field matches everything for that value. +// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions +type SimpleReactor struct { + Verb string + Resource string + + Reaction ReactionFunc +} + +func (r *SimpleReactor) Handles(action Action) bool { + verbCovers := r.Verb == "*" || r.Verb == action.GetVerb() + if !verbCovers { + return false + } + + return resourceCovers(r.Resource, action) +} + +func (r *SimpleReactor) React(action Action) (bool, runtime.Object, error) { + return r.Reaction(action) +} + +// SimpleWatchReactor is a WatchReactor. Each reaction function is attached to a given resource. "*" matches everything for that value. +// For instance, *,pods matches all verbs on pods. 
This allows for easier composition of reaction functions +type SimpleWatchReactor struct { + Resource string + + Reaction WatchReactionFunc +} + +func (r *SimpleWatchReactor) Handles(action Action) bool { + return resourceCovers(r.Resource, action) +} + +func (r *SimpleWatchReactor) React(action Action) (bool, watch.Interface, error) { + return r.Reaction(action) +} + +// SimpleProxyReactor is a ProxyReactor. Each reaction function is attached to a given resource. "*" matches everything for that value. +// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions. +type SimpleProxyReactor struct { + Resource string + + Reaction ProxyReactionFunc +} + +func (r *SimpleProxyReactor) Handles(action Action) bool { + return resourceCovers(r.Resource, action) +} + +func (r *SimpleProxyReactor) React(action Action) (bool, restclient.ResponseWrapper, error) { + return r.Reaction(action) +} + +func resourceCovers(resource string, action Action) bool { + if resource == "*" { + return true + } + + if resource == action.GetResource().Resource { + return true + } + + if index := strings.Index(resource, "/"); index != -1 && + resource[:index] == action.GetResource().Resource && + resource[index+1:] == action.GetSubresource() { + return true + } + + return false +} diff --git a/vendor/k8s.io/client-go/testing/interface.go b/vendor/k8s.io/client-go/testing/interface.go new file mode 100644 index 00000000..266c6ba3 --- /dev/null +++ b/vendor/k8s.io/client-go/testing/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +type FakeClient interface { + // Tracker gives access to the ObjectTracker internal to the fake client. + Tracker() ObjectTracker + + // AddReactor appends a reactor to the end of the chain. + AddReactor(verb, resource string, reaction ReactionFunc) + + // PrependReactor adds a reactor to the beginning of the chain. + PrependReactor(verb, resource string, reaction ReactionFunc) + + // AddWatchReactor appends a reactor to the end of the chain. + AddWatchReactor(resource string, reaction WatchReactionFunc) + + // PrependWatchReactor adds a reactor to the beginning of the chain. + PrependWatchReactor(resource string, reaction WatchReactionFunc) + + // AddProxyReactor appends a reactor to the end of the chain. + AddProxyReactor(resource string, reaction ProxyReactionFunc) + + // PrependProxyReactor adds a reactor to the beginning of the chain. + PrependProxyReactor(resource string, reaction ProxyReactionFunc) + + // Invokes records the provided Action and then invokes the ReactionFunc that + // handles the action if one exists. defaultReturnObj is expected to be of the + // same type a normal call would return. + Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) + + // InvokesWatch records the provided Action and then invokes the ReactionFunc + // that handles the action if one exists. 
+ InvokesWatch(action Action) (watch.Interface, error) + + // InvokesProxy records the provided Action and then invokes the ReactionFunc + // that handles the action if one exists. + InvokesProxy(action Action) restclient.ResponseWrapper + + // ClearActions clears the history of actions called on the fake client. + ClearActions() + + // Actions returns a chronologically ordered slice fake actions called on the + // fake client. + Actions() []Action +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 56c008a6..93781969 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1147,6 +1147,7 @@ k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest k8s.io/client-go/rest/watch k8s.io/client-go/restmapper +k8s.io/client-go/testing k8s.io/client-go/tools/auth k8s.io/client-go/tools/cache k8s.io/client-go/tools/cache/synctrack @@ -1269,6 +1270,8 @@ sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/client/apiutil sigs.k8s.io/controller-runtime/pkg/client/config +sigs.k8s.io/controller-runtime/pkg/client/fake +sigs.k8s.io/controller-runtime/pkg/client/interceptor sigs.k8s.io/controller-runtime/pkg/cluster sigs.k8s.io/controller-runtime/pkg/config sigs.k8s.io/controller-runtime/pkg/config/v1alpha1 @@ -1284,6 +1287,7 @@ sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics sigs.k8s.io/controller-runtime/pkg/internal/field/selector sigs.k8s.io/controller-runtime/pkg/internal/httpserver sigs.k8s.io/controller-runtime/pkg/internal/log +sigs.k8s.io/controller-runtime/pkg/internal/objectutil sigs.k8s.io/controller-runtime/pkg/internal/recorder sigs.k8s.io/controller-runtime/pkg/internal/source sigs.k8s.io/controller-runtime/pkg/internal/syncs diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go new file mode 100644 index 00000000..790a1faa --- /dev/null +++ 
b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go @@ -0,0 +1,1269 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "reflect" + "runtime/debug" + "strconv" + "strings" + "sync" + "time" + + // Using v4 to match upstream + jsonpatch "github.com/evanphx/json-patch" + corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/testing" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" + "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" +) + +type versionedTracker struct { + 
testing.ObjectTracker + scheme *runtime.Scheme + withStatusSubresource sets.Set[schema.GroupVersionKind] +} + +type fakeClient struct { + tracker versionedTracker + scheme *runtime.Scheme + restMapper meta.RESTMapper + withStatusSubresource sets.Set[schema.GroupVersionKind] + + // indexes maps each GroupVersionKind (GVK) to the indexes registered for that GVK. + // The inner map maps from index name to IndexerFunc. + indexes map[schema.GroupVersionKind]map[string]client.IndexerFunc + + schemeWriteLock sync.Mutex +} + +var _ client.WithWatch = &fakeClient{} + +const ( + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +// NewFakeClient creates a new fake client for testing. +// You can choose to initialize it with a slice of runtime.Object. +func NewFakeClient(initObjs ...runtime.Object) client.WithWatch { + return NewClientBuilder().WithRuntimeObjects(initObjs...).Build() +} + +// NewClientBuilder returns a new builder to create a fake client. +func NewClientBuilder() *ClientBuilder { + return &ClientBuilder{} +} + +// ClientBuilder builds a fake client. +type ClientBuilder struct { + scheme *runtime.Scheme + restMapper meta.RESTMapper + initObject []client.Object + initLists []client.ObjectList + initRuntimeObjects []runtime.Object + withStatusSubresource []client.Object + objectTracker testing.ObjectTracker + interceptorFuncs *interceptor.Funcs + + // indexes maps each GroupVersionKind (GVK) to the indexes registered for that GVK. + // The inner map maps from index name to IndexerFunc. + indexes map[schema.GroupVersionKind]map[string]client.IndexerFunc +} + +// WithScheme sets this builder's internal scheme. +// If not set, defaults to client-go's global scheme.Scheme. +func (f *ClientBuilder) WithScheme(scheme *runtime.Scheme) *ClientBuilder { + f.scheme = scheme + return f +} + +// WithRESTMapper sets this builder's restMapper. +// The restMapper is directly set as mapper in the Client. 
This can be used for example +// with a meta.DefaultRESTMapper to provide a static rest mapping. +// If not set, defaults to an empty meta.DefaultRESTMapper. +func (f *ClientBuilder) WithRESTMapper(restMapper meta.RESTMapper) *ClientBuilder { + f.restMapper = restMapper + return f +} + +// WithObjects can be optionally used to initialize this fake client with client.Object(s). +func (f *ClientBuilder) WithObjects(initObjs ...client.Object) *ClientBuilder { + f.initObject = append(f.initObject, initObjs...) + return f +} + +// WithLists can be optionally used to initialize this fake client with client.ObjectList(s). +func (f *ClientBuilder) WithLists(initLists ...client.ObjectList) *ClientBuilder { + f.initLists = append(f.initLists, initLists...) + return f +} + +// WithRuntimeObjects can be optionally used to initialize this fake client with runtime.Object(s). +func (f *ClientBuilder) WithRuntimeObjects(initRuntimeObjs ...runtime.Object) *ClientBuilder { + f.initRuntimeObjects = append(f.initRuntimeObjects, initRuntimeObjs...) + return f +} + +// WithObjectTracker can be optionally used to initialize this fake client with testing.ObjectTracker. +func (f *ClientBuilder) WithObjectTracker(ot testing.ObjectTracker) *ClientBuilder { + f.objectTracker = ot + return f +} + +// WithIndex can be optionally used to register an index with name `field` and indexer `extractValue` +// for API objects of the same GroupVersionKind (GVK) as `obj` in the fake client. +// It can be invoked multiple times, both with objects of the same GVK or different ones. +// Invoking WithIndex twice with the same `field` and GVK (via `obj`) arguments will panic. +// WithIndex retrieves the GVK of `obj` using the scheme registered via WithScheme if +// WithScheme was previously invoked, the default scheme otherwise. 
+func (f *ClientBuilder) WithIndex(obj runtime.Object, field string, extractValue client.IndexerFunc) *ClientBuilder { + objScheme := f.scheme + if objScheme == nil { + objScheme = scheme.Scheme + } + + gvk, err := apiutil.GVKForObject(obj, objScheme) + if err != nil { + panic(err) + } + + // If this is the first index being registered, we initialize the map storing all the indexes. + if f.indexes == nil { + f.indexes = make(map[schema.GroupVersionKind]map[string]client.IndexerFunc) + } + + // If this is the first index being registered for the GroupVersionKind of `obj`, we initialize + // the map storing the indexes for that GroupVersionKind. + if f.indexes[gvk] == nil { + f.indexes[gvk] = make(map[string]client.IndexerFunc) + } + + if _, fieldAlreadyIndexed := f.indexes[gvk][field]; fieldAlreadyIndexed { + panic(fmt.Errorf("indexer conflict: field %s for GroupVersionKind %v is already indexed", + field, gvk)) + } + + f.indexes[gvk][field] = extractValue + + return f +} + +// WithStatusSubresource configures the passed object with a status subresource, which means +// calls to Update and Patch will not alter its status. +func (f *ClientBuilder) WithStatusSubresource(o ...client.Object) *ClientBuilder { + f.withStatusSubresource = append(f.withStatusSubresource, o...) + return f +} + +// WithInterceptorFuncs configures the client methods to be intercepted using the provided interceptor.Funcs. +func (f *ClientBuilder) WithInterceptorFuncs(interceptorFuncs interceptor.Funcs) *ClientBuilder { + f.interceptorFuncs = &interceptorFuncs + return f +} + +// Build builds and returns a new fake client. +func (f *ClientBuilder) Build() client.WithWatch { + if f.scheme == nil { + f.scheme = scheme.Scheme + } + if f.restMapper == nil { + f.restMapper = meta.NewDefaultRESTMapper([]schema.GroupVersion{}) + } + + var tracker versionedTracker + + withStatusSubResource := sets.New(inTreeResourcesWithStatus()...) 
+ for _, o := range f.withStatusSubresource { + gvk, err := apiutil.GVKForObject(o, f.scheme) + if err != nil { + panic(fmt.Errorf("failed to get gvk for object %T: %w", withStatusSubResource, err)) + } + withStatusSubResource.Insert(gvk) + } + + if f.objectTracker == nil { + tracker = versionedTracker{ObjectTracker: testing.NewObjectTracker(f.scheme, scheme.Codecs.UniversalDecoder()), scheme: f.scheme, withStatusSubresource: withStatusSubResource} + } else { + tracker = versionedTracker{ObjectTracker: f.objectTracker, scheme: f.scheme, withStatusSubresource: withStatusSubResource} + } + + for _, obj := range f.initObject { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add object %v to fake client: %w", obj, err)) + } + } + for _, obj := range f.initLists { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add list %v to fake client: %w", obj, err)) + } + } + for _, obj := range f.initRuntimeObjects { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add runtime object %v to fake client: %w", obj, err)) + } + } + + var result client.WithWatch = &fakeClient{ + tracker: tracker, + scheme: f.scheme, + restMapper: f.restMapper, + indexes: f.indexes, + withStatusSubresource: withStatusSubResource, + } + + if f.interceptorFuncs != nil { + result = interceptor.NewClient(result, *f.interceptorFuncs) + } + + return result +} + +const trackerAddResourceVersion = "999" + +func (t versionedTracker) Add(obj runtime.Object) error { + var objects []runtime.Object + if meta.IsListType(obj) { + var err error + objects, err = meta.ExtractList(obj) + if err != nil { + return err + } + } else { + objects = []runtime.Object{obj} + } + for _, obj := range objects { + accessor, err := meta.Accessor(obj) + if err != nil { + return fmt.Errorf("failed to get accessor for object: %w", err) + } + if accessor.GetDeletionTimestamp() != nil && len(accessor.GetFinalizers()) == 0 { + return fmt.Errorf("refusing to create obj 
%s with metadata.deletionTimestamp but no finalizers", accessor.GetName()) + } + if accessor.GetResourceVersion() == "" { + // We use a "magic" value of 999 here because this field + // is parsed as uint and and 0 is already used in Update. + // As we can't go lower, go very high instead so this can + // be recognized + accessor.SetResourceVersion(trackerAddResourceVersion) + } + + obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj) + if err != nil { + return err + } + if err := t.ObjectTracker.Add(obj); err != nil { + return err + } + } + + return nil +} + +func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return fmt.Errorf("failed to get accessor for object: %w", err) + } + if accessor.GetName() == "" { + return apierrors.NewInvalid( + obj.GetObjectKind().GroupVersionKind().GroupKind(), + accessor.GetName(), + field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) + } + if accessor.GetResourceVersion() != "" { + return apierrors.NewBadRequest("resourceVersion can not be set for Create requests") + } + accessor.SetResourceVersion("1") + obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj) + if err != nil { + return err + } + if err := t.ObjectTracker.Create(gvr, obj, ns); err != nil { + accessor.SetResourceVersion("") + return err + } + + return nil +} + +// convertFromUnstructuredIfNecessary will convert runtime.Unstructured for a GVK that is recognized +// by the schema into the whatever the schema produces with New() for said GVK. +// This is required because the tracker unconditionally saves on manipulations, but its List() implementation +// tries to assign whatever it finds into a ListType it gets from schema.New() - Thus we have to ensure +// we save as the very same type, otherwise subsequent List requests will fail. 
+func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (runtime.Object, error) { + u, isUnstructured := o.(runtime.Unstructured) + if !isUnstructured { + return o, nil + } + gvk := o.GetObjectKind().GroupVersionKind() + if !s.Recognizes(gvk) { + return o, nil + } + + typed, err := s.New(gvk) + if err != nil { + return nil, fmt.Errorf("scheme recognizes %s but failed to produce an object for it: %w", gvk, err) + } + + unstructuredSerialized, err := json.Marshal(u) + if err != nil { + return nil, fmt.Errorf("failed to serialize %T: %w", unstructuredSerialized, err) + } + if err := json.Unmarshal(unstructuredSerialized, typed); err != nil { + return nil, fmt.Errorf("failed to unmarshal the content of %T into %T: %w", u, typed, err) + } + + return typed, nil +} + +func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + isStatus := false + // We apply patches using a client-go reaction that ends up calling the trackers Update. As we can't change + // that reaction, we use the callstack to figure out if this originated from the status client. 
+ if bytes.Contains(debug.Stack(), []byte("sigs.k8s.io/controller-runtime/pkg/client/fake.(*fakeSubResourceClient).statusPatch")) { + isStatus = true + } + return t.update(gvr, obj, ns, isStatus, false) +} + +func (t versionedTracker) update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, isStatus bool, deleting bool) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return fmt.Errorf("failed to get accessor for object: %w", err) + } + + if accessor.GetName() == "" { + return apierrors.NewInvalid( + obj.GetObjectKind().GroupVersionKind().GroupKind(), + accessor.GetName(), + field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) + } + + gvk, err := apiutil.GVKForObject(obj, t.scheme) + if err != nil { + return err + } + + oldObject, err := t.ObjectTracker.Get(gvr, ns, accessor.GetName()) + if err != nil { + // If the resource is not found and the resource allows create on update, issue a + // create instead. + if apierrors.IsNotFound(err) && allowsCreateOnUpdate(gvk) { + return t.Create(gvr, obj, ns) + } + return err + } + + if t.withStatusSubresource.Has(gvk) { + if isStatus { // copy everything but status and metadata.ResourceVersion from original object + if err := copyStatusFrom(obj, oldObject); err != nil { + return fmt.Errorf("failed to copy non-status field for object with status subresouce: %w", err) + } + passedRV := accessor.GetResourceVersion() + if err := copyFrom(oldObject, obj); err != nil { + return fmt.Errorf("failed to restore non-status fields: %w", err) + } + accessor.SetResourceVersion(passedRV) + } else { // copy status from original object + if err := copyStatusFrom(oldObject, obj); err != nil { + return fmt.Errorf("failed to copy the status for object with status subresource: %w", err) + } + } + } else if isStatus { + return apierrors.NewNotFound(gvr.GroupResource(), accessor.GetName()) + } + + oldAccessor, err := meta.Accessor(oldObject) + if err != nil { + return err + } + + // If 
the new object does not have the resource version set and it allows unconditional update, + // default it to the resource version of the existing resource + if accessor.GetResourceVersion() == "" && allowsUnconditionalUpdate(gvk) { + accessor.SetResourceVersion(oldAccessor.GetResourceVersion()) + } + if accessor.GetResourceVersion() != oldAccessor.GetResourceVersion() { + return apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), errors.New("object was modified")) + } + if oldAccessor.GetResourceVersion() == "" { + oldAccessor.SetResourceVersion("0") + } + intResourceVersion, err := strconv.ParseUint(oldAccessor.GetResourceVersion(), 10, 64) + if err != nil { + return fmt.Errorf("can not convert resourceVersion %q to int: %w", oldAccessor.GetResourceVersion(), err) + } + intResourceVersion++ + accessor.SetResourceVersion(strconv.FormatUint(intResourceVersion, 10)) + + if !deleting && !deletionTimestampEqual(accessor, oldAccessor) { + return fmt.Errorf("error: Unable to edit %s: metadata.deletionTimestamp field is immutable", accessor.GetName()) + } + + if !accessor.GetDeletionTimestamp().IsZero() && len(accessor.GetFinalizers()) == 0 { + return t.ObjectTracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) + } + obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj) + if err != nil { + return err + } + return t.ObjectTracker.Update(gvr, obj, ns) +} + +func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + o, err := c.tracker.Get(gvr, key.Namespace, key.Name) + if err != nil { + return err + } + + if _, isUnstructured := obj.(runtime.Unstructured); isUnstructured { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(gvk.Kind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + } + + 
j, err := json.Marshal(o) + if err != nil { + return err + } + zero(obj) + return json.Unmarshal(j, obj) +} + +func (c *fakeClient) Watch(ctx context.Context, list client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + gvk, err := apiutil.GVKForObject(list, c.scheme) + if err != nil { + return nil, err + } + + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + return c.tracker.Watch(gvr, listOpts.Namespace) +} + +func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + originalKind := gvk.Kind + + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + if _, isUnstructuredList := obj.(runtime.Unstructured); isUnstructuredList && !c.scheme.Recognizes(gvk) { + // We need to register the ListKind with UnstructuredList: + // https://github.com/kubernetes/kubernetes/blob/7b2776b89fb1be28d4e9203bdeec079be903c103/staging/src/k8s.io/client-go/dynamic/fake/simple.go#L44-L51 + c.schemeWriteLock.Lock() + c.scheme.AddKnownTypeWithName(gvk.GroupVersion().WithKind(gvk.Kind+"List"), &unstructured.UnstructuredList{}) + c.schemeWriteLock.Unlock() + } + + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + o, err := c.tracker.List(gvr, gvk, listOpts.Namespace) + if err != nil { + return err + } + + if _, isUnstructured := obj.(runtime.Unstructured); isUnstructured { + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(originalKind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + } + + j, err := json.Marshal(o) + if err != nil { + return err + } + zero(obj) + if err := json.Unmarshal(j, obj); err != nil { + return err + } + + if listOpts.LabelSelector == nil && listOpts.FieldSelector == nil { + return nil + } + + // If we're 
here, either a label or field selector are specified (or both), so before we return + // the list we must filter it. If both selectors are set, they are ANDed. + objs, err := meta.ExtractList(obj) + if err != nil { + return err + } + + filteredList, err := c.filterList(objs, gvk, listOpts.LabelSelector, listOpts.FieldSelector) + if err != nil { + return err + } + + return meta.SetList(obj, filteredList) +} + +func (c *fakeClient) filterList(list []runtime.Object, gvk schema.GroupVersionKind, ls labels.Selector, fs fields.Selector) ([]runtime.Object, error) { + // Filter the objects with the label selector + filteredList := list + if ls != nil { + objsFilteredByLabel, err := objectutil.FilterWithLabels(list, ls) + if err != nil { + return nil, err + } + filteredList = objsFilteredByLabel + } + + // Filter the result of the previous pass with the field selector + if fs != nil { + objsFilteredByField, err := c.filterWithFields(filteredList, gvk, fs) + if err != nil { + return nil, err + } + filteredList = objsFilteredByField + } + + return filteredList, nil +} + +func (c *fakeClient) filterWithFields(list []runtime.Object, gvk schema.GroupVersionKind, fs fields.Selector) ([]runtime.Object, error) { + requiresExact := selector.RequiresExactMatch(fs) + if !requiresExact { + return nil, fmt.Errorf("field selector %s is not in one of the two supported forms \"key==val\" or \"key=val\"", + fs) + } + + // Field selection is mimicked via indexes, so there's no sane answer this function can give + // if there are no indexes registered for the GroupVersionKind of the objects in the list. 
+ indexes := c.indexes[gvk] + for _, req := range fs.Requirements() { + if len(indexes) == 0 || indexes[req.Field] == nil { + return nil, fmt.Errorf("List on GroupVersionKind %v specifies selector on field %s, but no "+ + "index with name %s has been registered for GroupVersionKind %v", gvk, req.Field, req.Field, gvk) + } + } + + filteredList := make([]runtime.Object, 0, len(list)) + for _, obj := range list { + matches := true + for _, req := range fs.Requirements() { + indexExtractor := indexes[req.Field] + if !c.objMatchesFieldSelector(obj, indexExtractor, req.Value) { + matches = false + break + } + } + if matches { + filteredList = append(filteredList, obj) + } + } + return filteredList, nil +} + +func (c *fakeClient) objMatchesFieldSelector(o runtime.Object, extractIndex client.IndexerFunc, val string) bool { + obj, isClientObject := o.(client.Object) + if !isClientObject { + panic(fmt.Errorf("expected object %v to be of type client.Object, but it's not", o)) + } + + for _, extractedVal := range extractIndex(obj) { + if extractedVal == val { + return true + } + } + + return false +} + +func (c *fakeClient) Scheme() *runtime.Scheme { + return c.scheme +} + +func (c *fakeClient) RESTMapper() meta.RESTMapper { + return c.restMapper +} + +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *fakeClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return apiutil.GVKForObject(obj, c.scheme) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. 
+func (c *fakeClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return apiutil.IsObjectNamespaced(obj, c.scheme, c.restMapper) +} + +func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + createOptions := &client.CreateOptions{} + createOptions.ApplyOptions(opts) + + for _, dryRunOpt := range createOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + + if accessor.GetName() == "" && accessor.GetGenerateName() != "" { + base := accessor.GetGenerateName() + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + accessor.SetName(fmt.Sprintf("%s%s", base, utilrand.String(randomLength))) + } + // Ignore attempts to set deletion timestamp + if !accessor.GetDeletionTimestamp().IsZero() { + accessor.SetDeletionTimestamp(nil) + } + + return c.tracker.Create(gvr, obj, accessor.GetNamespace()) +} + +func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + delOptions := client.DeleteOptions{} + delOptions.ApplyOptions(opts) + + for _, dryRunOpt := range delOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + // Check the ResourceVersion if that Precondition was specified. 
+ if delOptions.Preconditions != nil && delOptions.Preconditions.ResourceVersion != nil { + name := accessor.GetName() + dbObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), name) + if err != nil { + return err + } + oldAccessor, err := meta.Accessor(dbObj) + if err != nil { + return err + } + actualRV := oldAccessor.GetResourceVersion() + expectRV := *delOptions.Preconditions.ResourceVersion + if actualRV != expectRV { + msg := fmt.Sprintf( + "the ResourceVersion in the precondition (%s) does not match the ResourceVersion in record (%s). "+ + "The object might have been modified", + expectRV, actualRV) + return apierrors.NewConflict(gvr.GroupResource(), name, errors.New(msg)) + } + } + + return c.deleteObject(gvr, accessor) +} + +func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + dcOptions := client.DeleteAllOfOptions{} + dcOptions.ApplyOptions(opts) + + for _, dryRunOpt := range dcOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + o, err := c.tracker.List(gvr, gvk, dcOptions.Namespace) + if err != nil { + return err + } + + objs, err := meta.ExtractList(o) + if err != nil { + return err + } + filteredObjs, err := objectutil.FilterWithLabels(objs, dcOptions.LabelSelector) + if err != nil { + return err + } + for _, o := range filteredObjs { + accessor, err := meta.Accessor(o) + if err != nil { + return err + } + err = c.deleteObject(gvr, accessor) + if err != nil { + return err + } + } + return nil +} + +func (c *fakeClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + return c.update(obj, false, opts...) 
+} + +func (c *fakeClient) update(obj client.Object, isStatus bool, opts ...client.UpdateOption) error { + updateOptions := &client.UpdateOptions{} + updateOptions.ApplyOptions(opts) + + for _, dryRunOpt := range updateOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + return c.tracker.update(gvr, obj, accessor.GetNamespace(), isStatus, false) +} + +func (c *fakeClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + return c.patch(obj, patch, opts...) +} + +func (c *fakeClient) patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + patchOptions := &client.PatchOptions{} + patchOptions.ApplyOptions(opts) + + for _, dryRunOpt := range patchOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + data, err := patch.Data(obj) + if err != nil { + return err + } + + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + oldObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName()) + if err != nil { + return err + } + oldAccessor, err := meta.Accessor(oldObj) + if err != nil { + return err + } + + // Apply patch without updating object. + // To remain in accordance with the behavior of k8s api behavior, + // a patch must not allow for changes to the deletionTimestamp of an object. + // The reaction() function applies the patch to the object and calls Update(), + // whereas dryPatch() replicates this behavior but skips the call to Update(). + // This ensures that the patch may be rejected if a deletionTimestamp is modified, prior + // to updating the object. 
+ action := testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data) + o, err := dryPatch(action, c.tracker) + if err != nil { + return err + } + newObj, err := meta.Accessor(o) + if err != nil { + return err + } + + // Validate that deletionTimestamp has not been changed + if !deletionTimestampEqual(newObj, oldAccessor) { + return fmt.Errorf("rejected patch, metadata.deletionTimestamp immutable") + } + + reaction := testing.ObjectReaction(c.tracker) + handled, o, err := reaction(action) + if err != nil { + return err + } + if !handled { + panic("tracker could not handle patch method") + } + + if _, isUnstructured := obj.(runtime.Unstructured); isUnstructured { + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(gvk.Kind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + } + + j, err := json.Marshal(o) + if err != nil { + return err + } + zero(obj) + return json.Unmarshal(j, obj) +} + +// Applying a patch results in a deletionTimestamp that is truncated to the nearest second. +// Check that the diff between a new and old deletion timestamp is within a reasonable threshold +// to be considered unchanged. +func deletionTimestampEqual(newObj metav1.Object, obj metav1.Object) bool { + newTime := newObj.GetDeletionTimestamp() + oldTime := obj.GetDeletionTimestamp() + + if newTime == nil || oldTime == nil { + return newTime == oldTime + } + return newTime.Time.Sub(oldTime.Time).Abs() < time.Second +} + +// The behavior of applying the patch is pulled out into dryPatch(), +// which applies the patch and returns an object, but does not Update() the object. +// This function returns a patched runtime object that may then be validated before a call to Update() is executed. +// This results in some code duplication, but was found to be a cleaner alternative than unmarshalling and introspecting the patch data +// and easier than refactoring the k8s client-go method upstream. 
+// Duplicate of upstream: https://github.com/kubernetes/client-go/blob/783d0d33626e59d55d52bfd7696b775851f92107/testing/fixture.go#L146-L194 +func dryPatch(action testing.PatchActionImpl, tracker testing.ObjectTracker) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + + obj, err := tracker.Get(gvr, ns, action.GetName()) + if err != nil { + return nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return nil, err + } + + if err = json.Unmarshal(modified, obj); err != nil { + return nil, err + } + case types.MergePatchType: + modified, err := jsonpatch.MergePatch(old, action.GetPatch()) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(modified, obj); err != nil { + return nil, err + } + case types.StrategicMergePatchType, types.ApplyPatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("PatchType is not supported") + } + return obj, nil +} + +// copyStatusFrom copies the status from old into new +func copyStatusFrom(old, new runtime.Object) error { + oldMapStringAny, err := toMapStringAny(old) + if err != nil { + return fmt.Errorf("failed to convert old to *unstructured.Unstructured: %w", err) + } + newMapStringAny, err := toMapStringAny(new) + if err != nil { + return fmt.Errorf("failed to convert new to 
*unststructured.Unstructured: %w", err) + } + + newMapStringAny["status"] = oldMapStringAny["status"] + + if err := fromMapStringAny(newMapStringAny, new); err != nil { + return fmt.Errorf("failed to convert back from map[string]any: %w", err) + } + + return nil +} + +// copyFrom copies from old into new +func copyFrom(old, new runtime.Object) error { + oldMapStringAny, err := toMapStringAny(old) + if err != nil { + return fmt.Errorf("failed to convert old to *unstructured.Unstructured: %w", err) + } + if err := fromMapStringAny(oldMapStringAny, new); err != nil { + return fmt.Errorf("failed to convert back from map[string]any: %w", err) + } + + return nil +} + +func toMapStringAny(obj runtime.Object) (map[string]any, error) { + if unstructured, isUnstructured := obj.(*unstructured.Unstructured); isUnstructured { + return unstructured.Object, nil + } + + serialized, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + u := map[string]any{} + return u, json.Unmarshal(serialized, &u) +} + +func fromMapStringAny(u map[string]any, target runtime.Object) error { + if targetUnstructured, isUnstructured := target.(*unstructured.Unstructured); isUnstructured { + targetUnstructured.Object = u + return nil + } + + serialized, err := json.Marshal(u) + if err != nil { + return fmt.Errorf("failed to serialize: %w", err) + } + + zero(target) + if err := json.Unmarshal(serialized, &target); err != nil { + return fmt.Errorf("failed to deserialize: %w", err) + } + + return nil +} + +func (c *fakeClient) Status() client.SubResourceWriter { + return c.SubResource("status") +} + +func (c *fakeClient) SubResource(subResource string) client.SubResourceClient { + return &fakeSubResourceClient{client: c, subResource: subResource} +} + +func (c *fakeClient) deleteObject(gvr schema.GroupVersionResource, accessor metav1.Object) error { + old, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName()) + if err == nil { + oldAccessor, err := meta.Accessor(old) + if 
err == nil { + if len(oldAccessor.GetFinalizers()) > 0 { + now := metav1.Now() + oldAccessor.SetDeletionTimestamp(&now) + // Call update directly with mutability parameter set to true to allow + // changes to deletionTimestamp + return c.tracker.update(gvr, old, accessor.GetNamespace(), false, true) + } + } + } + + //TODO: implement propagation + return c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) +} + +func getGVRFromObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionResource, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + return gvr, nil +} + +type fakeSubResourceClient struct { + client *fakeClient + subResource string +} + +func (sw *fakeSubResourceClient) Get(ctx context.Context, obj, subResource client.Object, opts ...client.SubResourceGetOption) error { + panic("fakeSubResourceClient does not support get") +} + +func (sw *fakeSubResourceClient) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + switch sw.subResource { + case "eviction": + _, isEviction := subResource.(*policyv1beta1.Eviction) + if !isEviction { + _, isEviction = subResource.(*policyv1.Eviction) + } + if !isEviction { + return apierrors.NewBadRequest(fmt.Sprintf("got invalid type %t, expected Eviction", subResource)) + } + if _, isPod := obj.(*corev1.Pod); !isPod { + return apierrors.NewNotFound(schema.GroupResource{}, "") + } + + return sw.client.Delete(ctx, obj) + default: + return fmt.Errorf("fakeSubResourceWriter does not support create for %s", sw.subResource) + } +} + +func (sw *fakeSubResourceClient) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + updateOptions := client.SubResourceUpdateOptions{} + updateOptions.ApplyOptions(opts) + + body := obj + if updateOptions.SubResourceBody != nil { + body = 
updateOptions.SubResourceBody + } + return sw.client.update(body, true, &updateOptions.UpdateOptions) +} + +func (sw *fakeSubResourceClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + patchOptions := client.SubResourcePatchOptions{} + patchOptions.ApplyOptions(opts) + + body := obj + if patchOptions.SubResourceBody != nil { + body = patchOptions.SubResourceBody + } + + // this is necessary to identify that last call was made for status patch, through stack trace. + if sw.subResource == "status" { + return sw.statusPatch(body, patch, patchOptions) + } + + return sw.client.patch(body, patch, &patchOptions.PatchOptions) +} + +func (sw *fakeSubResourceClient) statusPatch(body client.Object, patch client.Patch, patchOptions client.SubResourcePatchOptions) error { + return sw.client.patch(body, patch, &patchOptions.PatchOptions) +} + +func allowsUnconditionalUpdate(gvk schema.GroupVersionKind) bool { + switch gvk.Group { + case "apps": + switch gvk.Kind { + case "ControllerRevision", "DaemonSet", "Deployment", "ReplicaSet", "StatefulSet": + return true + } + case "autoscaling": + switch gvk.Kind { + case "HorizontalPodAutoscaler": + return true + } + case "batch": + switch gvk.Kind { + case "CronJob", "Job": + return true + } + case "certificates": + switch gvk.Kind { + case "Certificates": + return true + } + case "flowcontrol": + switch gvk.Kind { + case "FlowSchema", "PriorityLevelConfiguration": + return true + } + case "networking": + switch gvk.Kind { + case "Ingress", "IngressClass", "NetworkPolicy": + return true + } + case "policy": + switch gvk.Kind { + case "PodSecurityPolicy": + return true + } + case "rbac.authorization.k8s.io": + switch gvk.Kind { + case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding": + return true + } + case "scheduling": + switch gvk.Kind { + case "PriorityClass": + return true + } + case "settings": + switch gvk.Kind { + case "PodPreset": + return true + 
} + case "storage": + switch gvk.Kind { + case "StorageClass": + return true + } + case "": + switch gvk.Kind { + case "ConfigMap", "Endpoint", "Event", "LimitRange", "Namespace", "Node", + "PersistentVolume", "PersistentVolumeClaim", "Pod", "PodTemplate", + "ReplicationController", "ResourceQuota", "Secret", "Service", + "ServiceAccount", "EndpointSlice": + return true + } + } + + return false +} + +func allowsCreateOnUpdate(gvk schema.GroupVersionKind) bool { + switch gvk.Group { + case "coordination": + switch gvk.Kind { + case "Lease": + return true + } + case "node": + switch gvk.Kind { + case "RuntimeClass": + return true + } + case "rbac": + switch gvk.Kind { + case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding": + return true + } + case "": + switch gvk.Kind { + case "Endpoint", "Event", "LimitRange", "Service": + return true + } + } + + return false +} + +func inTreeResourcesWithStatus() []schema.GroupVersionKind { + return []schema.GroupVersionKind{ + {Version: "v1", Kind: "Namespace"}, + {Version: "v1", Kind: "Node"}, + {Version: "v1", Kind: "PersistentVolumeClaim"}, + {Version: "v1", Kind: "PersistentVolume"}, + {Version: "v1", Kind: "Pod"}, + {Version: "v1", Kind: "ReplicationController"}, + {Version: "v1", Kind: "Service"}, + + {Group: "apps", Version: "v1", Kind: "Deployment"}, + {Group: "apps", Version: "v1", Kind: "DaemonSet"}, + {Group: "apps", Version: "v1", Kind: "ReplicaSet"}, + {Group: "apps", Version: "v1", Kind: "StatefulSet"}, + + {Group: "autoscaling", Version: "v1", Kind: "HorizontalPodAutoscaler"}, + + {Group: "batch", Version: "v1", Kind: "CronJob"}, + {Group: "batch", Version: "v1", Kind: "Job"}, + + {Group: "certificates.k8s.io", Version: "v1", Kind: "CertificateSigningRequest"}, + + {Group: "networking.k8s.io", Version: "v1", Kind: "Ingress"}, + {Group: "networking.k8s.io", Version: "v1", Kind: "NetworkPolicy"}, + + {Group: "policy", Version: "v1", Kind: "PodDisruptionBudget"}, + + {Group: "storage.k8s.io", Version: "v1", 
Kind: "VolumeAttachment"}, + + {Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}, + + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"}, + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"}, + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchema"}, + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfiguration"}, + } +} + +// zero zeros the value of a pointer. +func zero(x interface{}) { + if x == nil { + return + } + res := reflect.ValueOf(x).Elem() + res.Set(reflect.Zero(res.Type())) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go new file mode 100644 index 00000000..d42347a2 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package fake provides a fake client for testing. + +A fake client is backed by its simple object store indexed by GroupVersionResource. +You can create a fake client with optional objects. + + client := NewClientBuilder().WithScheme(scheme).WithObj(initObjs...).Build() + +You can invoke the methods defined in the Client interface. + +When in doubt, it's almost always better not to use this package and instead use +envtest.Environment with a real client and API server. 
+ +WARNING: ⚠️ Current Limitations / Known Issues with the fake Client ⚠️ + - This client does not have a way to inject specific errors to test handled vs. unhandled errors. + - There is some support for sub resources which can cause issues with tests if you're trying to update + e.g. metadata and status in the same reconcile. + - No OpenAPI validation is performed when creating or updating objects. + - ObjectMeta's `Generation` and `ResourceVersion` don't behave properly, Patch or Update + operations that rely on these fields will fail, or give false positives. +*/ +package fake diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go new file mode 100644 index 00000000..3d3f3cb0 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go @@ -0,0 +1,166 @@ +package interceptor + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Funcs contains functions that are called instead of the underlying client's methods. 
+type Funcs struct { + Get func(ctx context.Context, client client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error + List func(ctx context.Context, client client.WithWatch, list client.ObjectList, opts ...client.ListOption) error + Create func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.CreateOption) error + Delete func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteOption) error + DeleteAllOf func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteAllOfOption) error + Update func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.UpdateOption) error + Patch func(ctx context.Context, client client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error + Watch func(ctx context.Context, client client.WithWatch, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error) + SubResource func(client client.WithWatch, subResource string) client.SubResourceClient + SubResourceGet func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, subResource client.Object, opts ...client.SubResourceGetOption) error + SubResourceCreate func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error + SubResourceUpdate func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, opts ...client.SubResourceUpdateOption) error + SubResourcePatch func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error +} + +// NewClient returns a new interceptor client that calls the functions in funcs instead of the underlying client's methods, if they are not nil. 
+func NewClient(interceptedClient client.WithWatch, funcs Funcs) client.WithWatch { + return interceptor{ + client: interceptedClient, + funcs: funcs, + } +} + +type interceptor struct { + client client.WithWatch + funcs Funcs +} + +var _ client.WithWatch = &interceptor{} + +func (c interceptor) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +func (c interceptor) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + +func (c interceptor) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if c.funcs.Get != nil { + return c.funcs.Get(ctx, c.client, key, obj, opts...) + } + return c.client.Get(ctx, key, obj, opts...) +} + +func (c interceptor) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + if c.funcs.List != nil { + return c.funcs.List(ctx, c.client, list, opts...) + } + return c.client.List(ctx, list, opts...) +} + +func (c interceptor) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + if c.funcs.Create != nil { + return c.funcs.Create(ctx, c.client, obj, opts...) + } + return c.client.Create(ctx, obj, opts...) +} + +func (c interceptor) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + if c.funcs.Delete != nil { + return c.funcs.Delete(ctx, c.client, obj, opts...) + } + return c.client.Delete(ctx, obj, opts...) +} + +func (c interceptor) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + if c.funcs.Update != nil { + return c.funcs.Update(ctx, c.client, obj, opts...) + } + return c.client.Update(ctx, obj, opts...) +} + +func (c interceptor) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + if c.funcs.Patch != nil { + return c.funcs.Patch(ctx, c.client, obj, patch, opts...) 
+ } + return c.client.Patch(ctx, obj, patch, opts...) +} + +func (c interceptor) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + if c.funcs.DeleteAllOf != nil { + return c.funcs.DeleteAllOf(ctx, c.client, obj, opts...) + } + return c.client.DeleteAllOf(ctx, obj, opts...) +} + +func (c interceptor) Status() client.SubResourceWriter { + return c.SubResource("status") +} + +func (c interceptor) SubResource(subResource string) client.SubResourceClient { + if c.funcs.SubResource != nil { + return c.funcs.SubResource(c.client, subResource) + } + return subResourceInterceptor{ + subResourceName: subResource, + client: c.client, + funcs: c.funcs, + } +} + +func (c interceptor) Scheme() *runtime.Scheme { + return c.client.Scheme() +} + +func (c interceptor) RESTMapper() meta.RESTMapper { + return c.client.RESTMapper() +} + +func (c interceptor) Watch(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + if c.funcs.Watch != nil { + return c.funcs.Watch(ctx, c.client, obj, opts...) + } + return c.client.Watch(ctx, obj, opts...) +} + +type subResourceInterceptor struct { + subResourceName string + client client.Client + funcs Funcs +} + +var _ client.SubResourceClient = &subResourceInterceptor{} + +func (s subResourceInterceptor) Get(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceGetOption) error { + if s.funcs.SubResourceGet != nil { + return s.funcs.SubResourceGet(ctx, s.client, s.subResourceName, obj, subResource, opts...) + } + return s.client.SubResource(s.subResourceName).Get(ctx, obj, subResource, opts...) +} + +func (s subResourceInterceptor) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + if s.funcs.SubResourceCreate != nil { + return s.funcs.SubResourceCreate(ctx, s.client, s.subResourceName, obj, subResource, opts...) 
+ } + return s.client.SubResource(s.subResourceName).Create(ctx, obj, subResource, opts...) +} + +func (s subResourceInterceptor) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + if s.funcs.SubResourceUpdate != nil { + return s.funcs.SubResourceUpdate(ctx, s.client, s.subResourceName, obj, opts...) + } + return s.client.SubResource(s.subResourceName).Update(ctx, obj, opts...) +} + +func (s subResourceInterceptor) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + if s.funcs.SubResourcePatch != nil { + return s.funcs.SubResourcePatch(ctx, s.client, s.subResourceName, obj, patch, opts...) + } + return s.client.SubResource(s.subResourceName).Patch(ctx, obj, patch, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go new file mode 100644 index 00000000..0189c043 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go @@ -0,0 +1,42 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package objectutil + +import ( + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +// FilterWithLabels returns a copy of the items in objs matching labelSel. 
+func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { + outItems := make([]runtime.Object, 0, len(objs)) + for _, obj := range objs { + meta, err := apimeta.Accessor(obj) + if err != nil { + return nil, err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + outItems = append(outItems, obj.DeepCopyObject()) + } + return outItems, nil +} diff --git a/version/version.go b/version/version.go index 77388e2f..c1ca6b27 100644 --- a/version/version.go +++ b/version/version.go @@ -73,5 +73,6 @@ func ParseFilesSha256(componentsJSON []byte) (map[string]map[string]map[string]s if err != nil { return nil, err } + return m, nil }