mirror of
https://github.com/kubesphere/kubekey.git
synced 2025-12-25 17:12:50 +00:00
feat: add more golangci lint rule. (#2366)
Signed-off-by: joyceliu <joyceliu@yunify.com> Co-authored-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
parent
718de31d46
commit
865913fea9
776
.golangci.yaml
776
.golangci.yaml
|
|
@ -1,46 +1,215 @@
|
|||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- asasalint
|
||||
- asciicheck
|
||||
- bidichk
|
||||
- bodyclose
|
||||
- canonicalheader
|
||||
- containedctx
|
||||
# - deadcode
|
||||
# - depguard
|
||||
- contextcheck
|
||||
- copyloopvar
|
||||
- cyclop
|
||||
- decorder
|
||||
- depguard
|
||||
- dogsled
|
||||
# - dupl
|
||||
- dupword
|
||||
- durationcheck
|
||||
# - err113
|
||||
- errcheck
|
||||
- errchkjson
|
||||
- errname
|
||||
- errorlint
|
||||
- exhaustive
|
||||
# - exhaustruct
|
||||
- exportloopref
|
||||
- fatcontext
|
||||
- forbidigo
|
||||
- forcetypeassert
|
||||
- funlen
|
||||
# - gci
|
||||
- ginkgolinter
|
||||
- gocheckcompilerdirectives
|
||||
# - gochecknoglobals
|
||||
# - gochecknoinits
|
||||
- gochecksumtype
|
||||
- gocognit
|
||||
- goconst
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- godot
|
||||
- godox
|
||||
- gofmt
|
||||
# - gofumpt
|
||||
- goheader
|
||||
- goimports
|
||||
- gomoddirectives
|
||||
- gomodguard
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- gosimple
|
||||
- gosmopolitan
|
||||
- govet
|
||||
- grouper
|
||||
- importas
|
||||
- inamedparam
|
||||
- ineffassign
|
||||
- interfacebloat
|
||||
- intrange
|
||||
# - ireturn
|
||||
# - lll
|
||||
- loggercheck
|
||||
- maintidx
|
||||
- makezero
|
||||
- mirror
|
||||
# - misspell
|
||||
# - mnd
|
||||
# - musttag
|
||||
- nakedret
|
||||
- nestif
|
||||
- nilerr
|
||||
- nilnil
|
||||
- nlreturn
|
||||
- noctx
|
||||
- nolintlint
|
||||
- nonamedreturns
|
||||
- nosprintfhostport
|
||||
# - paralleltest
|
||||
- perfsprint
|
||||
- prealloc
|
||||
- predeclared
|
||||
# - revive
|
||||
- promlinter
|
||||
- protogetter
|
||||
- reassign
|
||||
- revive
|
||||
- rowserrcheck
|
||||
- sloglint
|
||||
- spancheck
|
||||
- sqlclosecheck
|
||||
- staticcheck
|
||||
# - structcheck
|
||||
- stylecheck
|
||||
- tagalign
|
||||
# - tagliatelle
|
||||
- tenv
|
||||
- testableexamples
|
||||
- testifylint
|
||||
# - testpackage
|
||||
- thelper
|
||||
- tparallel
|
||||
- typecheck
|
||||
- unconvert
|
||||
# - unparam
|
||||
- unparam
|
||||
- unused
|
||||
# - varcheck
|
||||
- usestdlibvars
|
||||
# - varnamelen
|
||||
- wastedassign
|
||||
- whitespace
|
||||
# - wrapcheck
|
||||
# - wsl
|
||||
- zerologlint
|
||||
|
||||
linters-settings:
|
||||
cyclop:
|
||||
# The maximal code complexity to report.
|
||||
# Default: 10
|
||||
max-complexity: 20
|
||||
# Should ignore tests.
|
||||
# Default: false
|
||||
skip-tests: true
|
||||
depguard:
|
||||
# Rules to apply.
|
||||
#
|
||||
# Variables:
|
||||
# - File Variables
|
||||
# you can still use and exclamation mark ! in front of a variable to say not to use it.
|
||||
# Example !$test will match any file that is not a go test file.
|
||||
#
|
||||
# `$all` - matches all go files
|
||||
# `$test` - matches all go test files
|
||||
#
|
||||
# - Package Variables
|
||||
#
|
||||
# `$gostd` - matches all of go's standard library (Pulled from `GOROOT`)
|
||||
#
|
||||
# Default: Only allow $gostd in all files.
|
||||
rules:
|
||||
# Name of a rule.
|
||||
main:
|
||||
# List of allowed packages.
|
||||
allow:
|
||||
- $gostd
|
||||
- github.com/spf13
|
||||
- github.com/pkg/sftp
|
||||
- github.com/google/gops
|
||||
- github.com/go-git/go-git
|
||||
- github.com/fsnotify/fsnotify
|
||||
- github.com/schollz/progressbar
|
||||
- github.com/stretchr/testify
|
||||
- github.com/Masterminds/sprig
|
||||
- github.com/opencontainers/image-spec
|
||||
- oras.land/oras-go
|
||||
- k8s.io
|
||||
- sigs.k8s.io
|
||||
- github.com/kubesphere/kubekey
|
||||
exhaustive:
|
||||
# Enum types matching the supplied regex do not have to be listed in
|
||||
# switch statements to satisfy exhaustiveness.
|
||||
# Default: ""
|
||||
ignore-enum-types: "fsnotify.Op|v1alpha1.TaskPhase|reflect.Kind"
|
||||
forbidigo:
|
||||
# Forbid the following identifiers (list of regexp).
|
||||
# Default: ["^(fmt\\.Print(|f|ln)|print|println)$"]
|
||||
forbid:
|
||||
# Builtin function:
|
||||
- ^print.*$
|
||||
# Optional message that gets included in error reports.
|
||||
# - p: ^fmt\.Print.*$
|
||||
# msg: Do not commit print statements.
|
||||
# # Alternatively, put messages at the end of the regex, surrounded by `(# )?`
|
||||
# Escape any special characters. Those messages get included in error reports.
|
||||
# - 'fmt\.Print.*(# Do not commit print statements\.)?'
|
||||
# Forbid spew Dump, whether it is called as function or method.
|
||||
# Depends on analyze-types below.
|
||||
- ^spew\.(ConfigState\.)?Dump$
|
||||
# The package name might be ambiguous.
|
||||
# The full import path can be used as additional criteria.
|
||||
# Depends on analyze-types below.
|
||||
- p: ^v1.Dump$
|
||||
pkg: ^example.com/pkg/api/v1$
|
||||
funlen:
|
||||
# Checks the number of lines in a function.
|
||||
# If lower than 0, disable the check.
|
||||
# Default: 60
|
||||
lines: -1
|
||||
# Checks the number of statements in a function.
|
||||
# If lower than 0, disable the check.
|
||||
# Default: 40
|
||||
statements: -1
|
||||
# Ignore comments when counting lines.
|
||||
# Default false
|
||||
ignore-comments: true
|
||||
gocritic:
|
||||
enabled-tags:
|
||||
- experimental
|
||||
disabled-checks:
|
||||
- appendAssign
|
||||
- dupImport # https://github.com/go-critic/go-critic/issues/845
|
||||
- evalOrder
|
||||
- ifElseChain
|
||||
- octalLiteral
|
||||
- regexpSimplify
|
||||
- sloppyReassign
|
||||
- truncateCmp
|
||||
- typeDefFirst
|
||||
- unnamedResult
|
||||
- unnecessaryDefer
|
||||
- whyNoLint
|
||||
- wrapperFunc
|
||||
- commentFormatting
|
||||
- filepathJoin
|
||||
# - rangeValCopy
|
||||
# - hugeParam
|
||||
godot:
|
||||
# declarations - for top level declaration comments (default);
|
||||
# toplevel - for top level comments;
|
||||
|
|
@ -49,8 +218,24 @@ linters-settings:
|
|||
exclude:
|
||||
- '^ \+.*'
|
||||
- '^ ANCHOR.*'
|
||||
# Check that each sentence ends with a period.
|
||||
# Default: true
|
||||
period: false
|
||||
# Check that each sentence starts with a capital letter.
|
||||
# Default: false
|
||||
capital: false
|
||||
gosec:
|
||||
excludes:
|
||||
- G106 # Deferring unsafe method "InsecureIgnoreHostKey" on type "\*ssh"
|
||||
- G301 # Deferring unsafe method "MkdirAll" on type "\*os.File"
|
||||
- G304 # Deferring unsafe method "Create" or "Open" on type "\*os.File"
|
||||
- G306 # Deferring unsafe method "WriteFile" on type "\*os.File"
|
||||
- G307 # Deferring unsafe method "Close" on type "\*os.File"
|
||||
- G108 # Profiling endpoint is automatically exposed on /debug/pprof
|
||||
- G402 # Look for bad TLS connection settings
|
||||
importas:
|
||||
no-unaliased: true
|
||||
# no-extra-aliases: true
|
||||
alias:
|
||||
# oci
|
||||
- pkg: github.com/opencontainers/image-spec/specs-go/v1
|
||||
|
|
@ -90,79 +275,536 @@ linters-settings:
|
|||
# kubekey
|
||||
- pkg: "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
alias: _const
|
||||
- pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
|
||||
alias: kubekeyv1
|
||||
- pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
|
||||
alias: kubekeyv1alpha1
|
||||
- pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
|
||||
alias: kkcorev1
|
||||
- pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
|
||||
alias: kkcorev1alpha1
|
||||
- pkg: "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
|
||||
alias: kkprojectv1
|
||||
nestif:
|
||||
# Minimal complexity of if statements to report.
|
||||
# Default: 5
|
||||
min-complexity: 20
|
||||
nolintlint:
|
||||
allow-unused: false
|
||||
require-specific: true
|
||||
revive:
|
||||
rules:
|
||||
# The following rules are recommended https://github.com/mgechev/revive#recommended-configuration
|
||||
# Maximum number of open files at the same time.
|
||||
# See https://github.com/mgechev/revive#command-line-flags
|
||||
# Defaults to unlimited.
|
||||
# max-open-files: 2048
|
||||
# When set to false, ignores files with "GENERATED" header, similar to golint.
|
||||
# See https://github.com/mgechev/revive#available-rules for details.
|
||||
# Default: false
|
||||
ignore-generated-header: true
|
||||
# Sets the default severity.
|
||||
# See https://github.com/mgechev/revive#configuration
|
||||
# Default: warning
|
||||
severity: error
|
||||
# Enable all available rules.
|
||||
# Default: false
|
||||
enable-all-rules: false
|
||||
# Sets the default failure confidence.
|
||||
# This means that linting errors with less than 0.8 confidence will be ignored.
|
||||
# Default: 0.8
|
||||
confidence: 0.1
|
||||
rules: # v1.3.7
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#add-constant
|
||||
- name: add-constant
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- maxLitCount: "3"
|
||||
allowStrs: '""'
|
||||
allowInts: "0,1,2"
|
||||
allowFloats: "0.0,0.,1.0,1.,2.0,2."
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#argument-limit
|
||||
- name: argument-limit
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments: [4]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#atomic
|
||||
- name: atomic
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#banned-characters
|
||||
- name: banned-characters
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments: ["Ω", "Σ", "σ", "7"]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bare-return
|
||||
- name: bare-return
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports
|
||||
- name: blank-imports
|
||||
- name: context-as-argument
|
||||
- name: context-keys-type
|
||||
- name: dot-imports
|
||||
- name: error-return
|
||||
- name: error-strings
|
||||
- name: error-naming
|
||||
- name: exported
|
||||
#- name: if-return # TODO This is a recommended rule with many findings which may require it's own pr.
|
||||
- name: increment-decrement
|
||||
- name: var-naming
|
||||
- name: var-declaration
|
||||
- name: package-comments
|
||||
- name: range
|
||||
- name: receiver-naming
|
||||
- name: time-naming
|
||||
- name: unexported-return
|
||||
- name: indent-error-flow
|
||||
- name: errorf
|
||||
- name: empty-block
|
||||
- name: superfluous-else
|
||||
#- name: unused-parameter # TODO This is a recommended rule with many findings which may require it's own pr.
|
||||
- name: unreachable-code
|
||||
- name: redefines-builtin-id
|
||||
#
|
||||
# Rules in addition to the recommended configuration above.
|
||||
#
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr
|
||||
- name: bool-literal-in-expr
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#call-to-gc
|
||||
- name: call-to-gc
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#cognitive-complexity
|
||||
- name: cognitive-complexity
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments: [7]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#comment-spacings
|
||||
- name: comment-spacings
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- mypragma
|
||||
- otherpragma
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#confusing-naming
|
||||
- name: confusing-naming
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#confusing-results
|
||||
- name: confusing-results
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr
|
||||
- name: constant-logical-expr
|
||||
gosec:
|
||||
excludes:
|
||||
- G106 # Deferring unsafe method "InsecureIgnoreHostKey" on type "\*ssh"
|
||||
- G301 # Deferring unsafe method "MkdirAll" on type "\*os.File"
|
||||
- G304 # Deferring unsafe method "Create" or "Open" on type "\*os.File"
|
||||
- G306 # Deferring unsafe method "WriteFile" on type "\*os.File"
|
||||
- G307 # Deferring unsafe method "Close" on type "\*os.File"
|
||||
- G108 # Profiling endpoint is automatically exposed on /debug/pprof
|
||||
gocritic:
|
||||
enabled-tags:
|
||||
- experimental
|
||||
disabled-checks:
|
||||
- appendAssign
|
||||
- dupImport # https://github.com/go-critic/go-critic/issues/845
|
||||
- evalOrder
|
||||
- ifElseChain
|
||||
- octalLiteral
|
||||
- regexpSimplify
|
||||
- sloppyReassign
|
||||
- truncateCmp
|
||||
- typeDefFirst
|
||||
- unnamedResult
|
||||
- unnecessaryDefer
|
||||
- whyNoLint
|
||||
- wrapperFunc
|
||||
- commentFormatting
|
||||
- filepathJoin
|
||||
# - rangeValCopy
|
||||
# - hugeParam
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
|
||||
- name: context-as-argument
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# arguments:
|
||||
# - allowTypesBefore: "*testing.T,*github.com/user/repo/testing.Harness"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type
|
||||
- name: context-keys-type
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#cyclomatic
|
||||
- name: cyclomatic
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments: [3]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#datarace
|
||||
- name: datarace
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit
|
||||
- name: deep-exit
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer
|
||||
- name: defer
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- ["call-chain", "loop"]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports
|
||||
- name: dot-imports
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments: []
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports
|
||||
- name: duplicated-imports
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return
|
||||
- name: early-return
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- "preserveScope"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block
|
||||
- name: empty-block
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines
|
||||
- name: empty-lines
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#enforce-map-style
|
||||
- name: enforce-map-style
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- "make"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#enforce-repeated-arg-type-style
|
||||
- name: enforce-repeated-arg-type-style
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- "short"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#enforce-slice-style
|
||||
- name: enforce-slice-style
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- "make"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming
|
||||
- name: error-naming
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return
|
||||
- name: error-return
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings
|
||||
- name: error-strings
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf
|
||||
- name: errorf
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported
|
||||
- name: exported
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- "checkPrivateReceivers"
|
||||
- "disableStutteringCheck"
|
||||
- "sayRepetitiveInsteadOfStutters"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#file-header
|
||||
- name: file-header
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- This is the text that must appear at the top of source files.
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter
|
||||
- name: flag-parameter
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#function-length
|
||||
- name: function-length
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments: [10, 0]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#function-result-limit
|
||||
- name: function-result-limit
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments: [3]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#get-return
|
||||
- name: get-return
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches
|
||||
- name: identical-branches
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
|
||||
- name: if-return
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-alias-naming
|
||||
- name: import-alias-naming
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- "^[a-z][a-z0-9]{0,}$"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing
|
||||
- name: import-shadowing
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#imports-blocklist
|
||||
- name: imports-blocklist
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- "crypto/md5"
|
||||
- "crypto/sha1"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement
|
||||
- name: increment-decrement
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow
|
||||
- name: indent-error-flow
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- "preserveScope"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#line-length-limit
|
||||
- name: line-length-limit
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments: [80]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#max-control-nesting
|
||||
- name: max-control-nesting
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments: [3]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#max-public-structs
|
||||
- name: max-public-structs
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments: [3]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#modifies-parameter
|
||||
- name: modifies-parameter
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#modifies-value-receiver
|
||||
- name: modifies-value-receiver
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#nested-structs
|
||||
- name: nested-structs
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#optimize-operands-order
|
||||
- name: optimize-operands-order
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments
|
||||
- name: package-comments
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range
|
||||
- name: range
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address
|
||||
- name: range-val-address
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure
|
||||
- name: range-val-in-closure
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#receiver-naming
|
||||
- name: receiver-naming
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id
|
||||
- name: redefines-builtin-id
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redundant-import-alias
|
||||
- name: redundant-import-alias
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format
|
||||
- name: string-format
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- - 'core.WriteError[1].Message'
|
||||
- '/^([^A-Z]|$)/'
|
||||
- must not start with a capital letter
|
||||
- - 'fmt.Errorf[0]'
|
||||
- '/(^|[^\.!?])$/'
|
||||
- must not end in punctuation
|
||||
- - panic
|
||||
- '/^[^\n]*$/'
|
||||
- must not contain line breaks
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-of-int
|
||||
- name: string-of-int
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag
|
||||
- name: struct-tag
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- "json,inline"
|
||||
- "bson,outline,gnu"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else
|
||||
- name: superfluous-else
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- "preserveScope"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal
|
||||
- name: time-equal
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-naming
|
||||
- name: time-naming
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unchecked-type-assertion
|
||||
- name: unchecked-type-assertion
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- acceptIgnoredAssertionResult: true
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion
|
||||
- name: unconditional-recursion
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-naming
|
||||
- name: unexported-naming
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return
|
||||
- name: unexported-return
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error
|
||||
- name: unhandled-error
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- "fmt.Printf"
|
||||
- "fmt.Fprintf"
|
||||
- "fmt.Fprint"
|
||||
- "fmt.Println"
|
||||
- "bytes.Buffer.WriteString"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt
|
||||
- name: unnecessary-stmt
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unreachable-code
|
||||
- name: unreachable-code
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter
|
||||
- name: unused-parameter
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- allowRegex: "^_"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-receiver
|
||||
- name: unused-receiver
|
||||
severity: warning
|
||||
disabled: true
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- allowRegex: "^_"
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#use-any
|
||||
- name: use-any
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break
|
||||
- name: useless-break
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration
|
||||
- name: var-declaration
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming
|
||||
- name: var-naming
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
arguments:
|
||||
- ["ID"] # AllowList
|
||||
- ["VM"] # DenyList
|
||||
- - upperCaseConst: true # Extra parameter (upperCaseConst|skipPackageNameChecks)
|
||||
skipPackageNameChecks: true
|
||||
# # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
|
||||
- name: waitgroup-by-value
|
||||
severity: warning
|
||||
disabled: false
|
||||
exclude: [""]
|
||||
stylecheck:
|
||||
checks:
|
||||
- -ST1000 # ignore package comment
|
||||
wrapcheck:
|
||||
# An array of strings that specify substrings of signatures to ignore.
|
||||
# If this set, it will override the default set of ignored signatures.
|
||||
# See https://github.com/tomarrell/wrapcheck#configuration for more information.
|
||||
# Default: [".Errorf(", "errors.New(", "errors.Unwrap(", "errors.Join(", ".Wrap(", ".Wrapf(", ".WithMessage(", ".WithMessagef(", ".WithStack("]
|
||||
ignoreSigs:
|
||||
- .Errorf(
|
||||
- errors.New(
|
||||
- errors.Unwrap(
|
||||
- errors.Join(
|
||||
- .Wrap(
|
||||
- .Wrapf(
|
||||
- .WithMessage(
|
||||
- .WithMessagef(
|
||||
- .WithStack(
|
||||
# An array of strings that specify regular expressions of signatures to ignore.
|
||||
# Default: []
|
||||
ignoreSigRegexps:
|
||||
- \.New.*Error\(
|
||||
# An array of strings that specify globs of packages to ignore.
|
||||
# Default: []
|
||||
ignorePackageGlobs:
|
||||
- encoding/*
|
||||
- github.com/pkg/*
|
||||
# An array of strings that specify regular expressions of interfaces to ignore.
|
||||
# Default: []
|
||||
ignoreInterfaceRegexps:
|
||||
- ^(?i)c(?-i)ach(ing|e)
|
||||
issues:
|
||||
max-same-issues: 0
|
||||
max-issues-per-linter: 0
|
||||
|
|
|
|||
5
Makefile
5
Makefile
|
|
@ -171,9 +171,9 @@ generate-go-deepcopy-kubekey: $(CONTROLLER_GEN) ## Generate deepcopy object
|
|||
.PHONY: generate-manifests-kubekey
|
||||
generate-manifests-kubekey: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc.
|
||||
$(CONTROLLER_GEN) \
|
||||
paths=./pkg/apis/... \
|
||||
paths=./pkg/apis/core/... \
|
||||
crd \
|
||||
output:crd:dir=./config/helm/crds/
|
||||
output:crd:dir=./config/kubekey/crds/
|
||||
|
||||
.PHONY: generate-modules
|
||||
generate-modules: ## Run go mod tidy to ensure modules are up to date
|
||||
|
|
@ -193,7 +193,6 @@ generate-goimports: ## Format all import, `goimports` is required.
|
|||
lint: $(GOLANGCI_LINT) ## Lint the codebase
|
||||
$(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
|
||||
cd $(TEST_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
|
||||
cd $(TOOLS_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS)
|
||||
|
||||
.PHONY: verify-dockerfiles
|
||||
verify-dockerfiles:
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@
|
|||
tasks:
|
||||
- name: Package image
|
||||
image:
|
||||
pull: "{{ .image_manifests }}"
|
||||
pull: "{{ .image_manifests | toJson }}"
|
||||
when: .image_manifests | default list | len | lt 0
|
||||
- name: Export artifact
|
||||
command: |
|
||||
|
|
|
|||
|
|
@ -20,6 +20,11 @@
|
|||
- include_tasks: download_by_curl.yaml
|
||||
# the binaries which download by helm
|
||||
- include_tasks: download_by_helm.yaml
|
||||
# download remote images to local
|
||||
- name: Download images
|
||||
image:
|
||||
pull: "{{ .image_manifests | toJson }}"
|
||||
when: .image_manifests | default list | len | lt 0
|
||||
|
||||
- include_tasks: pki.yaml
|
||||
tags: ["certs"]
|
||||
|
|
|
|||
|
|
@ -54,16 +54,16 @@
|
|||
# add repository
|
||||
rm -rf /etc/yum.repos.d/*
|
||||
cat << EOF > /etc/yum.repos.d/CentOS-local.repo
|
||||
[base-local]
|
||||
name=rpms-local
|
||||
[base-local]
|
||||
name=rpms-local
|
||||
|
||||
baseurl=file://%s
|
||||
baseurl=file:///tmp/kubekey/repository.iso
|
||||
|
||||
enabled=1
|
||||
enabled=1
|
||||
|
||||
gpgcheck=0
|
||||
gpgcheck=0
|
||||
|
||||
EOF
|
||||
EOF
|
||||
# update repository
|
||||
yum clean all && yum makecache
|
||||
# install
|
||||
|
|
@ -75,4 +75,4 @@
|
|||
# install
|
||||
yum install -y openssl socat conntrack ipset ebtables chrony ipvsadm
|
||||
fi
|
||||
when: .os.release.ID_LIKE | eq "rhel fedora"
|
||||
when: .os.release.ID_LIKE | eq "\"rhel fedora\""
|
||||
|
|
|
|||
|
|
@ -177,7 +177,7 @@ cat >>/etc/hosts<<EOF
|
|||
# kubernetes hosts
|
||||
{{- range .groups.k8s_cluster | default list }}
|
||||
{{- if and (index $.inventory_hosts . "internal_ipv4") (ne (index $.inventory_hosts . "internal_ipv4") "") }}
|
||||
{{ printf "%s %s %s.%s" (index $.inventory_hosts . "internal_ipv4") (index $.inventory_hosts . "hostname") (index $.inventory_hosts . "hostname") ($.kubernetes.cluster_name | default "cluster.local") }}
|
||||
{{- printf "%s %s %s.%s" (index $.inventory_hosts . "internal_ipv4") (index $.inventory_hosts . "hostname") (index $.inventory_hosts . "hostname") ($.kubernetes.cluster_name | default "cluster.local") }}
|
||||
{{- end }}
|
||||
{{- if and (index $.inventory_hosts . "internal_ipv6") (ne (index $.inventory_hosts . "internal_ipv6") "") }}
|
||||
{{ printf "%s %s %s.%s" (index $.inventory_hosts . "internal_ipv6") (index $.inventory_hosts . "hostname") (index $.inventory_hosts . "hostname") ($.kubernetes.cluster_name | default "cluster.local") }}
|
||||
|
|
@ -195,19 +195,19 @@ cat >>/etc/hosts<<EOF
|
|||
# image registry hosts
|
||||
{{- range .groups.image_registry | default list }}
|
||||
{{- if and (index $.inventory_hosts . "internal_ipv4") (ne (index $.inventory_hosts . "internal_ipv4") "") }}
|
||||
{{ printf "%s %s" (index $.inventory_hosts . "internal_ipv4") (index $.inventory_hosts . "hostname") }}
|
||||
{{- printf "%s %s" (index $.inventory_hosts . "internal_ipv4") (index $.inventory_hosts . "hostname") }}
|
||||
{{- end }}
|
||||
{{- if and (index $.inventory_hosts . "internal_ipv6") (ne (index $.inventory_hosts . "internal_ipv6") "") }}
|
||||
{{ printf "%s %s" (index $.inventory_hosts . "internal_ipv6") (index $.inventory_hosts . "hostname") }}
|
||||
{{- printf "%s %s" (index $.inventory_hosts . "internal_ipv6") (index $.inventory_hosts . "hostname") }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
# nfs hosts
|
||||
{{- range .groups.nfs | default list }}
|
||||
{{- if and (index $.inventory_hosts . "internal_ipv4") (ne (index $.inventory_hosts . "internal_ipv4") "") }}
|
||||
{{ printf "%s %s" (index $.inventory_hosts . "internal_ipv4") (index $.inventory_hosts . "hostname") }}
|
||||
{{- printf "%s %s" (index $.inventory_hosts . "internal_ipv4") (index $.inventory_hosts . "hostname") }}
|
||||
{{- end }}
|
||||
{{- if and (index $.inventory_hosts . "internal_ipv6") (ne (index $.inventory_hosts . "internal_ipv6") "") }}
|
||||
{{ printf "%s %s" (index $.inventory_hosts . "internal_ipv6") (index $.inventory_hosts . "hostname") }}
|
||||
{{- printf "%s %s" (index $.inventory_hosts . "internal_ipv6") (index $.inventory_hosts . "hostname") }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
# kubekey hosts END
|
||||
|
|
@ -227,5 +227,3 @@ update-alternatives --set iptables /usr/sbin/iptables-legacy >/dev/null 2>&1 ||
|
|||
update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy >/dev/null 2>&1 || true
|
||||
update-alternatives --set arptables /usr/sbin/arptables-legacy >/dev/null 2>&1 || true
|
||||
update-alternatives --set ebtables /usr/sbin/ebtables-legacy >/dev/null 2>&1 || true
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ image_registry:
|
|||
{{- if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }}
|
||||
{{ .image_registry.ha_vip }}
|
||||
{{- else }}
|
||||
{{ .groups.image_registry | default list | first }}
|
||||
{{ index .inventory_hosts (.groups.image_registry | default list | first) "internal_ipv4" }}
|
||||
{{- end }}
|
||||
username: admin
|
||||
password: Harbor12345
|
||||
|
|
|
|||
|
|
@ -47,10 +47,10 @@
|
|||
src: |
|
||||
{{ .work_dir }}/kubekey/pki/image_registry.crt
|
||||
dest: |
|
||||
/etc/docker/certs.d/{{ .image_registry.auth.registry }}/server.crt
|
||||
/etc/docker/certs.d/{{ .image_registry.auth.registry }}/client.cert
|
||||
- name: Sync image registry key file to remote
|
||||
copy:
|
||||
src: |
|
||||
{{ .work_dir }}/kubekey/pki/image_registry.key
|
||||
dest: |
|
||||
/etc/docker/certs.d/{{ .image_registry.auth.registry }}/server.key
|
||||
/etc/docker/certs.d/{{ .image_registry.auth.registry }}/client.key
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ image_registry:
|
|||
{{- if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }}
|
||||
{{ .image_registry.ha_vip }}
|
||||
{{- else }}
|
||||
{{ .groups.image_registry | default list | first }}
|
||||
{{ index .inventory_hosts (.groups.image_registry | default list | first) "internal_ipv4" }}
|
||||
{{- end }}
|
||||
username: admin
|
||||
password: Harbor12345
|
||||
|
|
@ -49,4 +49,3 @@ image_registry:
|
|||
# bucket: bucketname
|
||||
# keyid: mykeyid
|
||||
# rootdirectory: /s3/object/name/prefix
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,11 @@
|
|||
---
|
||||
- name: Sync images to remote
|
||||
tags: ["only_image"]
|
||||
copy:
|
||||
src: |
|
||||
{{ .work_dir }}/kubekey/images/
|
||||
dest: /tmp/kubekey/images/
|
||||
|
||||
- name: Create harbor project for each image
|
||||
tags: ["only_image"]
|
||||
command: |
|
||||
|
|
@ -9,18 +16,11 @@
|
|||
continue
|
||||
fi
|
||||
|
||||
dir_name=${dir##*/}
|
||||
IFS='=' set -- $dir_name
|
||||
image_array="$@"
|
||||
array_length=$#
|
||||
project=${dir##*/}
|
||||
|
||||
if [ "$array_length" -gt 3 ]; then
|
||||
project=$2
|
||||
dest_image=$(shift 2 && echo "$*" | tr ' ' '/')
|
||||
tag=$(echo "$@" | awk '{print $NF}')
|
||||
else
|
||||
echo "unsupported image: $dir_name"
|
||||
exit 1
|
||||
if [ "$project" == "blobs" ]; then
|
||||
# skip blobs dir
|
||||
continue
|
||||
fi
|
||||
|
||||
# if project is not exist, create if
|
||||
|
|
@ -44,6 +44,7 @@
|
|||
tags: ["only_image"]
|
||||
image:
|
||||
push:
|
||||
images_dir: /tmp/kubekey/images/
|
||||
registry: |
|
||||
{{ .image_registry.auth.registry }}
|
||||
namespace_override: |
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ kubernetes:
|
|||
pod_cidr: 10.233.64.0/18
|
||||
service_cidr: 10.233.0.0/18
|
||||
dns_image: |
|
||||
{{ .k8s_registry }}/coredns/coredns:v1.8.6
|
||||
{{ .k8s_registry }}/coredns/coredns:1.8.6
|
||||
dns_cache_image: |
|
||||
{{ .dockerio_registry }}/kubesphere/k8s-dns-node-cache:1.22.20
|
||||
dns_service_ip: |
|
||||
|
|
|
|||
|
|
@ -10,4 +10,3 @@ EnvironmentFile=-/etc/default/kubelet
|
|||
Environment="KUBELET_EXTRA_ARGS=--node-ip={{ .internal_ipv4 }} --hostname-override={{ .hostname }} {{ range $k,$v := .kubernetes.kubelet.extra_args }}--{{ $k }} {{ $v }} {{ end }}"
|
||||
ExecStart=
|
||||
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
|
||||
|
||||
|
|
|
|||
|
|
@ -3,7 +3,11 @@ cluster_require:
|
|||
etcd_disk_wal_fysnc_duration_seconds: 10000000
|
||||
allow_unsupported_distribution_setup: false
|
||||
# support ubuntu, centos.
|
||||
supported_os_distributions: ['ubuntu', 'centos']
|
||||
supported_os_distributions:
|
||||
- ubuntu
|
||||
- '"ubuntu"'
|
||||
- centos
|
||||
- '"centos"'
|
||||
require_network_plugin: ['calico', 'flannel', 'cilium', 'hybridnet', 'kube-ovn']
|
||||
# the minimal version of kubernetes to be installed.
|
||||
kube_version_min_required: v1.19.10
|
||||
|
|
@ -24,3 +28,4 @@ cluster_require:
|
|||
arm64:
|
||||
- arm64
|
||||
- aarch64
|
||||
min_kernel_version: 4.9.17
|
||||
|
|
|
|||
|
|
@ -34,4 +34,5 @@
|
|||
|
||||
- name: Stop if kernel version is too low
|
||||
assert:
|
||||
that: .os.kernel_version | splitList "-" | first | semverCompare ">=4.9.17"
|
||||
that: .os.kernel_version | splitList "-" | first | semverCompare (printf ">=%s" .cluster_require.min_kernel_version)
|
||||
fail_msg: "kernel version: {{ .os.kernel_version }} is too low, required at least: {{ .cluster_require.min_kernel_version }} "
|
||||
|
|
|
|||
|
|
@ -17,10 +17,10 @@ limitations under the License.
|
|||
package options
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strings"
|
||||
|
|
@ -39,16 +39,19 @@ var (
|
|||
profileOutput string
|
||||
)
|
||||
|
||||
// AddProfilingFlags to NewControllerManagerCommand
|
||||
func AddProfilingFlags(flags *pflag.FlagSet) {
|
||||
flags.StringVar(&profileName, "profile", "none", "Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex)")
|
||||
flags.StringVar(&profileOutput, "profile-output", "profile.pprof", "Name of the file to write the profile to")
|
||||
}
|
||||
|
||||
func InitProfiling() error {
|
||||
// InitProfiling for profileName
|
||||
func InitProfiling(ctx context.Context) error {
|
||||
var (
|
||||
f *os.File
|
||||
err error
|
||||
)
|
||||
|
||||
switch profileName {
|
||||
case "none":
|
||||
return nil
|
||||
|
|
@ -57,6 +60,7 @@ func InitProfiling() error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = pprof.StartCPUProfile(f)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -76,22 +80,20 @@ func InitProfiling() error {
|
|||
|
||||
// If the command is interrupted before the end (ctrl-c), flush the
|
||||
// profiling files
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt)
|
||||
go func() {
|
||||
<-c
|
||||
<-ctx.Done()
|
||||
if err := f.Close(); err != nil {
|
||||
fmt.Printf("failed to close file. file: %v. error: %v \n", profileOutput, err)
|
||||
}
|
||||
if err := FlushProfiling(); err != nil {
|
||||
fmt.Printf("failed to FlushProfiling. file: %v. error: %v \n", profileOutput, err)
|
||||
}
|
||||
os.Exit(0)
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FlushProfiling to local file
|
||||
func FlushProfiling() error {
|
||||
switch profileName {
|
||||
case "none":
|
||||
|
|
@ -100,17 +102,20 @@ func FlushProfiling() error {
|
|||
pprof.StopCPUProfile()
|
||||
case "heap":
|
||||
runtime.GC()
|
||||
|
||||
fallthrough
|
||||
default:
|
||||
profile := pprof.Lookup(profileName)
|
||||
if profile == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
f, err := os.Create(profileOutput)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if err := profile.WriteTo(f, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -125,11 +130,12 @@ func FlushProfiling() error {
|
|||
|
||||
var gops bool
|
||||
|
||||
// AddGOPSFlags to NewControllerManagerCommand
|
||||
func AddGOPSFlags(flags *pflag.FlagSet) {
|
||||
flags.BoolVar(&gops, "gops", false, "Whether to enable gops or not. When enabled this option, "+
|
||||
"controller-manager will listen on a random port on 127.0.0.1, then you can use the gops tool to list and diagnose the controller-manager currently running.")
|
||||
flags.BoolVar(&gops, "gops", false, "Whether to enable gops or not. When enabled this option, controller-manager will listen on a random port on 127.0.0.1, then you can use the gops tool to list and diagnose the controller-manager currently running.")
|
||||
}
|
||||
|
||||
// InitGOPS if gops is true
|
||||
func InitGOPS() error {
|
||||
if gops {
|
||||
// Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
|
||||
|
|
@ -138,6 +144,7 @@ func InitGOPS() error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -145,6 +152,7 @@ func InitGOPS() error {
|
|||
// KLOG
|
||||
// ======================================================================================
|
||||
|
||||
// AddKlogFlags to NewControllerManagerCommand
|
||||
func AddKlogFlags(fs *pflag.FlagSet) {
|
||||
local := flag.NewFlagSet("klog", flag.ExitOnError)
|
||||
klog.InitFlags(local)
|
||||
|
|
|
|||
|
|
@ -17,10 +17,10 @@ limitations under the License.
|
|||
package options
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
cliflag "k8s.io/component-base/cli/flag"
|
||||
)
|
||||
|
||||
// ControllerManagerServerOptions for NewControllerManagerServerOptions
|
||||
type ControllerManagerServerOptions struct {
|
||||
// WorkDir is the baseDir which command find any resource (project etc.)
|
||||
WorkDir string
|
||||
|
|
@ -31,6 +31,7 @@ type ControllerManagerServerOptions struct {
|
|||
LeaderElection bool
|
||||
}
|
||||
|
||||
// NewControllerManagerServerOptions for NewControllerManagerCommand
|
||||
func NewControllerManagerServerOptions() *ControllerManagerServerOptions {
|
||||
return &ControllerManagerServerOptions{
|
||||
WorkDir: "/kubekey",
|
||||
|
|
@ -38,18 +39,21 @@ func NewControllerManagerServerOptions() *ControllerManagerServerOptions {
|
|||
}
|
||||
}
|
||||
|
||||
// Flags add to NewControllerManagerCommand
|
||||
func (o *ControllerManagerServerOptions) Flags() cliflag.NamedFlagSets {
|
||||
fss := cliflag.NamedFlagSets{}
|
||||
gfs := fss.FlagSet("generic")
|
||||
gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey. Default current dir. ")
|
||||
gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.")
|
||||
gfs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode, after a successful execution of Pipeline, "+"will retain runtime data, which includes task execution status and parameters.")
|
||||
cfs := fss.FlagSet("controller-manager")
|
||||
cfs.IntVar(&o.MaxConcurrentReconciles, "max-concurrent-reconciles", o.MaxConcurrentReconciles, "The number of maximum concurrent reconciles for controller.")
|
||||
cfs.BoolVar(&o.LeaderElection, "leader-election", o.LeaderElection, "Whether to enable leader election for controller-manager.")
|
||||
|
||||
return fss
|
||||
}
|
||||
|
||||
func (o *ControllerManagerServerOptions) Complete(cmd *cobra.Command, args []string) {
|
||||
// Complete for ControllerManagerServerOptions
|
||||
func (o *ControllerManagerServerOptions) Complete() {
|
||||
// do nothing
|
||||
if o.MaxConcurrentReconciles == 0 {
|
||||
o.MaxConcurrentReconciles = 1
|
||||
|
|
|
|||
|
|
@ -28,8 +28,10 @@ import (
|
|||
"github.com/kubesphere/kubekey/v4/pkg/manager"
|
||||
)
|
||||
|
||||
// NewControllerManagerCommand operator command.
|
||||
func NewControllerManagerCommand() *cobra.Command {
|
||||
o := options.NewControllerManagerServerOptions()
|
||||
ctx := signals.SetupSignalHandler()
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "controller-manager",
|
||||
|
|
@ -38,13 +40,14 @@ func NewControllerManagerCommand() *cobra.Command {
|
|||
if err := options.InitGOPS(); err != nil {
|
||||
return err
|
||||
}
|
||||
return options.InitProfiling()
|
||||
|
||||
return options.InitProfiling(ctx)
|
||||
},
|
||||
PersistentPostRunE: func(*cobra.Command, []string) error {
|
||||
return options.FlushProfiling()
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
o.Complete(cmd, args)
|
||||
RunE: func(*cobra.Command, []string) error {
|
||||
o.Complete()
|
||||
// create workdir directory,if not exists
|
||||
_const.SetWorkDir(o.WorkDir)
|
||||
if _, err := os.Stat(o.WorkDir); os.IsNotExist(err) {
|
||||
|
|
@ -52,7 +55,8 @@ func NewControllerManagerCommand() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
return run(signals.SetupSignalHandler(), o)
|
||||
|
||||
return run(ctx, o)
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -68,6 +72,7 @@ func NewControllerManagerCommand() *cobra.Command {
|
|||
}
|
||||
|
||||
cmd.AddCommand(newVersionCommand())
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ func newVersionCommand() *cobra.Command {
|
|||
return &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print the version of KubeSphere controller-manager",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
Run: func(cmd *cobra.Command, _ []string) {
|
||||
cmd.Println(version.Get())
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,7 +20,6 @@ import (
|
|||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
|
||||
|
||||
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
|
|
@ -34,15 +33,17 @@ func newArtifactCommand() *cobra.Command {
|
|||
|
||||
cmd.AddCommand(newArtifactExportCommand())
|
||||
cmd.AddCommand(newArtifactImagesCommand())
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newArtifactExportCommand() *cobra.Command {
|
||||
o := options.NewArtifactExportOptions()
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "export",
|
||||
Short: "Export a KubeKey offline installation package",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/artifact_export.yaml"})
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -55,7 +56,8 @@ func newArtifactExportCommand() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
return run(signals.SetupSignalHandler(), pipeline, config, inventory)
|
||||
|
||||
return run(ctx, pipeline, config, inventory)
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -63,15 +65,17 @@ func newArtifactExportCommand() *cobra.Command {
|
|||
for _, f := range o.Flags().FlagSets {
|
||||
flags.AddFlagSet(f)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newArtifactImagesCommand() *cobra.Command {
|
||||
o := options.NewArtifactImagesOptions()
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "images",
|
||||
Short: "push images to a registry from an artifact",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/artifact_images.yaml"})
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -84,7 +88,8 @@ func newArtifactImagesCommand() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
return run(signals.SetupSignalHandler(), pipeline, config, inventory)
|
||||
|
||||
return run(ctx, pipeline, config, inventory)
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -92,6 +97,7 @@ func newArtifactImagesCommand() *cobra.Command {
|
|||
for _, f := range o.Flags().FlagSets {
|
||||
flags.AddFlagSet(f)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -23,7 +23,6 @@ import (
|
|||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
|
||||
|
||||
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
|
|
@ -36,15 +35,17 @@ func newCertsCommand() *cobra.Command {
|
|||
}
|
||||
|
||||
cmd.AddCommand(newCertsRenewCommand())
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newCertsRenewCommand() *cobra.Command {
|
||||
o := options.NewCertsRenewOptions()
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "renew",
|
||||
Short: "renew a cluster certs",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/certs_renew.yaml"})
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -57,7 +58,8 @@ func newCertsRenewCommand() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
return run(signals.SetupSignalHandler(), pipeline, config, inventory)
|
||||
|
||||
return run(ctx, pipeline, config, inventory)
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -65,6 +67,7 @@ func newCertsRenewCommand() *cobra.Command {
|
|||
for _, f := range o.Flags().FlagSets {
|
||||
flags.AddFlagSet(f)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -23,7 +23,6 @@ import (
|
|||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
|
||||
|
||||
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
|
|
@ -36,15 +35,17 @@ func newCreateCommand() *cobra.Command {
|
|||
}
|
||||
|
||||
cmd.AddCommand(newCreateClusterCommand())
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newCreateClusterCommand() *cobra.Command {
|
||||
o := options.NewCreateClusterOptions()
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "cluster",
|
||||
Short: "Create a Kubernetes or KubeSphere cluster",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/create_cluster.yaml"})
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -57,7 +58,8 @@ func newCreateClusterCommand() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
return run(signals.SetupSignalHandler(), pipeline, config, inventory)
|
||||
|
||||
return run(ctx, pipeline, config, inventory)
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -65,6 +67,7 @@ func newCreateClusterCommand() *cobra.Command {
|
|||
for _, f := range o.Flags().FlagSets {
|
||||
flags.AddFlagSet(f)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -20,7 +20,6 @@ import (
|
|||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
|
||||
|
||||
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
|
|
@ -34,15 +33,17 @@ func newInitCommand() *cobra.Command {
|
|||
|
||||
cmd.AddCommand(newInitOSCommand())
|
||||
cmd.AddCommand(newInitRegistryCommand())
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newInitOSCommand() *cobra.Command {
|
||||
o := options.NewInitOSOptions()
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "os",
|
||||
Short: "Init operating system",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/init_os.yaml"})
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -55,7 +56,8 @@ func newInitOSCommand() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
return run(signals.SetupSignalHandler(), pipeline, config, inventory)
|
||||
|
||||
return run(ctx, pipeline, config, inventory)
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -63,15 +65,17 @@ func newInitOSCommand() *cobra.Command {
|
|||
for _, f := range o.Flags().FlagSets {
|
||||
flags.AddFlagSet(f)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func newInitRegistryCommand() *cobra.Command {
|
||||
o := options.NewInitRegistryOptions()
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "registry",
|
||||
Short: "Init a local image registry",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
pipeline, config, inventory, err := o.Complete(cmd, []string{"playbooks/init_registry.yaml"})
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -84,7 +88,8 @@ func newInitRegistryCommand() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
return run(signals.SetupSignalHandler(), pipeline, config, inventory)
|
||||
|
||||
return run(ctx, pipeline, config, inventory)
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -92,6 +97,7 @@ func newInitRegistryCommand() *cobra.Command {
|
|||
for _, f := range o.Flags().FlagSets {
|
||||
flags.AddFlagSet(f)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -30,16 +30,20 @@ import (
|
|||
// artifact export
|
||||
// ======================================================================================
|
||||
|
||||
// ArtifactExportOptions for NewArtifactExportOptions
|
||||
type ArtifactExportOptions struct {
|
||||
CommonOptions
|
||||
commonOptions
|
||||
}
|
||||
|
||||
// Flags add to newArtifactExportCommand
|
||||
func (o *ArtifactExportOptions) Flags() cliflag.NamedFlagSets {
|
||||
fss := o.CommonOptions.Flags()
|
||||
fss := o.commonOptions.flags()
|
||||
|
||||
return fss
|
||||
}
|
||||
|
||||
func (o ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
// Complete options. create Pipeline, Config and Inventory
|
||||
func (o *ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
pipeline := &kkcorev1.Pipeline{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "artifact-export-",
|
||||
|
|
@ -51,16 +55,16 @@ func (o ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kkc
|
|||
}
|
||||
|
||||
// complete playbook. now only support one playbook
|
||||
if len(args) == 1 {
|
||||
o.Playbook = args[0]
|
||||
} else {
|
||||
if len(args) != 1 {
|
||||
return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
|
||||
}
|
||||
o.Playbook = args[0]
|
||||
|
||||
pipeline.Spec = kkcorev1.PipelineSpec{
|
||||
Playbook: o.Playbook,
|
||||
Debug: o.Debug,
|
||||
}
|
||||
|
||||
config, inventory, err := o.completeRef(pipeline)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
|
|
@ -69,25 +73,30 @@ func (o ArtifactExportOptions) Complete(cmd *cobra.Command, args []string) (*kkc
|
|||
return pipeline, config, inventory, nil
|
||||
}
|
||||
|
||||
// NewArtifactExportOptions for newArtifactExportCommand
|
||||
func NewArtifactExportOptions() *ArtifactExportOptions {
|
||||
// set default value
|
||||
return &ArtifactExportOptions{CommonOptions: newCommonOptions()}
|
||||
return &ArtifactExportOptions{commonOptions: newCommonOptions()}
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// artifact image
|
||||
// ======================================================================================
|
||||
|
||||
// ArtifactImagesOptions for NewArtifactImagesOptions
|
||||
type ArtifactImagesOptions struct {
|
||||
CommonOptions
|
||||
commonOptions
|
||||
}
|
||||
|
||||
// Flags add to newArtifactImagesCommand
|
||||
func (o *ArtifactImagesOptions) Flags() cliflag.NamedFlagSets {
|
||||
fss := o.CommonOptions.Flags()
|
||||
fss := o.commonOptions.flags()
|
||||
|
||||
return fss
|
||||
}
|
||||
|
||||
func (o ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
// Complete options. create Pipeline, Config and Inventory
|
||||
func (o *ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
pipeline := &kkcorev1.Pipeline{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "artifact-images-",
|
||||
|
|
@ -99,17 +108,17 @@ func (o ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kkc
|
|||
}
|
||||
|
||||
// complete playbook. now only support one playbook
|
||||
if len(args) == 1 {
|
||||
o.Playbook = args[0]
|
||||
} else {
|
||||
if len(args) != 1 {
|
||||
return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
|
||||
}
|
||||
o.Playbook = args[0]
|
||||
|
||||
pipeline.Spec = kkcorev1.PipelineSpec{
|
||||
Playbook: o.Playbook,
|
||||
Debug: o.Debug,
|
||||
Tags: []string{"only_image"},
|
||||
}
|
||||
|
||||
config, inventory, err := o.completeRef(pipeline)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
|
|
@ -118,7 +127,8 @@ func (o ArtifactImagesOptions) Complete(cmd *cobra.Command, args []string) (*kkc
|
|||
return pipeline, config, inventory, nil
|
||||
}
|
||||
|
||||
// NewArtifactImagesOptions for newArtifactImagesCommand
|
||||
func NewArtifactImagesOptions() *ArtifactImagesOptions {
|
||||
// set default value
|
||||
return &ArtifactImagesOptions{CommonOptions: newCommonOptions()}
|
||||
return &ArtifactImagesOptions{commonOptions: newCommonOptions()}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,20 +26,25 @@ import (
|
|||
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
|
||||
)
|
||||
|
||||
// NewCertsRenewOptions for newCertsRenewCommand
|
||||
func NewCertsRenewOptions() *CertsRenewOptions {
|
||||
// set default value
|
||||
return &CertsRenewOptions{CommonOptions: newCommonOptions()}
|
||||
return &CertsRenewOptions{commonOptions: newCommonOptions()}
|
||||
}
|
||||
|
||||
// CertsRenewOptions for NewCertsRenewOptions
|
||||
type CertsRenewOptions struct {
|
||||
CommonOptions
|
||||
commonOptions
|
||||
}
|
||||
|
||||
// Flags add to newCertsRenewCommand
|
||||
func (o *CertsRenewOptions) Flags() cliflag.NamedFlagSets {
|
||||
fss := o.CommonOptions.Flags()
|
||||
fss := o.commonOptions.flags()
|
||||
|
||||
return fss
|
||||
}
|
||||
|
||||
// Complete options. create Pipeline, Config and Inventory
|
||||
func (o *CertsRenewOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
pipeline := &kkcorev1.Pipeline{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
|
@ -52,17 +57,17 @@ func (o *CertsRenewOptions) Complete(cmd *cobra.Command, args []string) (*kkcore
|
|||
}
|
||||
|
||||
// complete playbook. now only support one playbook
|
||||
if len(args) == 1 {
|
||||
o.Playbook = args[0]
|
||||
} else {
|
||||
if len(args) != 1 {
|
||||
return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
|
||||
}
|
||||
o.Playbook = args[0]
|
||||
|
||||
pipeline.Spec = kkcorev1.PipelineSpec{
|
||||
Playbook: o.Playbook,
|
||||
Debug: o.Debug,
|
||||
Tags: []string{"certs"},
|
||||
}
|
||||
|
||||
config, inventory, err := o.completeRef(pipeline)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
|
|
|
|||
|
|
@ -17,10 +17,10 @@ limitations under the License.
|
|||
package options
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strings"
|
||||
|
|
@ -39,16 +39,19 @@ var (
|
|||
profileOutput string
|
||||
)
|
||||
|
||||
// AddProfilingFlags to NewRootCommand
|
||||
func AddProfilingFlags(flags *pflag.FlagSet) {
|
||||
flags.StringVar(&profileName, "profile", "none", "Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex)")
|
||||
flags.StringVar(&profileOutput, "profile-output", "profile.pprof", "Name of the file to write the profile to")
|
||||
}
|
||||
|
||||
func InitProfiling() error {
|
||||
// InitProfiling for profileName
|
||||
func InitProfiling(ctx context.Context) error {
|
||||
var (
|
||||
f *os.File
|
||||
err error
|
||||
)
|
||||
|
||||
switch profileName {
|
||||
case "none":
|
||||
return nil
|
||||
|
|
@ -57,6 +60,7 @@ func InitProfiling() error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = pprof.StartCPUProfile(f)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -76,22 +80,22 @@ func InitProfiling() error {
|
|||
|
||||
// If the command is interrupted before the end (ctrl-c), flush the
|
||||
// profiling files
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt)
|
||||
|
||||
go func() {
|
||||
<-c
|
||||
<-ctx.Done()
|
||||
if err := f.Close(); err != nil {
|
||||
fmt.Printf("failed to close file. file: %v. error: %v \n", profileOutput, err)
|
||||
}
|
||||
|
||||
if err := FlushProfiling(); err != nil {
|
||||
fmt.Printf("failed to FlushProfiling. file: %v. error: %v \n", profileOutput, err)
|
||||
}
|
||||
os.Exit(0)
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FlushProfiling to local file
|
||||
func FlushProfiling() error {
|
||||
switch profileName {
|
||||
case "none":
|
||||
|
|
@ -100,17 +104,20 @@ func FlushProfiling() error {
|
|||
pprof.StopCPUProfile()
|
||||
case "heap":
|
||||
runtime.GC()
|
||||
|
||||
fallthrough
|
||||
default:
|
||||
profile := pprof.Lookup(profileName)
|
||||
if profile == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
f, err := os.Create(profileOutput)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if err := profile.WriteTo(f, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
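`InitProfiling` now waits on the shutdown context instead of registering its own `signal.Notify` handler, and flushes the profile once the context is cancelled. A standard-library-only sketch of the same idea for a CPU profile; the helper and file names are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"runtime/pprof"
)

// startCPUProfile starts a CPU profile and returns a flush function that the
// caller runs once the shutdown context is done.
func startCPUProfile(path string) (func(), error) {
	f, err := os.Create(path)
	if err != nil {
		return nil, err
	}
	if err := pprof.StartCPUProfile(f); err != nil {
		f.Close()

		return nil, err
	}

	return func() {
		pprof.StopCPUProfile()
		if err := f.Close(); err != nil {
			fmt.Printf("failed to close %s: %v\n", path, err)
		}
	}, nil
}

func main() {
	// ctx is cancelled on Ctrl-C, mirroring the <-ctx.Done() wait above.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	flush, err := startCPUProfile("cpu.pprof")
	if err != nil {
		fmt.Println(err)

		return
	}
	<-ctx.Done()
	flush()
}
```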
@ -125,11 +132,13 @@ func FlushProfiling() error {
|
|||
|
||||
var gops bool
|
||||
|
||||
// AddGOPSFlags to NewRootCommand
|
||||
func AddGOPSFlags(flags *pflag.FlagSet) {
|
||||
flags.BoolVar(&gops, "gops", false, "Whether to enable gops or not. When enabled this option, "+
|
||||
"controller-manager will listen on a random port on 127.0.0.1, then you can use the gops tool to list and diagnose the controller-manager currently running.")
|
||||
}
|
||||
|
||||
// InitGOPS if gops is true
|
||||
func InitGOPS() error {
|
||||
if gops {
|
||||
// Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
|
||||
|
|
@ -138,6 +147,7 @@ func InitGOPS() error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -145,6 +155,7 @@ func InitGOPS() error {
|
|||
// KLOG
|
||||
// ======================================================================================
|
||||
|
||||
// AddKlogFlags to NewRootCommand
|
||||
func AddKlogFlags(fs *pflag.FlagSet) {
|
||||
local := flag.NewFlagSet("klog", flag.ExitOnError)
|
||||
klog.InitFlags(local)
|
||||
|
|
|
|||
|
|
@ -26,27 +26,32 @@ import (
|
|||
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
|
||||
)
|
||||
|
||||
// NewCreateClusterOptions for newCreateClusterCommand
|
||||
func NewCreateClusterOptions() *CreateClusterOptions {
|
||||
// set default value
|
||||
return &CreateClusterOptions{CommonOptions: newCommonOptions()}
|
||||
return &CreateClusterOptions{commonOptions: newCommonOptions()}
|
||||
}
|
||||
|
||||
// CreateClusterOptions for NewCreateClusterOptions
|
||||
type CreateClusterOptions struct {
|
||||
CommonOptions
|
||||
commonOptions
|
||||
// kubernetes version which the cluster will install.
|
||||
Kubernetes string
|
||||
// ContainerRuntime for kubernetes. Such as docker, containerd etc.
|
||||
ContainerManager string
|
||||
}
|
||||
|
||||
// Flags add to newCreateClusterCommand
|
||||
func (o *CreateClusterOptions) Flags() cliflag.NamedFlagSets {
|
||||
fss := o.CommonOptions.Flags()
|
||||
fss := o.commonOptions.flags()
|
||||
kfs := fss.FlagSet("config")
|
||||
kfs.StringVar(&o.Kubernetes, "with-kubernetes", "", "Specify a supported version of kubernetes")
|
||||
kfs.StringVar(&o.ContainerManager, "container-manager", "", "Container runtime: docker, crio, containerd and isula.")
|
||||
|
||||
return fss
|
||||
}
|
||||
|
||||
// Complete options. create Pipeline, Config and Inventory
|
||||
func (o *CreateClusterOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
pipeline := &kkcorev1.Pipeline{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
|
@ -59,20 +64,21 @@ func (o *CreateClusterOptions) Complete(cmd *cobra.Command, args []string) (*kkc
|
|||
}
|
||||
|
||||
// complete playbook. now only support one playbook
|
||||
if len(args) == 1 {
|
||||
o.Playbook = args[0]
|
||||
} else {
|
||||
if len(args) != 1 {
|
||||
return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
|
||||
}
|
||||
o.Playbook = args[0]
|
||||
|
||||
pipeline.Spec = kkcorev1.PipelineSpec{
|
||||
Playbook: o.Playbook,
|
||||
Debug: o.Debug,
|
||||
}
|
||||
|
||||
config, inventory, err := o.completeRef(pipeline)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
if o.Kubernetes != "" {
|
||||
// override kube_version in config
|
||||
if err := config.SetValue("kube_version", o.Kubernetes); err != nil {
|
||||
|
|
|
|||
|
|
@ -30,16 +30,20 @@ import (
|
|||
// init os
|
||||
// ======================================================================================
|
||||
|
||||
// InitOSOptions for NewInitOSOptions
|
||||
type InitOSOptions struct {
|
||||
CommonOptions
|
||||
commonOptions
|
||||
}
|
||||
|
||||
// Flags add to newInitOSCommand
|
||||
func (o *InitOSOptions) Flags() cliflag.NamedFlagSets {
|
||||
fss := o.CommonOptions.Flags()
|
||||
fss := o.commonOptions.flags()
|
||||
|
||||
return fss
|
||||
}
|
||||
|
||||
func (o InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
// Complete options. create Pipeline, Config and Inventory
|
||||
func (o *InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
pipeline := &kkcorev1.Pipeline{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "init-os-",
|
||||
|
|
@ -51,16 +55,16 @@ func (o InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pi
|
|||
}
|
||||
|
||||
// complete playbook. now only support one playbook
|
||||
if len(args) == 1 {
|
||||
o.Playbook = args[0]
|
||||
} else {
|
||||
if len(args) != 1 {
|
||||
return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
|
||||
}
|
||||
o.Playbook = args[0]
|
||||
|
||||
pipeline.Spec = kkcorev1.PipelineSpec{
|
||||
Playbook: o.Playbook,
|
||||
Debug: o.Debug,
|
||||
}
|
||||
|
||||
config, inventory, err := o.completeRef(pipeline)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
|
|
@ -69,25 +73,30 @@ func (o InitOSOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pi
|
|||
return pipeline, config, inventory, nil
|
||||
}
|
||||
|
||||
// NewInitOSOptions for newInitOSCommand
|
||||
func NewInitOSOptions() *InitOSOptions {
|
||||
// set default value
|
||||
return &InitOSOptions{CommonOptions: newCommonOptions()}
|
||||
return &InitOSOptions{commonOptions: newCommonOptions()}
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// init registry
|
||||
// ======================================================================================
|
||||
|
||||
// InitRegistryOptions for NewInitRegistryOptions
|
||||
type InitRegistryOptions struct {
|
||||
CommonOptions
|
||||
commonOptions
|
||||
}
|
||||
|
||||
// Flags add to newInitRegistryCommand
|
||||
func (o *InitRegistryOptions) Flags() cliflag.NamedFlagSets {
|
||||
fss := o.CommonOptions.Flags()
|
||||
fss := o.commonOptions.flags()
|
||||
|
||||
return fss
|
||||
}
|
||||
|
||||
func (o InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
// Complete options. create Pipeline, Config and Inventory
|
||||
func (o *InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
pipeline := &kkcorev1.Pipeline{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "init-registry-",
|
||||
|
|
@ -99,11 +108,10 @@ func (o InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkcor
|
|||
}
|
||||
|
||||
// complete playbook. now only support one playbook
|
||||
if len(args) == 1 {
|
||||
o.Playbook = args[0]
|
||||
} else {
|
||||
if len(args) != 1 {
|
||||
return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
|
||||
}
|
||||
o.Playbook = args[0]
|
||||
|
||||
pipeline.Spec = kkcorev1.PipelineSpec{
|
||||
Playbook: o.Playbook,
|
||||
|
|
@ -117,7 +125,8 @@ func (o InitRegistryOptions) Complete(cmd *cobra.Command, args []string) (*kkcor
|
|||
return pipeline, config, inventory, nil
|
||||
}
|
||||
|
||||
// NewInitRegistryOptions for newInitRegistryCommand
|
||||
func NewInitRegistryOptions() *InitRegistryOptions {
|
||||
// set default value
|
||||
return &InitRegistryOptions{CommonOptions: newCommonOptions()}
|
||||
return &InitRegistryOptions{commonOptions: newCommonOptions()}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ package options
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
|
@ -45,7 +46,7 @@ var defaultInventory = &kkcorev1.Inventory{
|
|||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default"}}
|
||||
|
||||
type CommonOptions struct {
|
||||
type commonOptions struct {
|
||||
// Playbook which to execute.
|
||||
Playbook string
|
||||
// HostFile is the path of host file
|
||||
|
|
@ -64,10 +65,11 @@ type CommonOptions struct {
|
|||
Namespace string
|
||||
}
|
||||
|
||||
func newCommonOptions() CommonOptions {
|
||||
o := CommonOptions{
|
||||
func newCommonOptions() commonOptions {
|
||||
o := commonOptions{
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
}
|
||||
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "get current dir error")
|
||||
|
|
@ -75,10 +77,11 @@ func newCommonOptions() CommonOptions {
|
|||
} else {
|
||||
o.WorkDir = filepath.Join(wd, "kubekey")
|
||||
}
|
||||
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *CommonOptions) Flags() cliflag.NamedFlagSets {
|
||||
func (o *commonOptions) flags() cliflag.NamedFlagSets {
|
||||
fss := cliflag.NamedFlagSets{}
|
||||
gfs := fss.FlagSet("generic")
|
||||
gfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey. Default current dir. ")
|
||||
|
|
@ -88,10 +91,11 @@ func (o *CommonOptions) Flags() cliflag.NamedFlagSets {
|
|||
gfs.StringVarP(&o.InventoryFile, "inventory", "i", o.InventoryFile, "the host list file path. support *.ini")
|
||||
gfs.BoolVarP(&o.Debug, "debug", "d", o.Debug, "Debug mode, after a successful execution of Pipeline, will retain runtime data, which includes task execution status and parameters.")
|
||||
gfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "the namespace which pipeline will be executed, all reference resources(pipeline, config, inventory, task) should in the same namespace")
|
||||
|
||||
return fss
|
||||
}
|
||||
|
||||
func (o *CommonOptions) completeRef(pipeline *kkcorev1.Pipeline) (*kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
func (o *commonOptions) completeRef(pipeline *kkcorev1.Pipeline) (*kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
if !filepath.IsAbs(o.WorkDir) {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
|
|
@ -99,7 +103,7 @@ func (o *CommonOptions) completeRef(pipeline *kkcorev1.Pipeline) (*kkcorev1.Conf
|
|||
}
|
||||
o.WorkDir = filepath.Join(wd, o.WorkDir)
|
||||
}
|
||||
|
||||
// complete config
|
||||
config, err := o.genConfig()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("generate config error: %w", err)
|
||||
|
|
@ -112,7 +116,7 @@ func (o *CommonOptions) completeRef(pipeline *kkcorev1.Pipeline) (*kkcorev1.Conf
|
|||
APIVersion: config.APIVersion,
|
||||
ResourceVersion: config.ResourceVersion,
|
||||
}
|
||||
|
||||
// complete inventory
|
||||
inventory, err := o.genInventory()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("generate inventory error: %w", err)
|
||||
|
|
@ -130,7 +134,7 @@ func (o *CommonOptions) completeRef(pipeline *kkcorev1.Pipeline) (*kkcorev1.Conf
|
|||
}
|
||||
|
||||
// genConfig generate config by ConfigFile and set value by command args.
|
||||
func (o *CommonOptions) genConfig() (*kkcorev1.Config, error) {
|
||||
func (o *commonOptions) genConfig() (*kkcorev1.Config, error) {
|
||||
config := defaultConfig.DeepCopy()
|
||||
if o.ConfigFile != "" {
|
||||
cdata, err := os.ReadFile(o.ConfigFile)
|
||||
|
|
@ -142,13 +146,15 @@ func (o *CommonOptions) genConfig() (*kkcorev1.Config, error) {
|
|||
return nil, fmt.Errorf("unmarshal config file error: %w", err)
|
||||
}
|
||||
}
|
||||
// set by command args
|
||||
// set value by command args
|
||||
if o.Namespace != "" {
|
||||
config.Namespace = o.Namespace
|
||||
}
|
||||
if wd, err := config.GetValue("work_dir"); err == nil && wd != nil {
|
||||
// if work_dir is defined in config, use it. otherwise use current dir.
|
||||
o.WorkDir = wd.(string)
|
||||
if workDir, ok := wd.(string); ok {
|
||||
o.WorkDir = workDir
|
||||
}
|
||||
} else if err := config.SetValue("work_dir", o.WorkDir); err != nil {
|
||||
return nil, fmt.Errorf("work_dir to config error: %w", err)
|
||||
}
|
||||
|
|
@ -162,7 +168,7 @@ func (o *CommonOptions) genConfig() (*kkcorev1.Config, error) {
|
|||
for _, setVal := range strings.Split(unescapeString(s), ",") {
|
||||
i := strings.Index(setVal, "=")
|
||||
if i == 0 || i == -1 {
|
||||
return nil, fmt.Errorf("--set value should be k=v")
|
||||
return nil, errors.New("--set value should be k=v")
|
||||
}
|
||||
if err := setValue(config, setVal[:i], setVal[i+1:]); err != nil {
|
||||
return nil, fmt.Errorf("--set value to config error: %w", err)
|
||||
|
|
@ -174,21 +180,23 @@ func (o *CommonOptions) genConfig() (*kkcorev1.Config, error) {
|
|||
}
|
||||
|
||||
// genConfig generate config by ConfigFile and set value by command args.
|
||||
func (o *CommonOptions) genInventory() (*kkcorev1.Inventory, error) {
|
||||
func (o *commonOptions) genInventory() (*kkcorev1.Inventory, error) {
|
||||
inventory := defaultInventory.DeepCopy()
|
||||
if o.InventoryFile != "" {
|
||||
cdata, err := os.ReadFile(o.InventoryFile)
|
||||
if err != nil {
|
||||
klog.V(4).ErrorS(err, "read config file error")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
inventory = &kkcorev1.Inventory{}
|
||||
if err := yaml.Unmarshal(cdata, inventory); err != nil {
|
||||
klog.V(4).ErrorS(err, "unmarshal config file error")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// set by command args
|
||||
// set value by command args
|
||||
if o.Namespace != "" {
|
||||
inventory.Namespace = o.Namespace
|
||||
}
|
||||
|
|
@ -197,9 +205,9 @@ func (o *CommonOptions) genInventory() (*kkcorev1.Inventory, error) {
|
|||
}
|
||||
|
||||
// setValue set key: val in config.
|
||||
// if val is json string. convert to map or slice
|
||||
// if val is TRUE,YES,Y. convert to bool type true.
|
||||
// if val is FALSE,NO,N. convert to bool type false.
|
||||
// If val is json string. convert to map or slice
|
||||
// If val is TRUE,YES,Y. convert to bool type true.
|
||||
// If val is FALSE,NO,N. convert to bool type false.
|
||||
func setValue(config *kkcorev1.Config, key, val string) error {
|
||||
switch {
|
||||
case strings.HasPrefix(val, "{") && strings.HasSuffix(val, "{"):
|
||||
|
|
@ -208,6 +216,7 @@ func setValue(config *kkcorev1.Config, key, val string) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return config.SetValue(key, value)
|
||||
case strings.HasPrefix(val, "[") && strings.HasSuffix(val, "]"):
|
||||
var value []any
|
||||
|
|
@ -215,6 +224,7 @@ func setValue(config *kkcorev1.Config, key, val string) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return config.SetValue(key, value)
|
||||
case strings.EqualFold(val, "TRUE") || strings.EqualFold(val, "YES") || strings.EqualFold(val, "Y"):
|
||||
return config.SetValue(key, true)
|
||||
|
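To make the `--set k=v` coercion rules above concrete, here is a small stand-alone sketch (not the project API): JSON objects and arrays are decoded, TRUE/YES/Y and FALSE/NO/N become booleans, and anything else stays a string. It assumes the usual `{...}`/`[...]` delimiter check:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// coerce mirrors the --set handling described above; it is illustrative only.
func coerce(val string) (any, error) {
	switch {
	case strings.HasPrefix(val, "{") && strings.HasSuffix(val, "}"):
		var m map[string]any
		if err := json.Unmarshal([]byte(val), &m); err != nil {
			return nil, err
		}

		return m, nil
	case strings.HasPrefix(val, "[") && strings.HasSuffix(val, "]"):
		var s []any
		if err := json.Unmarshal([]byte(val), &s); err != nil {
			return nil, err
		}

		return s, nil
	case strings.EqualFold(val, "TRUE"), strings.EqualFold(val, "YES"), strings.EqualFold(val, "Y"):
		return true, nil
	case strings.EqualFold(val, "FALSE"), strings.EqualFold(val, "NO"), strings.EqualFold(val, "N"):
		return false, nil
	default:
		return val, nil
	}
}

func main() {
	for _, v := range []string{`{"a":1}`, `[1,2]`, "yes", "N", "plain"} {
		out, err := coerce(v)
		fmt.Printf("%q -> %v (%T, err=%v)\n", v, out, out, err)
	}
}
```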
|
|
|||
|
|
@ -5,24 +5,28 @@ import (
|
|||
cliflag "k8s.io/component-base/cli/flag"
|
||||
)
|
||||
|
||||
// PipelineOptions for NewPipelineOptions
|
||||
type PipelineOptions struct {
|
||||
Name string
|
||||
Namespace string
|
||||
WorkDir string
|
||||
}
|
||||
|
||||
func NewPipelineOption() *PipelineOptions {
|
||||
// NewPipelineOptions for newPipelineCommand
|
||||
func NewPipelineOptions() *PipelineOptions {
|
||||
return &PipelineOptions{
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
WorkDir: "/kubekey",
|
||||
}
|
||||
}
|
||||
|
||||
// Flags add to newPipelineCommand
|
||||
func (o *PipelineOptions) Flags() cliflag.NamedFlagSets {
|
||||
fss := cliflag.NamedFlagSets{}
|
||||
pfs := fss.FlagSet("pipeline flags")
|
||||
pfs.StringVar(&o.Name, "name", o.Name, "name of pipeline")
|
||||
pfs.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "namespace of pipeline")
|
||||
pfs.StringVar(&o.WorkDir, "work-dir", o.WorkDir, "the base Dir for kubekey. Default current dir. ")
|
||||
|
||||
return fss
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,19 +26,23 @@ import (
|
|||
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
|
||||
)
|
||||
|
||||
// NewPreCheckOptions for newPreCheckCommand
|
||||
func NewPreCheckOptions() *PreCheckOptions {
|
||||
// set default value
|
||||
return &PreCheckOptions{CommonOptions: newCommonOptions()}
|
||||
return &PreCheckOptions{commonOptions: newCommonOptions()}
|
||||
}
|
||||
|
||||
// PreCheckOptions for NewPreCheckOptions
|
||||
type PreCheckOptions struct {
|
||||
CommonOptions
|
||||
commonOptions
|
||||
}
|
||||
|
||||
// Flags add to newPreCheckCommand
|
||||
func (o *PreCheckOptions) Flags() cliflag.NamedFlagSets {
|
||||
return o.CommonOptions.Flags()
|
||||
return o.commonOptions.flags()
|
||||
}
|
||||
|
||||
// Complete options. create Pipeline, Config and Inventory
|
||||
func (o *PreCheckOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
pipeline := &kkcorev1.Pipeline{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
|
|
|||
|
|
@ -26,8 +26,9 @@ import (
|
|||
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
|
||||
)
|
||||
|
||||
type KubekeyRunOptions struct {
|
||||
CommonOptions
|
||||
// KubeKeyRunOptions for NewKubeKeyRunOptions
|
||||
type KubeKeyRunOptions struct {
|
||||
commonOptions
|
||||
// ProjectAddr is the storage for executable packages (in Ansible format).
|
||||
// When starting with http or https, it will be obtained from a Git repository.
|
||||
// When starting with file path, it will be obtained from the local path.
|
||||
|
|
@ -49,16 +50,19 @@ type KubekeyRunOptions struct {
|
|||
SkipTags []string
|
||||
}
|
||||
|
||||
func NewKubeKeyRunOptions() *KubekeyRunOptions {
|
||||
// NewKubeKeyRunOptions for newRunCommand
|
||||
func NewKubeKeyRunOptions() *KubeKeyRunOptions {
|
||||
// add default values
|
||||
o := &KubekeyRunOptions{
|
||||
CommonOptions: newCommonOptions(),
|
||||
o := &KubeKeyRunOptions{
|
||||
commonOptions: newCommonOptions(),
|
||||
}
|
||||
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *KubekeyRunOptions) Flags() cliflag.NamedFlagSets {
|
||||
fss := o.CommonOptions.Flags()
|
||||
// Flags add to newRunCommand
|
||||
func (o *KubeKeyRunOptions) Flags() cliflag.NamedFlagSets {
|
||||
fss := o.commonOptions.flags()
|
||||
gitfs := fss.FlagSet("project")
|
||||
gitfs.StringVar(&o.ProjectAddr, "project-addr", o.ProjectAddr, "the storage for executable packages (in Ansible format)."+
|
||||
" When starting with http or https, it will be obtained from a Git repository."+
|
||||
|
|
@ -75,20 +79,20 @@ func (o *KubekeyRunOptions) Flags() cliflag.NamedFlagSets {
|
|||
return fss
|
||||
}
|
||||
|
||||
func (o *KubekeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
// Complete options. create Pipeline, Config and Inventory
|
||||
func (o *KubeKeyRunOptions) Complete(cmd *cobra.Command, args []string) (*kkcorev1.Pipeline, *kkcorev1.Config, *kkcorev1.Inventory, error) {
|
||||
pipeline := &kkcorev1.Pipeline{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "run-",
|
||||
Namespace: o.Namespace,
|
||||
Annotations: map[string]string{},
|
||||
Annotations: make(map[string]string),
|
||||
},
|
||||
}
|
||||
// complete playbook. now only support one playbook
|
||||
if len(args) == 1 {
|
||||
o.Playbook = args[0]
|
||||
} else {
|
||||
if len(args) != 1 {
|
||||
return nil, nil, nil, fmt.Errorf("%s\nSee '%s -h' for help and examples", cmd.Use, cmd.CommandPath())
|
||||
}
|
||||
o.Playbook = args[0]
|
||||
|
||||
pipeline.Spec = kkcorev1.PipelineSpec{
|
||||
Project: kkcorev1.PipelineProject{
|
||||
|
|
|
|||
|
|
@ -8,7 +8,6 @@ import (
|
|||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
|
||||
|
||||
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
|
||||
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
|
||||
|
|
@ -18,12 +17,12 @@ import (
|
|||
)
|
||||
|
||||
func newPipelineCommand() *cobra.Command {
|
||||
o := options.NewPipelineOption()
|
||||
o := options.NewPipelineOptions()
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "pipeline",
|
||||
Short: "Executor a pipeline in kubernetes",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(*cobra.Command, []string) error {
|
||||
_const.SetWorkDir(o.WorkDir)
|
||||
restconfig, err := ctrl.GetConfig()
|
||||
if err != nil {
|
||||
|
|
@ -34,28 +33,31 @@ func newPipelineCommand() *cobra.Command {
|
|||
if err != nil {
|
||||
return fmt.Errorf("could not get rest config: %w", err)
|
||||
}
|
||||
|
||||
client, err := ctrlclient.New(restconfig, ctrlclient.Options{
|
||||
Scheme: _const.Scheme,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create client: %w", err)
|
||||
}
|
||||
ctx := signals.SetupSignalHandler()
|
||||
// get pipeline
|
||||
var pipeline = new(kkcorev1.Pipeline)
|
||||
var config = new(kkcorev1.Config)
|
||||
var inventory = new(kkcorev1.Inventory)
|
||||
if err := client.Get(ctx, ctrlclient.ObjectKey{
|
||||
Name: o.Name,
|
||||
Namespace: o.Namespace,
|
||||
}, pipeline); err != nil {
|
||||
return err
|
||||
}
|
||||
// get config
|
||||
var config = new(kkcorev1.Config)
|
||||
if err := client.Get(ctx, ctrlclient.ObjectKey{
|
||||
Name: pipeline.Spec.ConfigRef.Name,
|
||||
Namespace: pipeline.Spec.ConfigRef.Namespace,
|
||||
}, config); err != nil {
|
||||
return err
|
||||
}
|
||||
// get inventory
|
||||
var inventory = new(kkcorev1.Inventory)
|
||||
if err := client.Get(ctx, ctrlclient.ObjectKey{
|
||||
Name: pipeline.Spec.InventoryRef.Name,
|
||||
Namespace: pipeline.Spec.InventoryRef.Namespace,
|
||||
|
|
@ -76,5 +78,6 @@ func newPipelineCommand() *cobra.Command {
|
|||
for _, f := range o.Flags().FlagSets {
|
||||
fs.AddFlagSet(f)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
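The hunk above mainly moves each variable declaration next to the `client.Get` that fills it. For reference, a minimal sketch of the same controller-runtime lookup pattern against a plain ConfigMap, so it compiles without the kubekey CRD types; the object name and namespace are illustrative:

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// fetch builds a client from the local kubeconfig and reads one object.
func fetch(ctx context.Context) (*corev1.ConfigMap, error) {
	restconfig, err := ctrl.GetConfig()
	if err != nil {
		return nil, fmt.Errorf("could not get rest config: %w", err)
	}
	client, err := ctrlclient.New(restconfig, ctrlclient.Options{Scheme: scheme.Scheme})
	if err != nil {
		return nil, fmt.Errorf("could not create client: %w", err)
	}

	cm := new(corev1.ConfigMap)
	if err := client.Get(ctx, ctrlclient.ObjectKey{Name: "demo", Namespace: "default"}, cm); err != nil {
		return nil, err
	}

	return cm, nil
}

func main() {
	if _, err := fetch(context.Background()); err != nil {
		fmt.Println(err)
	}
}
```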
|
|
|||
|
|
@ -23,7 +23,6 @@ import (
|
|||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
|
||||
|
||||
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
|
|
@ -37,8 +36,7 @@ func newPreCheckCommand() *cobra.Command {
|
|||
Short: "Check if the nodes is eligible for cluster deployment.",
|
||||
Long: "the tags can specify check items. support: etcd, os, network, cri, nfs.",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
args = append(args, "playbooks/precheck.yaml")
|
||||
pipeline, config, inventory, err := o.Complete(cmd, args)
|
||||
pipeline, config, inventory, err := o.Complete(cmd, append(args, "playbooks/precheck.yaml"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -50,7 +48,8 @@ func newPreCheckCommand() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
return run(signals.SetupSignalHandler(), pipeline, config, inventory)
|
||||
|
||||
return run(ctx, pipeline, config, inventory)
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -58,6 +57,7 @@ func newPreCheckCommand() *cobra.Command {
|
|||
for _, f := range o.Flags().FlagSets {
|
||||
flags.AddFlagSet(f)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -18,11 +18,15 @@ package app
|
|||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
|
||||
|
||||
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
|
||||
)
|
||||
|
||||
var internalCommand = []*cobra.Command{}
|
||||
// ctx cancel by shutdown signal
|
||||
var ctx = signals.SetupSignalHandler()
|
||||
|
||||
var internalCommand = make([]*cobra.Command, 0)
|
||||
|
||||
func registerInternalCommand(command *cobra.Command) {
|
||||
for _, c := range internalCommand {
|
||||
|
|
@ -34,6 +38,7 @@ func registerInternalCommand(command *cobra.Command) {
|
|||
internalCommand = append(internalCommand, command)
|
||||
}
|
||||
|
||||
// NewRootCommand console command.
|
||||
func NewRootCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "kk",
|
||||
|
|
@ -42,12 +47,14 @@ func NewRootCommand() *cobra.Command {
|
|||
if err := options.InitGOPS(); err != nil {
|
||||
return err
|
||||
}
|
||||
return options.InitProfiling()
|
||||
|
||||
return options.InitProfiling(ctx)
|
||||
},
|
||||
PersistentPostRunE: func(*cobra.Command, []string) error {
|
||||
return options.FlushProfiling()
|
||||
},
|
||||
}
|
||||
cmd.SetContext(ctx)
|
||||
|
||||
// add common flag
|
||||
flags := cmd.PersistentFlags()
|
||||
|
|
@ -60,5 +67,6 @@ func NewRootCommand() *cobra.Command {
|
|||
cmd.AddCommand(newVersionCommand())
|
||||
// internal command
|
||||
cmd.AddCommand(internalCommand...)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
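The root command now owns the shutdown context (`cmd.SetContext(ctx)`) and threads it into the pre/post-run hooks. A small sketch of that wiring; `initProfiling`/`flushProfiling` are stand-ins for the options package, not its real API:

```go
package main

import (
	"context"
	"fmt"

	"github.com/spf13/cobra"
)

func newRoot(ctx context.Context) *cobra.Command {
	cmd := &cobra.Command{
		Use: "demo",
		PersistentPreRunE: func(c *cobra.Command, _ []string) error {
			// c.Context() is the context attached via SetContext below.
			return initProfiling(c.Context())
		},
		PersistentPostRunE: func(*cobra.Command, []string) error {
			return flushProfiling()
		},
		RunE: func(*cobra.Command, []string) error {
			fmt.Println("running")

			return nil
		},
	}
	cmd.SetContext(ctx)

	return cmd
}

func initProfiling(ctx context.Context) error {
	fmt.Println("init profiling, cancelled:", ctx.Err() != nil)

	return nil
}

func flushProfiling() error {
	fmt.Println("flush profiling")

	return nil
}

func main() {
	if err := newRoot(context.Background()).Execute(); err != nil {
		fmt.Println(err)
	}
}
```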
|
|
|||
|
|
@ -25,7 +25,6 @@ import (
|
|||
"k8s.io/client-go/rest"
|
||||
"k8s.io/klog/v2"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
|
||||
|
||||
"github.com/kubesphere/kubekey/v4/cmd/kk/app/options"
|
||||
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
|
||||
|
|
@ -53,13 +52,15 @@ func newRunCommand() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
}
|
||||
return run(signals.SetupSignalHandler(), kk, config, inventory)
|
||||
|
||||
return run(ctx, kk, config, inventory)
|
||||
},
|
||||
}
|
||||
|
||||
for _, f := range o.Flags().FlagSets {
|
||||
cmd.Flags().AddFlagSet(f)
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
|
@ -75,18 +76,23 @@ func run(ctx context.Context, pipeline *kkcorev1.Pipeline, config *kkcorev1.Conf
|
|||
return fmt.Errorf("could not get runtime-client: %w", err)
|
||||
}
|
||||
|
||||
// create config, inventory and pipeline
|
||||
// create config
|
||||
if err := client.Create(ctx, config); err != nil {
|
||||
klog.ErrorS(err, "Create config error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
// create inventory
|
||||
if err := client.Create(ctx, inventory); err != nil {
|
||||
klog.ErrorS(err, "Create inventory error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
// create pipeline
|
||||
pipeline.Status.Phase = kkcorev1.PipelinePhaseRunning
|
||||
if err := client.Create(ctx, pipeline); err != nil {
|
||||
klog.ErrorS(err, "Create pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ func newVersionCommand() *cobra.Command {
return &cobra.Command{
Use: "version",
Short: "Print the version of KubeSphere controller-manager",
Run: func(cmd *cobra.Command, args []string) {
Run: func(cmd *cobra.Command, _ []string) {
cmd.Println(version.Get())
},
}
|
|
|
|||
|
|
@ -17,6 +17,7 @@ spec:
|
|||
- name: v1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: Config store global vars for playbook.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
|
|
@ -17,6 +17,7 @@ spec:
|
|||
- name: v1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: Inventory store hosts vars for playbook.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
|
|
@ -36,9 +37,11 @@ spec:
|
|||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: InventorySpec of Inventory
|
||||
properties:
|
||||
groups:
|
||||
additionalProperties:
|
||||
description: InventoryGroup of Inventory
|
||||
properties:
|
||||
groups:
|
||||
items:
|
||||
|
|
@ -30,6 +30,7 @@ spec:
|
|||
name: v1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: Pipeline resource executor a playbook.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
|
|
@ -49,6 +50,7 @@ spec:
|
|||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: PipelineSpec of pipeline.
|
||||
properties:
|
||||
configRef:
|
||||
description: ConfigRef is the global variable configuration for playbook
|
||||
|
|
@ -1972,14 +1974,19 @@ spec:
|
|||
- playbook
|
||||
type: object
|
||||
status:
|
||||
description: PipelineStatus of Pipeline
|
||||
properties:
|
||||
failedDetail:
|
||||
description: FailedDetail will record the failed tasks.
|
||||
items:
|
||||
description: PipelineFailedDetail store failed message when pipeline
|
||||
run failed.
|
||||
properties:
|
||||
hosts:
|
||||
description: failed Hosts Result of failed task.
|
||||
items:
|
||||
description: PipelineFailedDetailHost detail failed message
|
||||
for each host.
|
||||
properties:
|
||||
host:
|
||||
description: Host name of failed task.
|
||||
|
|
@ -99,6 +99,7 @@ gen_cert:
Pull images into a local directory, or push images to a remote registry.
```yaml
image:
  skip_tls_verify: true
  pull: ["image1", "image2"]
  push:
    registry: local.kubekey
@ -106,6 +107,7 @@ image:
    password: password
    namespace_override: new_namespace
```
**skip_tls_verify**: skip TLS certificate verification. Defaults to true.
**pull**: pull images into the local work directory; optional. The value is written with [template syntax](101-syntax.md) and is evaluated separately for each host.
**push**: push images from the work directory to a remote registry; optional. The value is written with [template syntax](101-syntax.md) and is evaluated separately for each host.
**registry**: the remote registry address; required. The value is written with [template syntax](101-syntax.md) and is evaluated separately for each host.
2 go.mod
|
|
@ -8,7 +8,6 @@ require (
|
|||
github.com/go-git/go-git/v5 v5.11.0
|
||||
github.com/google/gops v0.3.28
|
||||
github.com/opencontainers/image-spec v1.1.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pkg/sftp v1.13.6
|
||||
github.com/schollz/progressbar/v3 v3.14.5
|
||||
github.com/spf13/cobra v1.8.0
|
||||
|
|
@ -84,6 +83,7 @@ require (
|
|||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/pjbgf/sha1cd v0.3.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.18.0 // indirect
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
|
|
@ -30,6 +31,7 @@ import (
|
|||
// +k8s:openapi-gen=true
|
||||
// +kubebuilder:resource:scope=Namespaced
|
||||
|
||||
// Config store global vars for playbook.
|
||||
type Config struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
|
@ -38,6 +40,7 @@ type Config struct {
|
|||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ConfigList of Config
|
||||
type ConfigList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
|
@ -60,15 +63,26 @@ func (c *Config) SetValue(key string, value any) error {
|
|||
// set value
|
||||
var f func(input map[string]any, key []string, value any) any
|
||||
f = func(input map[string]any, key []string, value any) any {
|
||||
if len(key) == 1 {
|
||||
input[key[0]] = value
|
||||
} else if len(key) > 1 {
|
||||
if v, ok := input[key[0]]; ok && reflect.TypeOf(v).Kind() == reflect.Map {
|
||||
input[key[0]] = f(v.(map[string]any), key[1:], value)
|
||||
} else {
|
||||
input[key[0]] = f(make(map[string]any), key[1:], value)
|
||||
}
|
||||
if len(key) == 0 {
|
||||
return input
|
||||
}
|
||||
|
||||
firstKey := key[0]
|
||||
if len(key) == 1 {
|
||||
input[firstKey] = value
|
||||
|
||||
return input
|
||||
}
|
||||
|
||||
// Handle nested maps
|
||||
if v, ok := input[firstKey]; ok && reflect.TypeOf(v).Kind() == reflect.Map {
|
||||
if vd, ok := v.(map[string]any); ok {
|
||||
input[firstKey] = f(vd, key[1:], value)
|
||||
}
|
||||
} else {
|
||||
input[firstKey] = f(make(map[string]any), key[1:], value)
|
||||
}
|
||||
|
||||
return input
|
||||
}
|
||||
data, err := json.Marshal(f(configMap, strings.Split(key, "."), value))
|
||||
|
|
@ -76,6 +90,7 @@ func (c *Config) SetValue(key string, value any) error {
|
|||
return err
|
||||
}
|
||||
c.Spec.Raw = data
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -86,6 +101,7 @@ func (c *Config) GetValue(key string) (any, error) {
|
|||
if err := json.Unmarshal(c.Spec.Raw, &configMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// get all value
|
||||
if key == "" {
|
||||
return configMap, nil
|
||||
}
|
||||
|
|
@ -95,9 +111,10 @@ func (c *Config) GetValue(key string) (any, error) {
|
|||
r, ok := result.(map[string]any)
|
||||
if !ok {
|
||||
// cannot find value
|
||||
return nil, nil
|
||||
return nil, fmt.Errorf("cannot find key: %s", key)
|
||||
}
|
||||
result = r[k]
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
|
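A short usage sketch of the accessors above, assuming the behaviour shown in this diff: keys are dot-separated, nested maps are created on demand, and `GetValue` now returns an error rather than `(nil, nil)` when a nested key cannot be resolved:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
)

func main() {
	c := &kkcorev1.Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1}`)}}

	// Dot-separated keys create the intermediate maps as needed.
	if err := c.SetValue("kube.version", "v1.29.0"); err != nil {
		fmt.Println("set error:", err)

		return
	}

	v, err := c.GetValue("kube.version")
	fmt.Println(v, err) // expected: v1.29.0 <nil>

	// A nested key that cannot be resolved now reports an error.
	_, err = c.GetValue("missing.key")
	fmt.Println(err) // expected: cannot find key: missing.key
}
```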
|
|||
|
|
@ -54,7 +54,9 @@ func TestSetValue(t *testing.T) {
|
|||
in := Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1}`)}}
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := in.SetValue(tc.key, tc.val)
|
||||
assert.NoError(t, err)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, tc.except, in)
|
||||
})
|
||||
}
|
||||
|
|
@ -71,7 +73,7 @@ func TestGetValue(t *testing.T) {
|
|||
name: "all value",
|
||||
key: "",
|
||||
config: Config{Spec: runtime.RawExtension{Raw: []byte(`{"a":1}`)}},
|
||||
except: map[string]interface{}{
|
||||
except: map[string]any{
|
||||
"a": int64(1),
|
||||
},
|
||||
},
|
||||
|
|
@ -103,8 +105,7 @@ func TestGetValue(t *testing.T) {
|
|||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
value, err := tc.config.GetValue(tc.key)
|
||||
assert.NoError(t, err)
|
||||
value, _ := tc.config.GetValue(tc.key)
|
||||
assert.Equal(t, tc.except, value)
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,14 +21,17 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// InventoryHost of Inventory
|
||||
type InventoryHost map[string]runtime.RawExtension
|
||||
|
||||
// InventoryGroup of Inventory
|
||||
type InventoryGroup struct {
|
||||
Groups []string `json:"groups,omitempty"`
|
||||
Hosts []string `json:"hosts,omitempty"`
|
||||
Vars runtime.RawExtension `json:"vars,omitempty"`
|
||||
}
|
||||
|
||||
// InventorySpec of Inventory
|
||||
type InventorySpec struct {
|
||||
// Hosts is all nodes
|
||||
Hosts InventoryHost `json:"hosts,omitempty"`
|
||||
|
|
@ -46,6 +49,7 @@ type InventorySpec struct {
|
|||
// +k8s:openapi-gen=true
|
||||
// +kubebuilder:resource:scope=Namespaced
|
||||
|
||||
// Inventory store hosts vars for playbook.
|
||||
type Inventory struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
|
@ -55,6 +59,7 @@ type Inventory struct {
|
|||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// InventoryList of Inventory
|
||||
type InventoryList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
|
|
|||
|
|
@ -21,12 +21,17 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// PipelinePhase of Pipeline
|
||||
type PipelinePhase string
|
||||
|
||||
const (
|
||||
// PipelinePhasePending of Pipeline. Pipeline has created but not deal
|
||||
PipelinePhasePending PipelinePhase = "Pending"
|
||||
// PipelinePhaseRunning of Pipeline. deal Pipeline.
|
||||
PipelinePhaseRunning PipelinePhase = "Running"
|
||||
PipelinePhaseFailed PipelinePhase = "Failed"
|
||||
// PipelinePhaseFailed of Pipeline. once Task run failed.
|
||||
PipelinePhaseFailed PipelinePhase = "Failed"
|
||||
// PipelinePhaseSucceed of Pipeline. all Tasks run success.
|
||||
PipelinePhaseSucceed PipelinePhase = "Succeed"
|
||||
)
|
||||
|
||||
|
|
@ -35,6 +40,7 @@ const (
|
|||
BuiltinsProjectAnnotation = "kubekey.kubesphere.io/builtins-project"
|
||||
)
|
||||
|
||||
// PipelineSpec of pipeline.
|
||||
type PipelineSpec struct {
|
||||
// Project is storage for executable packages
|
||||
// +optional
|
||||
|
|
@ -118,6 +124,7 @@ type PipelineJobSpec struct {
|
|||
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
|
||||
}
|
||||
|
||||
// PipelineProject respect which playbook store.
|
||||
type PipelineProject struct {
|
||||
// Addr is the storage for executable packages (in Ansible file format).
|
||||
// When starting with http or https, it will be obtained from a Git repository.
|
||||
|
|
@ -141,6 +148,7 @@ type PipelineProject struct {
|
|||
Token string `json:"token,omitempty"`
|
||||
}
|
||||
|
||||
// PipelineStatus of Pipeline
|
||||
type PipelineStatus struct {
|
||||
// TaskResult total related tasks execute result.
|
||||
TaskResult PipelineTaskResult `json:"taskResult,omitempty"`
|
||||
|
|
@ -152,6 +160,7 @@ type PipelineStatus struct {
|
|||
FailedDetail []PipelineFailedDetail `json:"failedDetail,omitempty"`
|
||||
}
|
||||
|
||||
// PipelineTaskResult of Pipeline
|
||||
type PipelineTaskResult struct {
|
||||
// Total number of tasks.
|
||||
Total int `json:"total,omitempty"`
|
||||
|
|
@ -163,6 +172,7 @@ type PipelineTaskResult struct {
|
|||
Ignored int `json:"ignored,omitempty"`
|
||||
}
|
||||
|
||||
// PipelineFailedDetail store failed message when pipeline run failed.
|
||||
type PipelineFailedDetail struct {
|
||||
// Task name of failed task.
|
||||
Task string `json:"task,omitempty"`
|
||||
|
|
@ -170,6 +180,7 @@ type PipelineFailedDetail struct {
|
|||
Hosts []PipelineFailedDetailHost `json:"hosts,omitempty"`
|
||||
}
|
||||
|
||||
// PipelineFailedDetailHost detail failed message for each host.
|
||||
type PipelineFailedDetailHost struct {
|
||||
// Host name of failed task.
|
||||
Host string `json:"host,omitempty"`
|
||||
|
|
@ -189,6 +200,7 @@ type PipelineFailedDetailHost struct {
|
|||
// +kubebuilder:printcolumn:name="Total",type="integer",JSONPath=".status.taskResult.total"
|
||||
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
|
||||
|
||||
// Pipeline resource executor a playbook.
|
||||
type Pipeline struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
|
@ -199,6 +211,7 @@ type Pipeline struct {
|
|||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// PipelineList of Pipeline
|
||||
type PipelineList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package v1alpha1 is the internal version. should not register in kubernetes
|
||||
// Package v1alpha1 is the internal version, should not register in kubernetes
|
||||
// +k8s:deepcopy-gen=package,register
|
||||
// +groupName=kubekey.kubesphere.io
|
||||
// +kubebuilder:skip
|
||||
|
|
|
|||
|
|
@ -21,13 +21,19 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// TaskPhase of Task
|
||||
type TaskPhase string
|
||||
|
||||
const (
|
||||
// TaskPhasePending of Task. Task has created but not deal
|
||||
TaskPhasePending TaskPhase = "Pending"
|
||||
// TaskPhaseRunning of Task. deal Task
|
||||
TaskPhaseRunning TaskPhase = "Running"
|
||||
// TaskPhaseSuccess of Task. Module of Task run success in each hosts.
|
||||
TaskPhaseSuccess TaskPhase = "Success"
|
||||
TaskPhaseFailed TaskPhase = "Failed"
|
||||
// TaskPhaseFailed of Task. once host run failed.
|
||||
TaskPhaseFailed TaskPhase = "Failed"
|
||||
// TaskPhaseIgnored of Task. once host run failed and set ignore_errors.
|
||||
TaskPhaseIgnored TaskPhase = "Ignored"
|
||||
)
|
||||
|
||||
|
|
@ -36,7 +42,8 @@ const (
|
|||
TaskAnnotationRole = "kubesphere.io/role"
|
||||
)
|
||||
|
||||
type KubeKeyTaskSpec struct {
|
||||
// TaskSpec of Task
|
||||
type TaskSpec struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Hosts []string `json:"hosts,omitempty"`
|
||||
IgnoreError *bool `json:"ignoreError,omitempty"`
|
||||
|
|
@ -50,17 +57,20 @@ type KubeKeyTaskSpec struct {
|
|||
Register string `json:"register,omitempty"`
|
||||
}
|
||||
|
||||
// Module of Task
|
||||
type Module struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Args runtime.RawExtension `json:"args,omitempty"`
|
||||
}
|
||||
|
||||
// TaskStatus of Task
|
||||
type TaskStatus struct {
|
||||
RestartCount int `json:"restartCount,omitempty"`
|
||||
Phase TaskPhase `json:"phase,omitempty"`
|
||||
HostResults []TaskHostResult `json:"hostResults,omitempty"`
|
||||
}
|
||||
|
||||
// TaskHostResult each host result for task
|
||||
type TaskHostResult struct {
|
||||
Host string `json:"host,omitempty"`
|
||||
Stdout string `json:"stdout,omitempty"`
|
||||
|
|
@ -71,29 +81,35 @@ type TaskHostResult struct {
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:resource:scope=Namespaced
|
||||
|
||||
// Task of pipeline
|
||||
type Task struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec KubeKeyTaskSpec `json:"spec,omitempty"`
|
||||
Status TaskStatus `json:"status,omitempty"`
|
||||
Spec TaskSpec `json:"spec,omitempty"`
|
||||
Status TaskStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// TaskList for Task
|
||||
type TaskList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []Task `json:"items"`
|
||||
}
|
||||
|
||||
// IsComplete if Task IsSucceed or IsFailed
|
||||
func (t Task) IsComplete() bool {
|
||||
return t.IsSucceed() || t.IsFailed()
|
||||
}
|
||||
|
||||
// IsSucceed if Task.Status.Phase TaskPhaseSuccess or TaskPhaseIgnored
|
||||
func (t Task) IsSucceed() bool {
|
||||
return t.Status.Phase == TaskPhaseSuccess || t.Status.Phase == TaskPhaseIgnored
|
||||
}
|
||||
|
||||
// IsFailed Task.Status.Phase is failed when reach the retries
|
||||
func (t Task) IsFailed() bool {
|
||||
return t.Status.Phase == TaskPhaseFailed && t.Spec.Retries <= t.Status.RestartCount
|
||||
}
|
||||
|
|
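The phase constants and helpers above encode when a Task is terminal. A tiny local mirror of that logic, just to illustrate the rules; the real types live in the project's internal v1alpha1 package:

```go
package main

import "fmt"

type taskPhase string

const (
	phaseSuccess taskPhase = "Success"
	phaseIgnored taskPhase = "Ignored"
	phaseFailed  taskPhase = "Failed"
)

type task struct {
	phase        taskPhase
	retries      int
	restartCount int
}

// succeeded mirrors IsSucceed: Success or Ignored both count as success.
func (t task) succeeded() bool { return t.phase == phaseSuccess || t.phase == phaseIgnored }

// failed mirrors IsFailed: only terminal once the retries are exhausted.
func (t task) failed() bool { return t.phase == phaseFailed && t.retries <= t.restartCount }

func (t task) complete() bool { return t.succeeded() || t.failed() }

func main() {
	t := task{phase: phaseFailed, retries: 3, restartCount: 1}
	fmt.Println(t.complete()) // false: the task may still be retried
}
```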
|
|||
|
|
@ -24,43 +24,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubeKeyTaskSpec) DeepCopyInto(out *KubeKeyTaskSpec) {
|
||||
*out = *in
|
||||
if in.Hosts != nil {
|
||||
in, out := &in.Hosts, &out.Hosts
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.IgnoreError != nil {
|
||||
in, out := &in.IgnoreError, &out.IgnoreError
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.When != nil {
|
||||
in, out := &in.When, &out.When
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.FailedWhen != nil {
|
||||
in, out := &in.FailedWhen, &out.FailedWhen
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
in.Loop.DeepCopyInto(&out.Loop)
|
||||
in.Module.DeepCopyInto(&out.Module)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeKeyTaskSpec.
|
||||
func (in *KubeKeyTaskSpec) DeepCopy() *KubeKeyTaskSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(KubeKeyTaskSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Module) DeepCopyInto(out *Module) {
|
||||
*out = *in
|
||||
|
|
@ -151,6 +114,43 @@ func (in *TaskList) DeepCopyObject() runtime.Object {
|
|||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TaskSpec) DeepCopyInto(out *TaskSpec) {
|
||||
*out = *in
|
||||
if in.Hosts != nil {
|
||||
in, out := &in.Hosts, &out.Hosts
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.IgnoreError != nil {
|
||||
in, out := &in.IgnoreError, &out.IgnoreError
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.When != nil {
|
||||
in, out := &in.When, &out.When
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.FailedWhen != nil {
|
||||
in, out := &in.FailedWhen, &out.FailedWhen
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
in.Loop.DeepCopyInto(&out.Loop)
|
||||
in.Module.DeepCopyInto(&out.Module)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskSpec.
|
||||
func (in *TaskSpec) DeepCopy() *TaskSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TaskSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TaskStatus) DeepCopyInto(out *TaskStatus) {
|
||||
*out = *in
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@ limitations under the License.
|
|||
|
||||
package v1
|
||||
|
||||
// Base defined in project.
|
||||
type Base struct {
|
||||
Name string `yaml:"name,omitempty"`
|
||||
|
||||
|
|
|
|||
|
|
@ -23,15 +23,17 @@ import (
|
|||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// Block defined in project.
|
||||
type Block struct {
|
||||
BlockBase
|
||||
// If has Block, Task should be empty
|
||||
// If it has Block, Task should be empty
|
||||
Task
|
||||
IncludeTasks string `yaml:"include_tasks,omitempty"`
|
||||
|
||||
BlockInfo
|
||||
}
|
||||
|
||||
// BlockBase defined in project.
|
||||
type BlockBase struct {
|
||||
Base `yaml:",inline"`
|
||||
Conditional `yaml:",inline"`
|
||||
|
|
@ -41,12 +43,14 @@ type BlockBase struct {
|
|||
Delegatable `yaml:",inline"`
|
||||
}
|
||||
|
||||
// BlockInfo defined in project.
|
||||
type BlockInfo struct {
|
||||
Block []Block `yaml:"block,omitempty"`
|
||||
Rescue []Block `yaml:"rescue,omitempty"`
|
||||
Always []Block `yaml:"always,omitempty"`
|
||||
}
|
||||
|
||||
// Task defined in project.
|
||||
type Task struct {
|
||||
AsyncVal int `yaml:"async,omitempty"`
|
||||
ChangedWhen When `yaml:"changed_when,omitempty"`
|
||||
|
|
@ -62,11 +66,12 @@ type Task struct {
|
|||
// deprecated, used to be loop and loop_args but loop has been repurposed
|
||||
//LoopWith string `yaml:"loop_with"`
|
||||
|
||||
//
|
||||
UnknownFiled map[string]any `yaml:"-"`
|
||||
// UnknownField store undefined filed
|
||||
UnknownField map[string]any `yaml:"-"`
|
||||
}
|
||||
|
||||
func (b *Block) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
// UnmarshalYAML yaml string to block.
|
||||
func (b *Block) UnmarshalYAML(unmarshal func(any) error) error {
|
||||
// fill baseInfo
|
||||
var bb BlockBase
|
||||
if err := unmarshal(&bb); err == nil {
|
||||
|
|
@ -76,58 +81,110 @@ func (b *Block) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
var m map[string]any
|
||||
if err := unmarshal(&m); err != nil {
|
||||
klog.Errorf("unmarshal data to map error: %v", err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if v, ok := m["include_tasks"]; ok {
|
||||
b.IncludeTasks = v.(string)
|
||||
} else if _, ok := m["block"]; ok {
|
||||
// render block
|
||||
var bi BlockInfo
|
||||
err := unmarshal(&bi)
|
||||
if includeTasks, ok := handleIncludeTasks(m); ok {
|
||||
// Set the IncludeTasks field if "include_tasks" exists and is valid.
|
||||
b.IncludeTasks = includeTasks
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
switch {
|
||||
case m["block"] != nil:
|
||||
// If the "block" key exists, unmarshal it into BlockInfo and set the BlockInfo field.
|
||||
bi, err := handleBlock(m, unmarshal)
|
||||
if err != nil {
|
||||
klog.Errorf("unmarshal data to block error: %v", err)
|
||||
return err
|
||||
}
|
||||
b.BlockInfo = bi
|
||||
} else {
|
||||
// render task
|
||||
var t Task
|
||||
err := unmarshal(&t)
|
||||
default:
|
||||
// If neither "include_tasks" nor "block" are present, treat the data as a task.
|
||||
t, err := handleTask(m, unmarshal)
|
||||
if err != nil {
|
||||
klog.Errorf("unmarshal data to task error: %v", err)
|
||||
return err
|
||||
}
|
||||
b.Task = t
|
||||
deleteExistField(reflect.TypeOf(Block{}), m)
|
||||
// set unknown flied to task.UnknownFiled
|
||||
b.UnknownFiled = m
|
||||
// Set any remaining unknown fields to the Task's UnknownField.
|
||||
b.UnknownField = m
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||

// handleIncludeTasks checks if the "include_tasks" key exists in the map and is of type string.
// If so, it returns the string value and true, otherwise it returns an empty string and false.
func handleIncludeTasks(m map[string]any) (string, bool) {
if v, ok := m["include_tasks"]; ok {
if it, ok := v.(string); ok {
return it, true
}
}

return "", false
}

// handleBlock attempts to unmarshal the block data into a BlockInfo structure.
// If successful, it returns the BlockInfo and nil. If an error occurs, it logs the error and returns it.
func handleBlock(_ map[string]any, unmarshal func(any) error) (BlockInfo, error) {
var bi BlockInfo
if err := unmarshal(&bi); err != nil {
klog.Errorf("unmarshal data to block error: %v", err)

return bi, err
}

return bi, nil
}

// handleTask attempts to unmarshal the task data into a Task structure.
// If successful, it deletes existing fields from the map, logs the error if it occurs, and returns the Task and nil.
func handleTask(m map[string]any, unmarshal func(any) error) (Task, error) {
var t Task
if err := unmarshal(&t); err != nil {
klog.Errorf("unmarshal data to task error: %v", err)

return t, err
}
deleteExistField(reflect.TypeOf(Block{}), m)

return t, nil
}

func deleteExistField(rt reflect.Type, m map[string]any) {
for i := 0; i < rt.NumField(); i++ {
for i := range rt.NumField() {
field := rt.Field(i)
if field.Anonymous {
deleteExistField(field.Type, m)
} else {
yamlTag := rt.Field(i).Tag.Get("yaml")
if yamlTag != "" {
for _, t := range strings.Split(yamlTag, ",") {
if _, ok := m[t]; ok {
delete(m, t)
break
}
}
} else {
t := strings.ToUpper(rt.Field(i).Name[:1]) + rt.Field(i).Name[1:]
if _, ok := m[t]; ok {
delete(m, t)
break
}
if isFound := deleteField(rt.Field(i), m); isFound {
break
}
}
}
}
}

// deleteField find and delete the filed, return the field if found.
func deleteField(field reflect.StructField, m map[string]any) bool {
yamlTag := field.Tag.Get("yaml")
if yamlTag != "" {
for _, t := range strings.Split(yamlTag, ",") {
if _, ok := m[t]; ok {
delete(m, t)

return true
}
}
} else {
t := strings.ToUpper(field.Name[:1]) + field.Name[1:]
if _, ok := m[t]; ok {
delete(m, t)

return true
}
}

return false
}
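For context, the pattern above — unmarshal once into a map, strip every key whose yaml tag matches a declared field, and keep the leftovers as "unknown" module arguments — can be reproduced in isolation. A minimal, self-contained sketch (the task type and input are hypothetical, not code from this commit):

package main

import (
	"fmt"
	"reflect"
	"strings"

	"gopkg.in/yaml.v3"
)

// example type; the yaml tags mark the "known" keys.
type task struct {
	Name string `yaml:"name,omitempty"`
	When string `yaml:"when,omitempty"`
}

func main() {
	data := []byte("name: demo\nwhen: \"true\"\ncustom-module: {path: /tmp}\n")

	var m map[string]any
	_ = yaml.Unmarshal(data, &m)

	// drop every key that corresponds to a declared field, keyed by its yaml tag.
	rt := reflect.TypeOf(task{})
	for i := 0; i < rt.NumField(); i++ {
		tag := strings.Split(rt.Field(i).Tag.Get("yaml"), ",")[0]
		delete(m, tag)
	}

	fmt.Println(m) // map[custom-module:map[path:/tmp]] — the leftover "unknown" fields
}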
@ -16,6 +16,7 @@ limitations under the License.
|
|||
|
||||
package v1
|
||||
|
||||
// CollectionSearch defined in project.
|
||||
type CollectionSearch struct {
|
||||
Collections []string `yaml:"collections,omitempty"`
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,27 +17,34 @@ limitations under the License.
package v1

import (
"fmt"
"errors"
)

// Conditional defined in project.
type Conditional struct {
When When `yaml:"when,omitempty"`
}

// When defined in project.
type When struct {
Data []string
}

func (w *When) UnmarshalYAML(unmarshal func(interface{}) error) error {
// UnmarshalYAML yaml string to when
func (w *When) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err == nil {
w.Data = []string{s}

return nil
}

var a []string
if err := unmarshal(&a); err == nil {
w.Data = a

return nil
}
return fmt.Errorf("unsupported type, excepted string or array of strings")

return errors.New("unsupported type, excepted string or array of strings")
}
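The same try-scalar-then-sequence trick reappears below in PlaySerial and PlayHost. A self-contained sketch of the pattern (hypothetical stringOrSlice type; gopkg.in/yaml.v3 still honours the legacy UnmarshalYAML(unmarshal func(any) error) hook used here):

package main

import (
	"errors"
	"fmt"

	"gopkg.in/yaml.v3"
)

// stringOrSlice accepts either a single string or a list of strings.
type stringOrSlice struct{ Data []string }

func (s *stringOrSlice) UnmarshalYAML(unmarshal func(any) error) error {
	var one string
	if err := unmarshal(&one); err == nil { // scalar form
		s.Data = []string{one}

		return nil
	}

	var many []string
	if err := unmarshal(&many); err == nil { // sequence form
		s.Data = many

		return nil
	}

	return errors.New("expected string or list of strings")
}

func main() {
	var a, b stringOrSlice
	_ = yaml.Unmarshal([]byte(`"x == 1"`), &a)
	_ = yaml.Unmarshal([]byte("- x == 1\n- y == 2"), &b)
	fmt.Println(a.Data, b.Data) // [x == 1] [x == 1 y == 2]
}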
@ -16,6 +16,7 @@ limitations under the License.
|
|||
|
||||
package v1
|
||||
|
||||
// Delegatable defined in project.
|
||||
type Delegatable struct {
|
||||
DelegateTo string `yaml:"delegate_to,omitempty"`
|
||||
DelegateFacts bool `yaml:"delegate_facts,omitempty"`
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@ limitations under the License.
|
|||
|
||||
package v1
|
||||
|
||||
// Handler defined in project.
|
||||
type Handler struct {
|
||||
//Task
|
||||
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@ limitations under the License.
|
|||
|
||||
package v1
|
||||
|
||||
// LoopControl defined in project.
|
||||
type LoopControl struct {
|
||||
LoopVar string `yaml:"loop_var,omitempty"`
|
||||
IndexVar string `yaml:"index_var,omitempty"`
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@ limitations under the License.
|
|||
|
||||
package v1
|
||||
|
||||
// Notifiable defined in project.
|
||||
type Notifiable struct {
|
||||
Notify string `yaml:"notify,omitempty"`
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,8 +16,11 @@ limitations under the License.
|
|||
|
||||
package v1
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Play defined in project.
|
||||
type Play struct {
|
||||
ImportPlaybook string `yaml:"import_playbook,omitempty"`
|
||||
|
||||
|
|
@ -56,38 +59,50 @@ type Play struct {
|
|||
Order string `yaml:"order,omitempty"`
|
||||
}
|
||||
|
||||
// PlaySerial defined in project.
|
||||
type PlaySerial struct {
|
||||
Data []any
|
||||
}
|
||||
|
||||
func (s *PlaySerial) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
// UnmarshalYAML yaml string to serial.
|
||||
func (s *PlaySerial) UnmarshalYAML(unmarshal func(any) error) error {
|
||||
var as []any
|
||||
if err := unmarshal(&as); err == nil {
|
||||
s.Data = as
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var a any
|
||||
if err := unmarshal(&a); err == nil {
|
||||
s.Data = []any{a}
|
||||
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unsupported type, excepted any or array")
|
||||
|
||||
return errors.New("unsupported type, excepted any or array")
|
||||
}
|
||||
|
||||
// PlayHost defined in project.
|
||||
type PlayHost struct {
|
||||
Hosts []string
|
||||
}
|
||||
|
||||
func (p *PlayHost) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
// UnmarshalYAML yaml string to play
|
||||
func (p *PlayHost) UnmarshalYAML(unmarshal func(any) error) error {
|
||||
var hs []string
|
||||
if err := unmarshal(&hs); err == nil {
|
||||
p.Hosts = hs
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var h string
|
||||
if err := unmarshal(&h); err == nil {
|
||||
p.Hosts = []string{h}
|
||||
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unsupported type, excepted string or string array")
|
||||
|
||||
return errors.New("unsupported type, excepted string or string array")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -173,7 +173,7 @@ func TestUnmarshalYaml(t *testing.T) {
|
|||
Tasks: []Block{
|
||||
{
|
||||
BlockBase: BlockBase{Base: Base{Name: "test"}},
|
||||
Task: Task{UnknownFiled: map[string]any{"custom-module": "abc"}},
|
||||
Task: Task{UnknownField: map[string]any{"custom-module": "abc"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -200,7 +200,7 @@ func TestUnmarshalYaml(t *testing.T) {
|
|||
BlockInfo: BlockInfo{
|
||||
Block: []Block{{
|
||||
BlockBase: BlockBase{Base: Base{Name: "test | test"}},
|
||||
Task: Task{UnknownFiled: map[string]any{"custom-module": "abc"}},
|
||||
Task: Task{UnknownField: map[string]any{"custom-module": "abc"}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
|
|
@ -214,7 +214,9 @@ func TestUnmarshalYaml(t *testing.T) {
|
|||
t.Run(tc.name, func(t *testing.T) {
|
||||
var pb []Play
|
||||
err := yaml.Unmarshal(tc.data, &pb)
|
||||
assert.NoError(t, err)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, tc.excepted, pb)
|
||||
})
|
||||
}
|
||||
@ -16,24 +16,30 @@ limitations under the License.

package v1

import "fmt"
import (
"errors"
)

// Playbook defined in project.
type Playbook struct {
Play []Play
}

// Validate playbook. delete empty ImportPlaybook which has convert to play.
func (p *Playbook) Validate() error {
var newPlay = make([]Play, len(p.Play))
for i, play := range p.Play {
// delete import_playbook import_playbook is a link, should be ignored.
var newPlay = make([]Play, 0)
for _, play := range p.Play {
// import_playbook is a link, should be ignored.
if play.ImportPlaybook != "" {
continue
}

if len(play.PlayHost.Hosts) == 0 {
return fmt.Errorf("playbook's hosts must not be empty")
return errors.New("playbook's hosts must not be empty")
}
newPlay[i] = play
newPlay = append(newPlay, play)
}
p.Play = newPlay

return nil
}
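The switch from indexed assignment to append here matters: pre-sizing with make([]Play, len(p.Play)) and then skipping import_playbook entries would leave zero-value Play holes in the slice. The usual filtering idiom keeps a capacity hint without the holes — a sketch, assuming the types above:

	kept := make([]Play, 0, len(p.Play)) // length 0, capacity reserved
	for _, play := range p.Play {
		if play.ImportPlaybook != "" {
			continue // dropped entries leave no zero-value gaps
		}
		kept = append(kept, play)
	}
	p.Play = kept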
|
|
|
|||
|
|
@ -41,8 +41,7 @@ func TestValidate(t *testing.T) {
|
|||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := tc.playbook.Validate()
|
||||
assert.Error(t, err)
|
||||
assert.Error(t, tc.playbook.Validate())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,10 +16,12 @@ limitations under the License.
|
|||
|
||||
package v1
|
||||
|
||||
// Role defined in project.
|
||||
type Role struct {
|
||||
RoleInfo
|
||||
}
|
||||
|
||||
// RoleInfo defined in project.
|
||||
type RoleInfo struct {
|
||||
Base `yaml:",inline"`
|
||||
Conditional `yaml:",inline"`
|
||||
|
|
@ -32,15 +34,19 @@ type RoleInfo struct {
|
|||
Block []Block
|
||||
}
|
||||
|
||||
func (r *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
// UnmarshalYAML yaml string to role.
|
||||
func (r *Role) UnmarshalYAML(unmarshal func(any) error) error {
|
||||
var s string
|
||||
if err := unmarshal(&s); err == nil {
|
||||
r.Role = s
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var info RoleInfo
|
||||
if err := unmarshal(&info); err == nil {
|
||||
r.RoleInfo = info
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -18,6 +18,18 @@ package v1
|
|||
|
||||
import "slices"
|
||||
|
||||
// the special tags
|
||||
const (
|
||||
// AlwaysTag it always run
|
||||
AlwaysTag = "always"
|
||||
// NeverTag it never run
|
||||
NeverTag = "never"
|
||||
// AllTag represent all tags
|
||||
AllTag = "all"
|
||||
// TaggedTag represent which has tags
|
||||
TaggedTag = "tagged"
|
||||
)
|
||||
|
||||
// Taggable if it should executor
|
||||
type Taggable struct {
|
||||
Tags []string `yaml:"tags,omitempty"`
|
||||
|
|
@ -28,27 +40,28 @@ func (t Taggable) IsEnabled(onlyTags []string, skipTags []string) bool {
|
|||
shouldRun := true
|
||||
|
||||
if len(onlyTags) > 0 {
|
||||
if slices.Contains(t.Tags, "always") {
|
||||
switch {
|
||||
case slices.Contains(t.Tags, AlwaysTag):
|
||||
shouldRun = true
|
||||
} else if slices.Contains(onlyTags, "all") && !slices.Contains(t.Tags, "never") {
|
||||
case slices.Contains(onlyTags, AllTag) && !slices.Contains(t.Tags, NeverTag):
|
||||
shouldRun = true
|
||||
} else if slices.Contains(onlyTags, "tagged") && len(onlyTags) > 0 && !slices.Contains(t.Tags, "never") {
|
||||
case slices.Contains(onlyTags, TaggedTag) && !slices.Contains(t.Tags, NeverTag):
|
||||
shouldRun = true
|
||||
} else if !isdisjoint(onlyTags, t.Tags) {
|
||||
case !isdisjoint(onlyTags, t.Tags):
|
||||
shouldRun = true
|
||||
} else {
|
||||
default:
|
||||
shouldRun = false
|
||||
}
|
||||
}
|
||||
|
||||
if shouldRun && len(skipTags) > 0 {
|
||||
if slices.Contains(skipTags, "all") {
|
||||
if !slices.Contains(t.Tags, "always") || !slices.Contains(skipTags, "always") {
|
||||
shouldRun = false
|
||||
}
|
||||
} else if !isdisjoint(skipTags, t.Tags) {
|
||||
switch {
|
||||
case slices.Contains(skipTags, AllTag) &&
|
||||
(!slices.Contains(t.Tags, AlwaysTag) || !slices.Contains(skipTags, AlwaysTag)):
|
||||
shouldRun = false
|
||||
} else if slices.Contains(skipTags, "tagged") && len(skipTags) > 0 {
|
||||
case !isdisjoint(skipTags, t.Tags):
|
||||
shouldRun = false
|
||||
case slices.Contains(skipTags, TaggedTag) && len(skipTags) > 0:
|
||||
shouldRun = false
|
||||
}
|
||||
}
|
||||
|
|
@ -59,13 +72,15 @@ func (t Taggable) IsEnabled(onlyTags []string, skipTags []string) bool {
|
|||
// JoinTag the child block should inherit tag for parent block
|
||||
func JoinTag(child, parent Taggable) Taggable {
|
||||
for _, tag := range parent.Tags {
|
||||
if tag == "always" { // skip inherit "always" tag
|
||||
if tag == AlwaysTag { // skip inherit "always" tag
|
||||
continue
|
||||
}
|
||||
|
||||
if !slices.Contains(child.Tags, tag) {
|
||||
child.Tags = append(child.Tags, tag)
|
||||
}
|
||||
}
|
||||
|
||||
return child
|
||||
}
|
||||
|
||||
|
|
@ -76,5 +91,6 @@ func isdisjoint(a, b []string) bool {
|
|||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
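A few illustrative calls, with results worked out from the only-tags/skip-tags logic above (the plain tag names are hypothetical):

	t := Taggable{Tags: []string{"etcd"}}
	_ = t.IsEnabled(nil, nil)                  // true: no filters applied
	_ = t.IsEnabled([]string{AllTag}, nil)     // true: "all" selects anything not tagged "never"
	_ = t.IsEnabled([]string{"network"}, nil)  // false: no overlap with only-tags
	_ = t.IsEnabled(nil, []string{"etcd"})     // false: explicitly skipped
	_ = Taggable{Tags: []string{AlwaysTag}}.IsEnabled([]string{"network"}, nil) // true: "always" wins over only-tags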
|
|
|
|||
|
|
@ -25,6 +25,7 @@ import (
|
|||
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/utils/exec"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/variable"
|
||||
|
|
@ -32,7 +33,6 @@ import (
|
|||
|
||||
// connectedType for connector
|
||||
const (
|
||||
connectedDefault = ""
|
||||
connectedSSH = "ssh"
|
||||
connectedLocal = "local"
|
||||
connectedKubernetes = "kubernetes"
|
||||
|
|
@ -60,6 +60,7 @@ type Connector interface {
|
|||
// vars contains all inventory for host. It's best to define the connector info in inventory file.
|
||||
func NewConnector(host string, connectorVars map[string]any) (Connector, error) {
|
||||
connectedType, _ := variable.StringVar(nil, connectorVars, _const.VariableConnectorType)
|
||||
|
||||
switch connectedType {
|
||||
case connectedLocal:
|
||||
return &localConnector{Cmd: exec.New()}, nil
|
||||
|
|
@ -74,7 +75,7 @@ func NewConnector(host string, connectorVars map[string]any) (Connector, error)
|
|||
portParam, err := variable.IntVar(nil, connectorVars, _const.VariableConnectorPort)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("connector port is empty use: %v", defaultSSHPort)
|
||||
portParam = defaultSSHPort
|
||||
portParam = ptr.To(defaultSSHPort)
|
||||
}
|
||||
// get user in connector variable. if empty, set default user: root.
|
||||
userParam, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorUser)
|
||||
|
|
@ -93,9 +94,10 @@ func NewConnector(host string, connectorVars map[string]any) (Connector, error)
|
|||
klog.V(4).Infof("ssh public key is empty, use: %s", defaultSSHPrivateKey)
|
||||
keyParam = defaultSSHPrivateKey
|
||||
}
|
||||
|
||||
return &sshConnector{
|
||||
Host: hostParam,
|
||||
Port: portParam,
|
||||
Port: *portParam,
|
||||
User: userParam,
|
||||
Password: passwdParam,
|
||||
PrivateKey: keyParam,
|
||||
|
|
@ -105,6 +107,7 @@ func NewConnector(host string, connectorVars map[string]any) (Connector, error)
|
|||
if err != nil && host != _const.VariableLocalHost {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &kubernetesConnector{Cmd: exec.New(), clusterName: host, kubeconfig: kubeconfig}, nil
|
||||
default:
|
||||
localHost, _ := os.Hostname()
|
||||
|
|
@ -121,7 +124,7 @@ func NewConnector(host string, connectorVars map[string]any) (Connector, error)
|
|||
portParam, err := variable.IntVar(nil, connectorVars, _const.VariableConnectorPort)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("connector port is empty use: %v", defaultSSHPort)
|
||||
portParam = defaultSSHPort
|
||||
portParam = ptr.To(defaultSSHPort)
|
||||
}
|
||||
// get user in connector variable. if empty, set default user: root.
|
||||
userParam, err := variable.StringVar(nil, connectorVars, _const.VariableConnectorUser)
|
||||
|
|
@ -143,7 +146,7 @@ func NewConnector(host string, connectorVars map[string]any) (Connector, error)
|
|||
|
||||
return &sshConnector{
|
||||
Host: hostParam,
|
||||
Port: portParam,
|
||||
Port: *portParam,
|
||||
User: userParam,
|
||||
Password: passwdParam,
|
||||
PrivateKey: keyParam,
|
||||
|
|
@ -153,7 +156,7 @@ func NewConnector(host string, connectorVars map[string]any) (Connector, error)
|
|||
|
||||
// GatherFacts get host info.
|
||||
type GatherFacts interface {
|
||||
Info(ctx context.Context) (map[string]any, error)
|
||||
HostInfo(ctx context.Context) (map[string]any, error)
|
||||
}
|
||||
|
||||
// isLocalIP check if given ipAddr is local network ip
|
||||
|
|
@ -161,8 +164,10 @@ func isLocalIP(ipAddr string) bool {
|
|||
addrs, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "get network address error")
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
for _, addr := range addrs {
|
||||
var ip net.IP
|
||||
switch v := addr.(type) {
|
||||
|
|
@ -172,11 +177,14 @@ func isLocalIP(ipAddr string) bool {
|
|||
ip = v.IP
|
||||
default:
|
||||
klog.V(4).InfoS("unknown address type", "address", addr.String())
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if ip.String() == ipAddr {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
|
|
|||
|
|
@ -35,6 +35,7 @@ func convertBytesToMap(bs []byte, split string) map[string]string {
|
|||
config[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -36,73 +36,86 @@ var _ Connector = &kubernetesConnector{}
|
|||
type kubernetesConnector struct {
|
||||
clusterName string
|
||||
kubeconfig string
|
||||
rootDir string
|
||||
homeDir string
|
||||
Cmd exec.Interface
|
||||
}
|
||||
|
||||
func (c *kubernetesConnector) Init(ctx context.Context) error {
|
||||
// Init connector, create home dir in local for each kubernetes.
|
||||
func (c *kubernetesConnector) Init(_ context.Context) error {
|
||||
if c.clusterName == _const.VariableLocalHost && c.kubeconfig == "" {
|
||||
// use default kubeconfig. skip
|
||||
klog.V(4).InfoS("kubeconfig is not set, using local kubeconfig")
|
||||
// use default kubeconfig. skip
|
||||
return nil
|
||||
}
|
||||
// set rootDir
|
||||
c.rootDir = filepath.Join(_const.GetWorkDir(), _const.KubernetesDir, c.clusterName)
|
||||
if _, err := os.Stat(c.rootDir); err != nil && os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(c.rootDir, os.ModePerm); err != nil {
|
||||
// set home dir for each kubernetes
|
||||
c.homeDir = filepath.Join(_const.GetWorkDir(), _const.KubernetesDir, c.clusterName)
|
||||
if _, err := os.Stat(c.homeDir); err != nil && os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(c.homeDir, os.ModePerm); err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to create local dir", "cluster", c.clusterName)
|
||||
// if dir is not exist, create it.
|
||||
return err
|
||||
}
|
||||
}
|
||||
// set kubeconfig to root dir
|
||||
kubeconfigPath := filepath.Join(c.rootDir, kubeconfigRelPath)
|
||||
// create kubeconfig path in home dir
|
||||
kubeconfigPath := filepath.Join(c.homeDir, kubeconfigRelPath)
|
||||
if _, err := os.Stat(kubeconfigPath); err != nil && os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(filepath.Dir(kubeconfigPath), os.ModePerm); err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to create local dir", "cluster", c.clusterName)
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
// write kubeconfig to home dir
|
||||
if err := os.WriteFile(kubeconfigPath, []byte(c.kubeconfig), os.ModePerm); err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to create kubeconfig file", "cluster", c.clusterName)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *kubernetesConnector) Close(ctx context.Context) error {
|
||||
// Close connector, do nothing
|
||||
func (c *kubernetesConnector) Close(_ context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutFile copy src file to dst file. src is the local filename, dst is the local filename.
|
||||
// Typically, the configuration file for each cluster may be different,
|
||||
// and it may be necessary to keep them in separate directories locally.
|
||||
func (c *kubernetesConnector) PutFile(ctx context.Context, src []byte, dst string, mode fs.FileMode) error {
|
||||
dst = filepath.Join(c.rootDir, dst)
|
||||
func (c *kubernetesConnector) PutFile(_ context.Context, src []byte, dst string, mode fs.FileMode) error {
|
||||
dst = filepath.Join(c.homeDir, dst)
|
||||
if _, err := os.Stat(filepath.Dir(dst)); err != nil && os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(filepath.Dir(dst), mode); err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to create local dir", "dst_file", dst)
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return os.WriteFile(dst, src, mode)
|
||||
}
|
||||
|
||||
// FetchFile copy src file to dst writer. src is the local filename, dst is the local writer.
|
||||
func (c *kubernetesConnector) FetchFile(ctx context.Context, src string, dst io.Writer) error {
|
||||
// add "--kubeconfig" to src command
|
||||
klog.V(5).InfoS("exec local command", "cmd", src)
|
||||
command := c.Cmd.CommandContext(ctx, "/bin/sh", "-c", src)
|
||||
command.SetDir(c.rootDir)
|
||||
command.SetEnv([]string{"KUBECONFIG=" + filepath.Join(c.rootDir, kubeconfigRelPath)})
|
||||
command.SetDir(c.homeDir)
|
||||
command.SetEnv([]string{"KUBECONFIG=" + filepath.Join(c.homeDir, kubeconfigRelPath)})
|
||||
command.SetStdout(dst)
|
||||
_, err := command.CombinedOutput()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecuteCommand in a kubernetes cluster
|
||||
func (c *kubernetesConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) {
|
||||
// add "--kubeconfig" to src command
|
||||
klog.V(4).InfoS("exec local command", "cmd", cmd)
|
||||
klog.V(5).InfoS("exec local command", "cmd", cmd)
|
||||
command := c.Cmd.CommandContext(ctx, "/bin/sh", "-c", cmd)
|
||||
command.SetDir(c.rootDir)
|
||||
command.SetEnv([]string{"KUBECONFIG=" + filepath.Join(c.rootDir, kubeconfigRelPath)})
|
||||
command.SetDir(c.homeDir)
|
||||
command.SetEnv([]string{"KUBECONFIG=" + filepath.Join(c.homeDir, kubeconfigRelPath)})
|
||||
|
||||
return command.CombinedOutput()
|
||||
}
|
||||
|
|
@ -39,46 +39,57 @@ type localConnector struct {
|
|||
Cmd exec.Interface
|
||||
}
|
||||
|
||||
func (c *localConnector) Init(ctx context.Context) error {
|
||||
// Init connector. do nothing
|
||||
func (c *localConnector) Init(context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *localConnector) Close(ctx context.Context) error {
|
||||
// Close connector. do nothing
|
||||
func (c *localConnector) Close(context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutFile copy src file to dst file. src is the local filename, dst is the local filename.
|
||||
func (c *localConnector) PutFile(ctx context.Context, src []byte, dst string, mode fs.FileMode) error {
|
||||
func (c *localConnector) PutFile(_ context.Context, src []byte, dst string, mode fs.FileMode) error {
|
||||
if _, err := os.Stat(filepath.Dir(dst)); err != nil && os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(filepath.Dir(dst), mode); err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to create local dir", "dst_file", dst)
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return os.WriteFile(dst, src, mode)
|
||||
}
|
||||
|
||||
// FetchFile copy src file to dst writer. src is the local filename, dst is the local writer.
|
||||
func (c *localConnector) FetchFile(ctx context.Context, src string, dst io.Writer) error {
|
||||
func (c *localConnector) FetchFile(_ context.Context, src string, dst io.Writer) error {
|
||||
var err error
|
||||
file, err := os.Open(src)
|
||||
if err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to read local file failed", "src_file", src)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(dst, file); err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to copy local file", "src_file", src)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExecuteCommand in local host
|
||||
func (c *localConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) {
|
||||
klog.V(4).InfoS("exec local command", "cmd", cmd)
|
||||
klog.V(5).InfoS("exec local command", "cmd", cmd)
|
||||
|
||||
return c.Cmd.CommandContext(ctx, "/bin/sh", "-c", cmd).CombinedOutput()
|
||||
}
|
||||
|
||||
func (c *localConnector) Info(ctx context.Context) (map[string]any, error) {
|
||||
// HostInfo for GatherFacts
|
||||
func (c *localConnector) HostInfo(ctx context.Context) (map[string]any, error) {
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
// os information
|
||||
|
|
@ -123,6 +134,7 @@ func (c *localConnector) Info(ctx context.Context) (map[string]any, error) {
|
|||
}, nil
|
||||
default:
|
||||
klog.V(4).ErrorS(nil, "Unsupported platform", "platform", runtime.GOOS)
|
||||
return nil, nil
|
||||
|
||||
return make(map[string]any), nil
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ package connector
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
|
@ -32,16 +33,17 @@ func newFakeLocalConnector(runCmd string, output string) *localConnector {
|
|||
return &localConnector{
|
||||
Cmd: &testingexec.FakeExec{CommandScript: []testingexec.FakeCommandAction{
|
||||
func(cmd string, args ...string) exec.Cmd {
|
||||
if strings.TrimSpace(fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))) == fmt.Sprintf("/bin/sh -c %s", runCmd) {
|
||||
if strings.TrimSpace(fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))) == "/bin/sh -c "+runCmd {
|
||||
return &testingexec.FakeCmd{
|
||||
CombinedOutputScript: []testingexec.FakeAction{func() ([]byte, []byte, error) {
|
||||
return []byte(output), nil, nil
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
return &testingexec.FakeCmd{
|
||||
CombinedOutputScript: []testingexec.FakeAction{func() ([]byte, []byte, error) {
|
||||
return nil, nil, fmt.Errorf("error command")
|
||||
return nil, nil, errors.New("error command")
|
||||
}},
|
||||
}
|
||||
},
|
||||
|
|
@ -63,7 +65,7 @@ func TestSshConnector_ExecuteCommand(t *testing.T) {
|
|||
{
|
||||
name: "execute command failed",
|
||||
cmd: "echo 'hello1'",
|
||||
exceptedErr: fmt.Errorf("error command"),
|
||||
exceptedErr: errors.New("error command"),
|
||||
},
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -19,10 +19,12 @@ package connector
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
|
|
@ -34,11 +36,20 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
defaultSSHPort = 22
|
||||
defaultSSHUser = "root"
|
||||
defaultSSHPrivateKey = "/root/.ssh/id_rsa"
|
||||
defaultSSHPort = 22
|
||||
defaultSSHUser = "root"
|
||||
)
|
||||
|
||||
var defaultSSHPrivateKey string
|
||||
|
||||
func init() {
|
||||
if currentUser, err := user.Current(); err == nil {
|
||||
defaultSSHPrivateKey = filepath.Join(currentUser.HomeDir, ".ssh/id_rsa")
|
||||
} else {
|
||||
defaultSSHPrivateKey = filepath.Join(defaultSSHUser, ".ssh/id_rsa")
|
||||
}
|
||||
}
|
||||
|
||||
var _ Connector = &sshConnector{}
|
||||
var _ GatherFacts = &sshConnector{}
|
||||
|
||||
|
|
@ -51,10 +62,12 @@ type sshConnector struct {
|
|||
client *ssh.Client
|
||||
}
|
||||
|
||||
func (c *sshConnector) Init(ctx context.Context) error {
|
||||
// Init connector, get ssh.Client
|
||||
func (c *sshConnector) Init(context.Context) error {
|
||||
if c.Host == "" {
|
||||
return fmt.Errorf("host is not set")
|
||||
return errors.New("host is not set")
|
||||
}
|
||||
|
||||
var auth []ssh.AuthMethod
|
||||
if c.Password != "" {
|
||||
auth = append(auth, ssh.Password(c.Password))
|
||||
|
|
@ -79,6 +92,7 @@ func (c *sshConnector) Init(ctx context.Context) error {
|
|||
})
|
||||
if err != nil {
|
||||
klog.V(4).ErrorS(err, "Dial ssh server failed", "host", c.Host, "port", c.Port)
|
||||
|
||||
return err
|
||||
}
|
||||
c.client = sshClient
|
||||
|
|
@ -86,16 +100,18 @@ func (c *sshConnector) Init(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *sshConnector) Close(ctx context.Context) error {
|
||||
// Close connector
|
||||
func (c *sshConnector) Close(context.Context) error {
|
||||
return c.client.Close()
|
||||
}
|
||||
|
||||
// PutFile to remote node. src is the file bytes. dst is the remote filename
|
||||
func (c *sshConnector) PutFile(ctx context.Context, src []byte, dst string, mode fs.FileMode) error {
|
||||
func (c *sshConnector) PutFile(_ context.Context, src []byte, dst string, mode fs.FileMode) error {
|
||||
// create sftp client
|
||||
sftpClient, err := sftp.NewClient(c.client)
|
||||
if err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to create sftp client")
|
||||
|
||||
return err
|
||||
}
|
||||
defer sftpClient.Close()
|
||||
|
|
@ -103,29 +119,35 @@ func (c *sshConnector) PutFile(ctx context.Context, src []byte, dst string, mode
|
|||
if _, err := sftpClient.Stat(filepath.Dir(dst)); err != nil && os.IsNotExist(err) {
|
||||
if err := sftpClient.MkdirAll(filepath.Dir(dst)); err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to create remote dir", "remote_file", dst)
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
rf, err := sftpClient.Create(dst)
|
||||
if err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to create remote file", "remote_file", dst)
|
||||
|
||||
return err
|
||||
}
|
||||
defer rf.Close()
|
||||
|
||||
if _, err = rf.Write(src); err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to write content to remote file", "remote_file", dst)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return rf.Chmod(mode)
|
||||
}
|
||||
|
||||
// FetchFile from remote node. src is the remote filename, dst is the local writer.
|
||||
func (c *sshConnector) FetchFile(ctx context.Context, src string, dst io.Writer) error {
|
||||
func (c *sshConnector) FetchFile(_ context.Context, src string, dst io.Writer) error {
|
||||
// create sftp client
|
||||
sftpClient, err := sftp.NewClient(c.client)
|
||||
if err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to create sftp client", "remote_file", src)
|
||||
|
||||
return err
|
||||
}
|
||||
defer sftpClient.Close()
|
||||
|
|
@ -133,23 +155,28 @@ func (c *sshConnector) FetchFile(ctx context.Context, src string, dst io.Writer)
|
|||
rf, err := sftpClient.Open(src)
|
||||
if err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to open file", "remote_file", src)
|
||||
|
||||
return err
|
||||
}
|
||||
defer rf.Close()
|
||||
|
||||
if _, err := io.Copy(dst, rf); err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to copy file", "remote_file", src)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *sshConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte, error) {
|
||||
klog.V(4).InfoS("exec ssh command", "cmd", cmd, "host", c.Host)
|
||||
// ExecuteCommand in remote host
|
||||
func (c *sshConnector) ExecuteCommand(_ context.Context, cmd string) ([]byte, error) {
|
||||
klog.V(5).InfoS("exec ssh command", "cmd", cmd, "host", c.Host)
|
||||
// create ssh session
|
||||
session, err := c.client.NewSession()
|
||||
if err != nil {
|
||||
klog.V(4).ErrorS(err, "Failed to create ssh session")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
defer session.Close()
|
||||
|
|
@ -157,7 +184,8 @@ func (c *sshConnector) ExecuteCommand(ctx context.Context, cmd string) ([]byte,
|
|||
return session.CombinedOutput(cmd)
|
||||
}
|
||||
|
||||
func (c *sshConnector) Info(ctx context.Context) (map[string]any, error) {
|
||||
// HostInfo for GatherFacts
|
||||
func (c *sshConnector) HostInfo(ctx context.Context) (map[string]any, error) {
|
||||
// os information
|
||||
osVars := make(map[string]any)
|
||||
var osRelease bytes.Buffer
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ const ( // === From system generate ===
|
|||
// VariableInventoryName the value which defined in inventory.spec.host.
|
||||
VariableInventoryName = "inventory_name"
|
||||
// VariableHostName the value is node hostname, default VariableInventoryName.
|
||||
// if VariableInventoryName is "localhost". try to set the actual name.
|
||||
// If VariableInventoryName is "localhost". try to set the actual name.
|
||||
VariableHostName = "hostname"
|
||||
// VariableGlobalHosts the value is host_var which defined in inventory.
|
||||
VariableGlobalHosts = "inventory_hosts"
|
||||
|
|
@ -76,5 +76,6 @@ const ( // === From GatherFact ===
|
|||
)
|
||||
|
||||
const ( // === From runtime ===
|
||||
// VariableItem for "loop" argument when run a task.
|
||||
VariableItem = "item"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -42,6 +42,7 @@ func GetRuntimeDir() string {
|
|||
return filepath.Join(workDir, RuntimeDir)
|
||||
}
|
||||
|
||||
// RuntimeDirFromPipeline returns the absolute path of the runtime directory for specify Pipeline
|
||||
func RuntimeDirFromPipeline(obj kkcorev1.Pipeline) string {
|
||||
return filepath.Join(GetRuntimeDir(), kkcorev1.SchemeGroupVersion.String(),
|
||||
RuntimePipelineDir, obj.Namespace, obj.Name)
|
||||
|
|
|
|||
|
|
@ -33,7 +33,6 @@ var (
|
|||
// NOTE: If you are copying this file to start a new api group, STOP! Copy the
|
||||
// extensions group instead. This Scheme is special and should appear ONLY in
|
||||
// the api group, unless you really know what you're doing.
|
||||
// TODO(lavalamp): make the above error impossible.
|
||||
Scheme = newScheme()
|
||||
|
||||
// Codecs provides access to encoding and decoding for the scheme
|
||||
|
|
@ -51,5 +50,6 @@ func newScheme() *runtime.Scheme {
|
|||
utilruntime.Must(kkcorev1.AddToScheme(s))
|
||||
utilruntime.Must(kkcorev1alpha1.AddToScheme(s))
|
||||
utilruntime.Must(kkcorev1alpha1.AddConversionFuncs(s))
|
||||
|
||||
return s
|
||||
}
|
||||
|
|
|
|||
|
|
@ -52,6 +52,8 @@ workdir/
|
|||
| | | | |-- inventory.yaml
|
||||
|
|
||||
|-- kubekey/
|
||||
|-- artifact-path...
|
||||
|-- images
|
||||
|
|
||||
|-- kubernetes/
|
||||
|
||||
|
|
@ -117,9 +119,15 @@ const RuntimePipelineVariableDir = "variable"
|
|||
|
||||
// inventory.yaml is the data of Inventory resource
|
||||
|
||||
// "kubekey" is the default directory name under the working directory. It is used to store
|
||||
// ArtifactDir is the default directory name under the working directory. It is used to store
|
||||
// files required when executing the kubekey command (such as: docker, etcd, image packages, etc.).
|
||||
// These files will be downloaded locally and distributed to remote nodes.
|
||||
const ArtifactDir = "kubekey"
|
||||
|
||||
// artifact-path store artifact package.
|
||||
|
||||
// ArtifactImagesDir store images files. contains blobs and manifests.
|
||||
const ArtifactImagesDir = "images"
|
||||
|
||||
// KubernetesDir represents the remote host directory for each kubernetes connection
|
||||
const KubernetesDir = "kubernetes"
|
||||
|
|
|
|||
|
|
@ -46,6 +46,7 @@ const (
|
|||
defaultServiceAccount = "kk-executor"
|
||||
)
|
||||
|
||||
// PipelineReconciler reconcile pipeline
|
||||
type PipelineReconciler struct {
|
||||
*runtime.Scheme
|
||||
ctrlclient.Client
|
||||
|
|
@ -62,13 +63,16 @@ func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
|
|||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
klog.V(5).InfoS("pipeline not found", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
if pipeline.DeletionTimestamp != nil {
|
||||
klog.V(5).InfoS("pipeline is deleting", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
|
|
@ -78,6 +82,7 @@ func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
|
|||
pipeline.Status.Phase = kkcorev1.PipelinePhasePending
|
||||
if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(excepted)); err != nil {
|
||||
klog.V(5).ErrorS(err, "update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
|
||||
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
case kkcorev1.PipelinePhasePending:
|
||||
|
|
@ -85,15 +90,18 @@ func (r PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
|
|||
pipeline.Status.Phase = kkcorev1.PipelinePhaseRunning
|
||||
if err := r.Client.Status().Patch(ctx, pipeline, ctrlclient.MergeFrom(excepted)); err != nil {
|
||||
klog.V(5).ErrorS(err, "update pipeline error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
|
||||
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
case kkcorev1.PipelinePhaseRunning:
|
||||
|
||||
return r.dealRunningPipeline(ctx, pipeline)
|
||||
case kkcorev1.PipelinePhaseFailed:
|
||||
// do nothing
|
||||
case kkcorev1.PipelinePhaseSucceed:
|
||||
// do nothing
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
|
|
@ -153,6 +161,7 @@ func (r *PipelineReconciler) dealRunningPipeline(ctx context.Context, pipeline *
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
|
|
@ -199,6 +208,7 @@ func (r *PipelineReconciler) checkServiceAccount(ctx context.Context, pipeline k
|
|||
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: pipeline.Namespace, Name: saName}, sa); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
klog.ErrorS(err, "get service account", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
// create sa
|
||||
|
|
@ -206,6 +216,7 @@ func (r *PipelineReconciler) checkServiceAccount(ctx context.Context, pipeline k
|
|||
ObjectMeta: metav1.ObjectMeta{Name: saName, Namespace: pipeline.Namespace},
|
||||
}); err != nil {
|
||||
klog.ErrorS(err, "create service account error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
@ -214,6 +225,7 @@ func (r *PipelineReconciler) checkServiceAccount(ctx context.Context, pipeline k
|
|||
if err := r.Client.Get(ctx, ctrlclient.ObjectKey{Namespace: pipeline.Namespace, Name: saName}, rb); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
klog.ErrorS(err, "create role binding error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
//create rolebinding
|
||||
|
|
@ -234,12 +246,15 @@ func (r *PipelineReconciler) checkServiceAccount(ctx context.Context, pipeline k
|
|||
},
|
||||
}); err != nil {
|
||||
klog.ErrorS(err, "create role binding error", "pipeline", ctrlclient.ObjectKeyFromObject(&pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GenerateJobSpec for pipeline
|
||||
func (r *PipelineReconciler) GenerateJobSpec(pipeline kkcorev1.Pipeline) batchv1.JobSpec {
|
||||
// get ServiceAccount name for executor pod
|
||||
saName, ok := os.LookupEnv("EXECUTOR_SERVICEACCOUNT")
|
||||
|
|
@ -282,6 +297,7 @@ func (r *PipelineReconciler) GenerateJobSpec(pipeline kkcorev1.Pipeline) batchv1
|
|||
},
|
||||
},
|
||||
}
|
||||
|
||||
return jobSpec
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package converter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
|
|
@ -29,11 +29,11 @@ import (
|
|||
"k8s.io/klog/v2"
|
||||
|
||||
kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
|
||||
projectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
|
||||
kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
|
||||
)
|
||||
|
||||
// MarshalBlock marshal block to task
|
||||
func MarshalBlock(ctx context.Context, role string, hosts []string, when []string, block projectv1.Block) *kkcorev1alpha1.Task {
|
||||
func MarshalBlock(role string, hosts []string, when []string, block kkprojectv1.Block) *kkcorev1alpha1.Task {
|
||||
task := &kkcorev1alpha1.Task{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Task",
|
||||
|
|
@ -45,7 +45,7 @@ func MarshalBlock(ctx context.Context, role string, hosts []string, when []strin
|
|||
kkcorev1alpha1.TaskAnnotationRole: role,
|
||||
},
|
||||
},
|
||||
Spec: kkcorev1alpha1.KubeKeyTaskSpec{
|
||||
Spec: kkcorev1alpha1.TaskSpec{
|
||||
Name: block.Name,
|
||||
Hosts: hosts,
|
||||
IgnoreError: block.IgnoreErrors,
|
||||
|
|
@ -55,6 +55,7 @@ func MarshalBlock(ctx context.Context, role string, hosts []string, when []strin
|
|||
Register: block.Register,
|
||||
},
|
||||
}
|
||||
|
||||
if block.Loop != nil {
|
||||
data, err := json.Marshal(block.Loop)
|
||||
if err != nil {
|
||||
|
|
@ -95,13 +96,14 @@ func GroupHostBySerial(hosts []string, serial []any) ([][]string, error) {
|
|||
sis[i] = b
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown serial type. only support int or percent")
|
||||
return nil, errors.New("unknown serial type. only support int or percent")
|
||||
}
|
||||
if sis[i] == 0 {
|
||||
return nil, fmt.Errorf("serial %v should not be zero", a)
|
||||
}
|
||||
count += sis[i]
|
||||
}
|
||||
|
||||
if len(hosts) > count {
|
||||
for i := 0.0; i < float64(len(hosts)-count)/float64(sis[len(sis)-1]); i++ {
|
||||
sis = append(sis, sis[len(sis)-1])
|
||||
|
|
@ -119,5 +121,6 @@ func GroupHostBySerial(hosts []string, serial []any) ([][]string, error) {
|
|||
result[i] = hosts[begin:end]
|
||||
begin += si
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
|
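For reference, GroupHostBySerial mirrors Ansible's serial keyword: each entry is an absolute batch size or a percentage of the host count, and the last size repeats until every host is placed. A usage sketch with hypothetical host names (the exact percentage conversion lives outside this hunk):

	hosts := []string{"n1", "n2", "n3", "n4", "n5"}
	batches, _ := GroupHostBySerial(hosts, []any{1, "40%"})
	// likely: [[n1] [n2 n3] [n4 n5]] — one host, then 40% of 5 = 2, and the final size repeats for the remainder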
|
|||
|
|
@ -93,12 +93,15 @@ func TestGroupHostBySerial(t *testing.T) {
|
|||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result, err := GroupHostBySerial(hosts, tc.serial)
|
||||
if tc.exceptErr {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tc.exceptResult, result)
|
||||
if err != nil {
|
||||
if tc.exceptErr {
|
||||
assert.Error(t, err)
|
||||
|
||||
return
|
||||
}
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, tc.exceptResult, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,11 +9,11 @@ import (
|
|||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// Template parse file or vars which defined in project.
|
||||
var Template = template.New("kubekey").Funcs(funcMap())
|
||||
|
||||
func funcMap() template.FuncMap {
|
||||
var f = sprig.TxtFuncMap()
|
||||
|
||||
delete(f, "env")
|
||||
delete(f, "expandenv")
|
||||
// add custom function
|
||||
|
|
@ -28,12 +28,13 @@ func funcMap() template.FuncMap {
|
|||
// always return a string, even on marshal error (empty string).
|
||||
//
|
||||
// This is designed to be called from a template.
|
||||
func toYAML(v interface{}) string {
|
||||
func toYAML(v any) string {
|
||||
data, err := yaml.Marshal(v)
|
||||
if err != nil {
|
||||
// Swallow errors inside of a template.
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(string(data), "\n")
|
||||
}
|
||||
|
||||
|
|
@ -41,13 +42,15 @@ func toYAML(v interface{}) string {
|
|||
func ipInCIDR(index int, cidr string) (string, error) {
|
||||
var ips = make([]string, 0)
|
||||
for _, s := range strings.Split(cidr, ",") {
|
||||
ips = append(ips, parseIp(s)...)
|
||||
ips = append(ips, parseIP(s)...)
|
||||
}
|
||||
|
||||
if index < 0 {
|
||||
index = max(len(ips)+index, 0)
|
||||
}
|
||||
index = max(index, 0)
|
||||
index = min(index, len(ips)-1)
|
||||
|
||||
return ips[index], nil
|
||||
}
|
||||
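ipInCIDR is exposed to templates: it expands each comma-separated CIDR or ip-range into usable addresses and indexes into the combined list, with negative indexes counting from the end. A hedged usage sketch (exact addresses depend on the expansion rules in the helpers below):

	first, _ := ipInCIDR(0, "192.168.0.0/24")                            // first usable address
	last, _ := ipInCIDR(-1, "192.168.0.0/24,192.168.1.10-192.168.1.20")  // last address of the combined list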
|
||||
|
|
|
|||
|
|
@ -7,8 +7,8 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
// parseIp parse cidr to actual ip slice. or parse the ip range string (format xxx-xxx) to actual ip slice,
|
||||
func parseIp(ip string) []string {
|
||||
// parseIP parse cidr to actual ip slice, or parse the ip range string (format xxx-xxx) to actual ip slice,
|
||||
func parseIP(ip string) []string {
|
||||
var availableIPs []string
|
||||
// if ip is "1.1.1.1/",trim /
|
||||
ip = strings.TrimRight(ip, "/")
|
||||
|
|
@ -25,6 +25,7 @@ func parseIp(ip string) []string {
|
|||
} else {
|
||||
availableIPs = append(availableIPs, ip)
|
||||
}
|
||||
|
||||
return availableIPs
|
||||
}
|
||||
|
||||
|
|
@ -36,21 +37,21 @@ func getAvailableIPRange(ipStart, ipEnd string) []string {
|
|||
if firstIP.To4() == nil || endIP.To4() == nil {
|
||||
return availableIPs
|
||||
}
|
||||
|
||||
firstIPNum := ipToInt(firstIP.To4())
|
||||
EndIPNum := ipToInt(endIP.To4())
|
||||
endIPNum := ipToInt(endIP.To4())
|
||||
pos := int32(1)
|
||||
|
||||
newNum := firstIPNum
|
||||
|
||||
for newNum <= EndIPNum {
|
||||
for newNum <= endIPNum {
|
||||
availableIPs = append(availableIPs, intToIP(newNum).String())
|
||||
newNum += pos
|
||||
}
|
||||
|
||||
return availableIPs
|
||||
}
|
||||
|
||||
func getAvailableIP(ipAndMask string) []string {
|
||||
var availableIPs []string
|
||||
var availableIPs = make([]string, 0)
|
||||
|
||||
ipAndMask = strings.TrimSpace(ipAndMask)
|
||||
ipAndMask = iPAddressToCIDR(ipAndMask)
|
||||
|
|
@ -63,11 +64,12 @@ func getAvailableIP(ipAndMask string) []string {
|
|||
m := size - 2 // -1 for the broadcast address, -1 for the gateway address
|
||||
|
||||
var newNum int32
|
||||
for attempt := int32(0); attempt < m; attempt++ {
|
||||
for range m {
|
||||
newNum = ipNum + pos
|
||||
pos = pos%m + 1
|
||||
availableIPs = append(availableIPs, intToIP(newNum).String())
|
||||
}
|
||||
|
||||
return availableIPs
|
||||
}
|
||||
|
||||
|
|
@ -78,7 +80,8 @@ func ipToInt(ip net.IP) int32 {
|
|||
func intToIP(n int32) net.IP {
|
||||
b := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(b, uint32(n))
|
||||
return net.IP(b)
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func iPAddressToCIDR(ipAddress string) string {
|
||||
|
|
@ -89,10 +92,11 @@ func iPAddressToCIDR(ipAddress string) string {
|
|||
if strings.Contains(mask, ".") {
|
||||
mask = iPMaskStringToCIDR(mask)
|
||||
}
|
||||
|
||||
return ip + "/" + mask
|
||||
} else {
|
||||
return ipAddress
|
||||
}
|
||||
|
||||
return ipAddress
|
||||
}
|
||||
|
||||
func iPMaskStringToCIDR(netmask string) string {
|
||||
|
|
@ -101,8 +105,10 @@ func iPMaskStringToCIDR(netmask string) string {
|
|||
for i, v := range netmaskList {
|
||||
mint[i], _ = strconv.Atoi(v)
|
||||
}
|
||||
|
||||
myIPMask := net.IPv4Mask(byte(mint[0]), byte(mint[1]), byte(mint[2]), byte(mint[3]))
|
||||
ones, _ := myIPMask.Size()
|
||||
|
||||
return strconv.Itoa(ones)
|
||||
}
|
||||
|
||||
|
|
@ -113,13 +119,15 @@ func networkRange(network *net.IPNet) (net.IP, net.IP) {
|
|||
for i := 0; i < len(lastIP); i++ {
|
||||
lastIP[i] = netIP[i] | ^network.Mask[i]
|
||||
}
|
||||
|
||||
return firstIP, lastIP
|
||||
}
|
||||
|
||||
func networkSize(mask net.IPMask) int32 {
|
||||
m := net.IPv4Mask(0, 0, 0, 0)
|
||||
for i := 0; i < net.IPv4len; i++ {
|
||||
for i := range net.IPv4len {
|
||||
m[i] = ^mask[i]
|
||||
}
|
||||
|
||||
return int32(binary.BigEndian.Uint32(m)) + 1
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,11 +19,12 @@ func TestParseIp(t *testing.T) {
|
|||
excepted: func() []string {
|
||||
// 192.168.0.1 - 192.168.63.254
|
||||
var ips []string
|
||||
for i := 0; i <= 63; i++ {
|
||||
for j := 0; j <= 255; j++ {
|
||||
for i := range 64 {
|
||||
for j := range 256 {
|
||||
ips = append(ips, fmt.Sprintf("192.168.%d.%d", i, j))
|
||||
}
|
||||
}
|
||||
|
||||
return ips[1 : len(ips)-1]
|
||||
},
|
||||
},
|
||||
|
|
@ -33,11 +34,12 @@ func TestParseIp(t *testing.T) {
|
|||
excepted: func() []string {
|
||||
// 192.168.0.1 - 192.168.63.254
|
||||
var ips []string
|
||||
for i := 0; i <= 63; i++ {
|
||||
for j := 0; j <= 255; j++ {
|
||||
for i := range 64 {
|
||||
for j := range 256 {
|
||||
ips = append(ips, fmt.Sprintf("192.168.%d.%d", i, j))
|
||||
}
|
||||
}
|
||||
|
||||
return ips[1 : len(ips)-1]
|
||||
},
|
||||
},
|
||||
|
|
@ -45,7 +47,7 @@ func TestParseIp(t *testing.T) {
|
|||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
assert.Equal(t, tc.excepted(), parseIp(tc.ipRange))
|
||||
assert.Equal(t, tc.excepted(), parseIP(tc.ipRange))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -32,19 +32,22 @@ func ParseBool(ctx map[string]any, inputs []string) (bool, error) {
|
|||
if !IsTmplSyntax(input) {
|
||||
input = "{{ " + input + " }}"
|
||||
}
|
||||
|
||||
tl, err := internal.Template.Parse(input)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to parse template '%s': %v", input, err)
|
||||
return false, fmt.Errorf("failed to parse template '%s': %w", input, err)
|
||||
}
|
||||
|
||||
result := bytes.NewBuffer(nil)
|
||||
if err := tl.Execute(result, ctx); err != nil {
|
||||
return false, fmt.Errorf("failed to execute template '%s': %v", input, err)
|
||||
return false, fmt.Errorf("failed to execute template '%s': %w", input, err)
|
||||
}
|
||||
klog.V(6).InfoS(" parse template succeed", "result", result.String())
|
||||
if result.String() != "true" {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
|
|
@ -53,15 +56,18 @@ func ParseString(ctx map[string]any, input string) (string, error) {
|
|||
if !IsTmplSyntax(input) {
|
||||
return input, nil
|
||||
}
|
||||
|
||||
tl, err := internal.Template.Parse(input)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse template '%s': %v", input, err)
|
||||
return "", fmt.Errorf("failed to parse template '%s': %w", input, err)
|
||||
}
|
||||
|
||||
result := bytes.NewBuffer(nil)
|
||||
if err := tl.Execute(result, ctx); err != nil {
|
||||
return "", fmt.Errorf("failed to execute template '%s': %v", input, err)
|
||||
return "", fmt.Errorf("failed to execute template '%s': %w", input, err)
|
||||
}
|
||||
klog.V(6).InfoS(" parse template succeed", "result", result.String())
|
||||
|
||||
return strings.TrimPrefix(strings.TrimSuffix(result.String(), "\n"), "\n"), nil
|
||||
}
|
||||
|
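ParseBool and ParseString render the given expressions as Go templates (with the sprig function map, minus env/expandenv) against the merged host variables. A hedged usage sketch with made-up variable names:

	vars := map[string]any{"kube_version": "v1.28.2"}

	ok, _ := ParseBool(vars, []string{`.kube_version | hasPrefix "v1.28"`}) // bare expressions are wrapped in {{ }} and must render "true"
	ver, _ := ParseString(vars, `{{ .kube_version | trimPrefix "v" }}`)     // "1.28.2"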
||||
|
|
|
|||
|
|
@ -85,13 +85,13 @@ func TestParseBool(t *testing.T) {
|
|||
{
|
||||
name: "eq true-1",
|
||||
condition: []string{"{{ ne .foo \"\" }}"},
|
||||
variable: map[string]any{},
|
||||
variable: make(map[string]any),
|
||||
excepted: true,
|
||||
},
|
||||
{
|
||||
name: "eq true-1",
|
||||
condition: []string{"{{ and .foo (ne .foo \"\") }}"},
|
||||
variable: map[string]any{},
|
||||
variable: make(map[string]any),
|
||||
excepted: false,
|
||||
},
|
||||
// ======= value exist =======
|
||||
|
|
@ -123,7 +123,7 @@ func TestParseBool(t *testing.T) {
|
|||
{
|
||||
name: "default true-1",
|
||||
condition: []string{"{{ .foo | default true }}"},
|
||||
variable: map[string]any{},
|
||||
variable: make(map[string]any),
|
||||
excepted: true,
|
||||
},
|
||||
// ======= has =======
|
||||
|
|
@ -366,20 +366,20 @@ func TestParseFunction(t *testing.T) {
|
|||
{
|
||||
name: "default string 1",
|
||||
input: "{{ .foo | default \"bar\" }}",
|
||||
variable: map[string]any{},
|
||||
variable: make(map[string]any),
|
||||
excepted: "bar",
|
||||
},
|
||||
{
|
||||
name: "default string 2",
|
||||
input: "{{ default .foo \"bar\" }}",
|
||||
variable: map[string]any{},
|
||||
variable: make(map[string]any),
|
||||
excepted: "bar",
|
||||
},
|
||||
|
||||
{
|
||||
name: "default number 1",
|
||||
input: "{{ .foo | default 1 }}",
|
||||
variable: map[string]any{},
|
||||
variable: make(map[string]any),
|
||||
excepted: "1",
|
||||
},
|
||||
// ======= split =======
|
||||
|
|
@ -575,7 +575,7 @@ func TestParseFunction(t *testing.T) {
|
|||
{
|
||||
name: "trimPrefix 2",
|
||||
input: `{{ .foo | default "" |trimPrefix "v" }}`,
|
||||
variable: map[string]any{},
|
||||
variable: make(map[string]any),
|
||||
excepted: "",
|
||||
},
|
||||
}
|
||||
|
|
@ -627,7 +627,7 @@ func TestParseCustomFunction(t *testing.T) {
|
|||
{
|
||||
name: "pow true-1",
|
||||
input: "{{ pow 2 3 }}",
|
||||
variable: map[string]any{},
|
||||
variable: make(map[string]any),
|
||||
excepted: "8",
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,209 @@
|
|||
package executor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
|
||||
kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/converter"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/modules"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/variable"
|
||||
)
|
||||
|
||||
type blockExecutor struct {
|
||||
*option
|
||||
|
||||
// playbook level config
|
||||
hosts []string // which hosts will run playbook
|
||||
ignoreErrors *bool // IgnoreErrors for playbook
|
||||
// blocks level config
|
||||
blocks []kkprojectv1.Block
|
||||
role string // role name of blocks
|
||||
when []string // when condition for blocks
|
||||
tags kkprojectv1.Taggable
|
||||
}
|
||||
|
||||
// Exec block. convert block to task and executor it.
|
||||
func (e blockExecutor) Exec(ctx context.Context) error {
|
||||
for _, block := range e.blocks {
|
||||
hosts := e.dealRunOnce(block.RunOnce)
|
||||
tags := e.dealTags(block.Taggable)
|
||||
ignoreErrors := e.dealIgnoreErrors(block.IgnoreErrors)
|
||||
when := e.dealWhen(block.When)
|
||||
|
||||
// // check tags
|
||||
if !tags.IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) {
|
||||
// if not match the tags. skip
|
||||
continue
|
||||
}
|
||||
|
||||
// merge variable which defined in block
|
||||
if err := e.variable.Merge(variable.MergeRuntimeVariable(block.Vars, hosts...)); err != nil {
|
||||
klog.V(5).ErrorS(err, "merge variable error", "pipeline", e.pipeline, "block", block.Name)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(block.Block) != 0:
|
||||
if err := e.dealBlock(ctx, hosts, ignoreErrors, when, tags, block); err != nil {
|
||||
klog.V(5).ErrorS(err, "deal block error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
case block.IncludeTasks != "":
|
||||
// do nothing. include tasks has converted to blocks.
|
||||
default:
|
||||
if err := e.dealTask(ctx, hosts, when, block); err != nil {
|
||||
klog.V(5).ErrorS(err, "deal task error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dealRunOnce "run_once" argument in block.
|
||||
// If RunOnce is true, it's always only run in the first host.
|
||||
// Otherwise, return hosts which defined in parent block.
|
||||
func (e blockExecutor) dealRunOnce(runOnce bool) []string {
|
||||
hosts := e.hosts
|
||||
if runOnce {
|
||||
// runOnce only run in first node
|
||||
hosts = hosts[:1]
|
||||
}
|
||||
|
||||
return hosts
|
||||
}
|
||||
|
||||
// dealIgnoreErrors "ignore_errors" argument in block.
|
||||
// if ignore_errors not defined in block, set it which defined in parent block.
|
||||
func (e blockExecutor) dealIgnoreErrors(ie *bool) *bool {
|
||||
if ie == nil {
|
||||
ie = e.ignoreErrors
|
||||
}
|
||||
|
||||
return ie
|
||||
}
|
||||
|
||||
// dealTags "tags" argument in block. block tags inherits parent block
|
||||
func (e blockExecutor) dealTags(taggable kkprojectv1.Taggable) kkprojectv1.Taggable {
|
||||
return kkprojectv1.JoinTag(taggable, e.tags)
|
||||
}
|
||||
|
||||
// dealWhen argument in block. block when inherits parent block.
|
||||
func (e blockExecutor) dealWhen(when kkprojectv1.When) []string {
|
||||
w := e.when
|
||||
for _, d := range when.Data {
|
||||
if !slices.Contains(w, d) {
|
||||
w = append(w, d)
|
||||
}
|
||||
}
|
||||
|
||||
return w
|
||||
}
|
||||
|
||||
// dealBlock "block" argument has defined in block. execute order is: block -> rescue -> always
|
||||
// If rescue is defined, execute it when block execute error.
|
||||
// If always id defined, execute it.
|
||||
func (e blockExecutor) dealBlock(ctx context.Context, hosts []string, ignoreErrors *bool, when []string, tags kkprojectv1.Taggable, block kkprojectv1.Block) error {
|
||||
var errs error
|
||||
// exec block
|
||||
if err := (blockExecutor{
|
||||
option: e.option,
|
||||
hosts: hosts,
|
||||
ignoreErrors: ignoreErrors,
|
||||
role: e.role,
|
||||
blocks: block.Block,
|
||||
when: when,
|
||||
tags: tags,
|
||||
}.Exec(ctx)); err != nil {
|
||||
klog.V(5).ErrorS(err, "execute tasks from block error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
errs = errors.Join(errs, err)
|
||||
}
|
||||
// if block exec failed exec rescue
|
||||
if e.pipeline.Status.Phase == kkcorev1.PipelinePhaseFailed && len(block.Rescue) != 0 {
|
||||
if err := (blockExecutor{
|
||||
option: e.option,
|
||||
hosts: hosts,
|
||||
ignoreErrors: ignoreErrors,
|
||||
blocks: block.Rescue,
|
||||
role: e.role,
|
||||
when: when,
|
||||
tags: tags,
|
||||
}.Exec(ctx)); err != nil {
|
||||
klog.V(5).ErrorS(err, "execute tasks from rescue error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
errs = errors.Join(errs, err)
|
||||
}
|
||||
}
|
||||
// exec always after block
|
||||
if len(block.Always) != 0 {
|
||||
if err := (blockExecutor{
|
||||
option: e.option,
|
||||
hosts: hosts,
|
||||
ignoreErrors: ignoreErrors,
|
||||
blocks: block.Always,
|
||||
role: e.role,
|
||||
when: when,
|
||||
tags: tags,
|
||||
}.Exec(ctx)); err != nil {
|
||||
klog.V(5).ErrorS(err, "execute tasks from always error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
errs = errors.Join(errs, err)
|
||||
}
|
||||
}
|
||||
// when execute error. return
|
||||
return errs
|
||||
}
|
||||
|
||||
// dealTask "block" argument is not defined in block.
|
||||
func (e blockExecutor) dealTask(ctx context.Context, hosts []string, when []string, block kkprojectv1.Block) error {
|
||||
task := converter.MarshalBlock(e.role, hosts, when, block)
|
||||
// complete by pipeline
|
||||
task.GenerateName = e.pipeline.Name + "-"
|
||||
task.Namespace = e.pipeline.Namespace
|
||||
if err := controllerutil.SetControllerReference(e.pipeline, task, e.client.Scheme()); err != nil {
|
||||
klog.V(5).ErrorS(err, "Set controller reference error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
// complete module by unknown field
|
||||
for n, a := range block.UnknownField {
|
||||
data, err := json.Marshal(a)
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(err, "Marshal unknown field error", "field", n, "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
if m := modules.FindModule(n); m != nil {
|
||||
task.Spec.Module.Name = n
|
||||
task.Spec.Module.Args = runtime.RawExtension{Raw: data}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if task.Spec.Module.Name == "" { // action is necessary for a task
|
||||
klog.V(5).ErrorS(nil, "No module/action detected in task", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
|
||||
return fmt.Errorf("no module/action detected in task: %s", task.Name)
|
||||
}
|
||||
|
||||
if err := (taskExecutor{option: e.option, task: task}.Exec(ctx)); err != nil {
|
||||
klog.V(5).ErrorS(err, "exec task error", "block", block.Name, "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
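// Illustrative sketch, not part of this file: how dealTask above resolves the module
// for a task from the block's unknown fields. The registry map is hypothetical; the
// real lookup is modules.FindModule, and the first matching field name wins.
func pickModule(registry map[string]bool, unknownFields map[string]any) (string, bool) {
	for name := range unknownFields {
		if registry[name] {
			// the field name doubles as the module name; its value becomes the args
			return name, true
		}
	}

	// no module/action detected: dealTask rejects such a task
	return "", false
}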
|
||||
|
|
@ -0,0 +1,133 @@
|
|||
package executor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
|
||||
)
|
||||
|
||||
func TestBlockExecutor_DealRunOnce(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
runOnce bool
|
||||
except []string
|
||||
}{
|
||||
{
|
||||
name: "runonce is false",
|
||||
runOnce: false,
|
||||
except: []string{"node1", "node2", "node3"},
|
||||
},
|
||||
{
|
||||
name: "runonce is true",
|
||||
runOnce: true,
|
||||
except: []string{"node1"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
assert.ElementsMatch(t, blockExecutor{
|
||||
hosts: []string{"node1", "node2", "node3"},
|
||||
}.dealRunOnce(tc.runOnce), tc.except)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockExecutor_DealIgnoreErrors(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
ignoreErrors *bool
|
||||
except *bool
|
||||
}{
|
||||
{
|
||||
name: "ignoreErrors is empty",
|
||||
ignoreErrors: nil,
|
||||
except: ptr.To(true),
|
||||
},
|
||||
{
|
||||
name: "ignoreErrors is true",
|
||||
ignoreErrors: ptr.To(true),
|
||||
except: ptr.To(true),
|
||||
},
|
||||
{
|
||||
name: "ignoreErrors is false",
|
||||
ignoreErrors: ptr.To(false),
|
||||
except: ptr.To(false),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
assert.Equal(t, blockExecutor{
|
||||
ignoreErrors: ptr.To(true),
|
||||
}.dealIgnoreErrors(tc.ignoreErrors), tc.except)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockExecutor_DealTags(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
tags kkprojectv1.Taggable
|
||||
except kkprojectv1.Taggable
|
||||
}{
|
||||
{
|
||||
name: "single tags",
|
||||
tags: kkprojectv1.Taggable{Tags: []string{"c"}},
|
||||
except: kkprojectv1.Taggable{Tags: []string{"a", "b", "c"}},
|
||||
},
|
||||
{
|
||||
name: "mutil tags",
|
||||
tags: kkprojectv1.Taggable{Tags: []string{"c", "d"}},
|
||||
except: kkprojectv1.Taggable{Tags: []string{"a", "b", "c", "d"}},
|
||||
},
|
||||
{
|
||||
name: "repeat tags",
|
||||
tags: kkprojectv1.Taggable{Tags: []string{"b", "c"}},
|
||||
except: kkprojectv1.Taggable{Tags: []string{"a", "b", "c"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
assert.ElementsMatch(t, blockExecutor{
|
||||
tags: kkprojectv1.Taggable{Tags: []string{"a", "b"}},
|
||||
}.dealTags(tc.tags).Tags, tc.except.Tags)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockExecutor_DealWhen(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
when []string
|
||||
except []string
|
||||
}{
|
||||
{
|
||||
name: "single when",
|
||||
when: []string{"c"},
|
||||
except: []string{"a", "b", "c"},
|
||||
},
|
||||
{
|
||||
name: "mutil when",
|
||||
when: []string{"c", "d"},
|
||||
except: []string{"a", "b", "c", "d"},
|
||||
},
|
||||
{
|
||||
name: "repeat when",
|
||||
when: []string{"b", "c"},
|
||||
except: []string{"a", "b", "c"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
assert.ElementsMatch(t, blockExecutor{
|
||||
when: []string{"a", "b"},
|
||||
}.dealWhen(kkprojectv1.When{Data: tc.when}), tc.except)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,602 +1,26 @@
|
|||
/*
|
||||
Copyright 2024 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package executor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/schollz/progressbar/v3"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/json"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/klog/v2"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
|
||||
kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
|
||||
projectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/connector"
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/converter"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/modules"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/project"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/variable"
|
||||
)
|
||||
|
||||
// TaskExecutor all task in pipeline
|
||||
type TaskExecutor interface {
|
||||
// Executor executes all tasks in a pipeline
|
||||
type Executor interface {
|
||||
Exec(ctx context.Context) error
|
||||
}
|
||||
|
||||
func NewTaskExecutor(client ctrlclient.Client, pipeline *kkcorev1.Pipeline, logOutput io.Writer) TaskExecutor {
|
||||
// get variable
|
||||
v, err := variable.New(client, *pipeline)
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(nil, "convert playbook error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
|
||||
return nil
|
||||
}
|
||||
|
||||
return &executor{
|
||||
client: client,
|
||||
pipeline: pipeline,
|
||||
variable: v,
|
||||
logOutput: logOutput,
|
||||
}
|
||||
}
|
||||
|
||||
type executor struct {
|
||||
// option for pipelineExecutor, blockExecutor, taskExecutor
|
||||
type option struct {
|
||||
client ctrlclient.Client
|
||||
|
||||
pipeline *kkcorev1.Pipeline
|
||||
variable variable.Variable
|
||||
|
||||
// command line log output. Defaults to os.Stdout.
|
||||
logOutput io.Writer
|
||||
}
|
||||
|
||||
type execBlockOptions struct {
|
||||
// playbook level config
|
||||
hosts []string // which hosts will run playbook
|
||||
ignoreErrors *bool // IgnoreErrors for playbook
|
||||
// blocks level config
|
||||
blocks []projectv1.Block
|
||||
role string // role name of blocks
|
||||
when []string // when condition for blocks
|
||||
tags projectv1.Taggable
|
||||
}
|
||||
|
||||
func (e executor) Exec(ctx context.Context) error {
|
||||
klog.V(6).InfoS("deal project", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
pj, err := project.New(*e.pipeline, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deal project error: %w", err)
|
||||
}
|
||||
|
||||
// convert to transfer.Playbook struct
|
||||
pb, err := pj.MarshalPlaybook()
|
||||
if err != nil {
|
||||
return fmt.Errorf("convert playbook error: %w", err)
|
||||
}
|
||||
|
||||
for _, play := range pb.Play {
|
||||
if !play.Taggable.IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) {
|
||||
// if not match the tags. skip
|
||||
continue
|
||||
}
|
||||
// hosts should contain all host's name. hosts should not be empty.
|
||||
var hosts []string
|
||||
if ahn, err := e.variable.Get(variable.GetHostnames(play.PlayHost.Hosts)); err == nil {
|
||||
hosts = ahn.([]string)
|
||||
}
|
||||
if len(hosts) == 0 { // if hosts is empty skip this playbook
|
||||
klog.V(5).Info("Hosts is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
continue
|
||||
}
|
||||
|
||||
// when gather_fact is set. get host's information from remote.
|
||||
if play.GatherFacts {
|
||||
for _, h := range hosts {
|
||||
gfv, err := e.getGatherFact(ctx, h, e.variable)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get gather fact error: %w", err)
|
||||
}
|
||||
// merge host information to runtime variable
|
||||
if err := e.variable.Merge(variable.MergeRemoteVariable(h, gfv)); err != nil {
|
||||
klog.V(5).ErrorS(err, "Merge gather fact error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "host", h)
|
||||
return fmt.Errorf("merge gather fact error: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Batch execution, with each batch being a group of hosts run in serial.
|
||||
var batchHosts [][]string
|
||||
if play.RunOnce {
|
||||
// runOnce only run in first node
|
||||
batchHosts = [][]string{{hosts[0]}}
|
||||
} else {
|
||||
// group hosts by serial. run the playbook by serial
|
||||
batchHosts, err = converter.GroupHostBySerial(hosts, play.Serial.Data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("group host by serial error: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// generate and execute task.
|
||||
for _, serials := range batchHosts {
|
||||
// each batch hosts should not be empty.
|
||||
if len(serials) == 0 {
|
||||
klog.V(5).ErrorS(nil, "Host is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
return fmt.Errorf("host is empty")
|
||||
}
|
||||
|
||||
if err := e.mergeVariable(ctx, e.variable, play.Vars, serials...); err != nil {
|
||||
return fmt.Errorf("merge variable error: %w", err)
|
||||
}
|
||||
// generate task from pre tasks
|
||||
if err := e.execBlock(ctx, execBlockOptions{
|
||||
hosts: serials,
|
||||
ignoreErrors: play.IgnoreErrors,
|
||||
blocks: play.PreTasks,
|
||||
tags: play.Taggable,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("execute pre-tasks from play error: %w", err)
|
||||
}
|
||||
// generate task from role
|
||||
for _, role := range play.Roles {
|
||||
if err := e.mergeVariable(ctx, e.variable, role.Vars, serials...); err != nil {
|
||||
return fmt.Errorf("merge variable error: %w", err)
|
||||
}
|
||||
// use the most closely configuration
|
||||
ignoreErrors := role.IgnoreErrors
|
||||
if ignoreErrors == nil {
|
||||
ignoreErrors = play.IgnoreErrors
|
||||
}
|
||||
|
||||
if err := e.execBlock(ctx, execBlockOptions{
|
||||
hosts: serials,
|
||||
ignoreErrors: ignoreErrors,
|
||||
blocks: role.Block,
|
||||
role: role.Role,
|
||||
when: role.When.Data,
|
||||
tags: projectv1.JoinTag(role.Taggable, play.Taggable),
|
||||
}); err != nil {
|
||||
return fmt.Errorf("execute role-tasks error: %w", err)
|
||||
}
|
||||
}
|
||||
// generate task from tasks
|
||||
if err := e.execBlock(ctx, execBlockOptions{
|
||||
hosts: serials,
|
||||
ignoreErrors: play.IgnoreErrors,
|
||||
blocks: play.Tasks,
|
||||
tags: play.Taggable,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("execute tasks error: %w", err)
|
||||
}
|
||||
// generate task from post tasks
|
||||
if err := e.execBlock(ctx, execBlockOptions{
|
||||
hosts: serials,
|
||||
ignoreErrors: play.IgnoreErrors,
|
||||
blocks: play.Tasks,
|
||||
tags: play.Taggable,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("execute post-tasks error: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getGatherFact get host info
|
||||
func (e executor) getGatherFact(ctx context.Context, hostname string, vars variable.Variable) (map[string]any, error) {
|
||||
v, err := vars.Get(variable.GetParamVariable(hostname))
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(err, "Get host variable error", "hostname", hostname)
|
||||
return nil, err
|
||||
}
|
||||
connectorVars := make(map[string]any)
|
||||
if c1, ok := v.(map[string]any)[_const.VariableConnector]; ok {
|
||||
if c2, ok := c1.(map[string]any); ok {
|
||||
connectorVars = c2
|
||||
}
|
||||
}
|
||||
conn, err := connector.NewConnector(hostname, connectorVars)
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(err, "New connector error", "hostname", hostname)
|
||||
return nil, err
|
||||
}
|
||||
if err := conn.Init(ctx); err != nil {
|
||||
klog.V(5).ErrorS(err, "Init connection error", "hostname", hostname)
|
||||
return nil, err
|
||||
}
|
||||
defer conn.Close(ctx)
|
||||
|
||||
if gf, ok := conn.(connector.GatherFacts); ok {
|
||||
return gf.Info(ctx)
|
||||
}
|
||||
klog.V(5).ErrorS(nil, "gather fact is not defined in this connector", "hostname", hostname)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// execBlock loop block and generate task.
|
||||
func (e executor) execBlock(ctx context.Context, options execBlockOptions) error {
|
||||
for _, at := range options.blocks {
|
||||
if !projectv1.JoinTag(at.Taggable, options.tags).IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) {
|
||||
continue
|
||||
}
|
||||
hosts := options.hosts
|
||||
if at.RunOnce { // only run in first host
|
||||
hosts = []string{options.hosts[0]}
|
||||
}
|
||||
tags := projectv1.JoinTag(at.Taggable, options.tags)
|
||||
|
||||
// use the most closely configuration
|
||||
ignoreErrors := at.IgnoreErrors
|
||||
if ignoreErrors == nil {
|
||||
ignoreErrors = options.ignoreErrors
|
||||
}
|
||||
// merge variable which defined in block
|
||||
if err := e.mergeVariable(ctx, e.variable, at.Vars, hosts...); err != nil {
|
||||
klog.V(5).ErrorS(err, "merge variable error", "pipeline", e.pipeline, "block", at.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(at.Block) != 0:
|
||||
var errs error
|
||||
// exec block
|
||||
if err := e.execBlock(ctx, execBlockOptions{
|
||||
hosts: hosts,
|
||||
ignoreErrors: ignoreErrors,
|
||||
role: options.role,
|
||||
blocks: at.Block,
|
||||
when: append(options.when, at.When.Data...),
|
||||
tags: tags,
|
||||
}); err != nil {
|
||||
klog.V(5).ErrorS(err, "execute tasks from block error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
|
||||
errs = errors.Join(errs, err)
|
||||
}
|
||||
|
||||
// if block exec failed exec rescue
|
||||
if e.pipeline.Status.Phase == kkcorev1.PipelinePhaseFailed && len(at.Rescue) != 0 {
|
||||
if err := e.execBlock(ctx, execBlockOptions{
|
||||
hosts: hosts,
|
||||
ignoreErrors: ignoreErrors,
|
||||
blocks: at.Rescue,
|
||||
role: options.role,
|
||||
when: append(options.when, at.When.Data...),
|
||||
tags: tags,
|
||||
}); err != nil {
|
||||
klog.V(5).ErrorS(err, "execute tasks from rescue error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
|
||||
errs = errors.Join(errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
// exec always after block
|
||||
if len(at.Always) != 0 {
|
||||
if err := e.execBlock(ctx, execBlockOptions{
|
||||
hosts: hosts,
|
||||
ignoreErrors: ignoreErrors,
|
||||
blocks: at.Always,
|
||||
role: options.role,
|
||||
when: append(options.when, at.When.Data...),
|
||||
tags: tags,
|
||||
}); err != nil {
|
||||
klog.V(5).ErrorS(err, "execute tasks from always error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
|
||||
errs = errors.Join(errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
// when execute error. return
|
||||
if errs != nil {
|
||||
return errs
|
||||
}
|
||||
|
||||
case at.IncludeTasks != "":
|
||||
// include tasks has converted to blocks.
|
||||
// do nothing
|
||||
default:
|
||||
task := converter.MarshalBlock(ctx, options.role, hosts, append(options.when, at.When.Data...), at)
|
||||
// complete by pipeline
|
||||
task.GenerateName = e.pipeline.Name + "-"
|
||||
task.Namespace = e.pipeline.Namespace
|
||||
if err := controllerutil.SetControllerReference(e.pipeline, task, e.client.Scheme()); err != nil {
|
||||
klog.V(5).ErrorS(err, "Set controller reference error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
|
||||
return err
|
||||
}
|
||||
// complete module by unknown field
|
||||
for n, a := range at.UnknownFiled {
|
||||
data, err := json.Marshal(a)
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(err, "Marshal unknown field error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name, "field", n)
|
||||
return err
|
||||
}
|
||||
if m := modules.FindModule(n); m != nil {
|
||||
task.Spec.Module.Name = n
|
||||
task.Spec.Module.Args = runtime.RawExtension{Raw: data}
|
||||
break
|
||||
}
|
||||
}
|
||||
if task.Spec.Module.Name == "" { // action is necessary for a task
|
||||
klog.V(5).ErrorS(nil, "No module/action detected in task", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
|
||||
return fmt.Errorf("no module/action detected in task: %s", task.Name)
|
||||
}
|
||||
// create task
|
||||
if err := e.client.Create(ctx, task); err != nil {
|
||||
klog.V(5).ErrorS(err, "create task error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
var roleLog string
|
||||
if task.Annotations[kkcorev1alpha1.TaskAnnotationRole] != "" {
|
||||
roleLog = "[" + task.Annotations[kkcorev1alpha1.TaskAnnotationRole] + "] "
|
||||
}
|
||||
klog.V(5).InfoS("begin run task", "task", ctrlclient.ObjectKeyFromObject(task))
|
||||
fmt.Fprintf(e.logOutput, "%s %s%s\n", time.Now().Format(time.TimeOnly+" MST"), roleLog, task.Spec.Name)
|
||||
// exec task
|
||||
task.Status.Phase = kkcorev1alpha1.TaskPhaseRunning
|
||||
if err := e.client.Status().Update(ctx, task); err != nil {
|
||||
klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(task))
|
||||
}
|
||||
if err := e.executeTask(ctx, task, options); err != nil {
|
||||
klog.V(5).ErrorS(err, "exec task error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
|
||||
return err
|
||||
}
|
||||
if err := e.client.Status().Update(ctx, task); err != nil {
|
||||
klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(task))
|
||||
return err
|
||||
}
|
||||
|
||||
if task.IsComplete() {
|
||||
break
|
||||
}
|
||||
}
|
||||
e.pipeline.Status.TaskResult.Total++
|
||||
switch task.Status.Phase {
|
||||
case kkcorev1alpha1.TaskPhaseSuccess:
|
||||
e.pipeline.Status.TaskResult.Success++
|
||||
case kkcorev1alpha1.TaskPhaseIgnored:
|
||||
e.pipeline.Status.TaskResult.Ignored++
|
||||
case kkcorev1alpha1.TaskPhaseFailed:
|
||||
e.pipeline.Status.TaskResult.Failed++
|
||||
}
|
||||
|
||||
// exit when task run failed
|
||||
if task.IsFailed() {
|
||||
var hostReason []kkcorev1.PipelineFailedDetailHost
|
||||
for _, tr := range task.Status.HostResults {
|
||||
hostReason = append(hostReason, kkcorev1.PipelineFailedDetailHost{
|
||||
Host: tr.Host,
|
||||
Stdout: tr.Stdout,
|
||||
StdErr: tr.StdErr,
|
||||
})
|
||||
}
|
||||
e.pipeline.Status.FailedDetail = append(e.pipeline.Status.FailedDetail, kkcorev1.PipelineFailedDetail{
|
||||
Task: task.Spec.Name,
|
||||
Hosts: hostReason,
|
||||
})
|
||||
e.pipeline.Status.Phase = kkcorev1.PipelinePhaseFailed
|
||||
return fmt.Errorf("task %s run failed", task.Spec.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// executeTask parallel in each host.
|
||||
func (e executor) executeTask(ctx context.Context, task *kkcorev1alpha1.Task, options execBlockOptions) error {
|
||||
// check task host results
|
||||
wg := &wait.Group{}
|
||||
task.Status.HostResults = make([]kkcorev1alpha1.TaskHostResult, len(task.Spec.Hosts))
|
||||
|
||||
for i, h := range task.Spec.Hosts {
|
||||
wg.StartWithContext(ctx, func(ctx context.Context) {
|
||||
// task result
|
||||
var stdout, stderr string
|
||||
defer func() {
|
||||
if task.Spec.Register != "" {
|
||||
var stdoutResult any = stdout
|
||||
var stderrResult any = stderr
|
||||
// try to convert by json
|
||||
_ = json.Unmarshal([]byte(stdout), &stdoutResult)
|
||||
// try to convert by json
|
||||
_ = json.Unmarshal([]byte(stderr), &stderrResult)
|
||||
// set variable to parent location
|
||||
if err := e.variable.Merge(variable.MergeRuntimeVariable(h, map[string]any{
|
||||
task.Spec.Register: map[string]any{
|
||||
"stdout": stdoutResult,
|
||||
"stderr": stderrResult,
|
||||
},
|
||||
})); err != nil {
|
||||
stderr = fmt.Sprintf("register task result to variable error: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
if stderr != "" && task.Spec.IgnoreError != nil && *task.Spec.IgnoreError {
|
||||
klog.V(5).ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(task))
|
||||
} else if stderr != "" {
|
||||
klog.ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(task))
|
||||
}
|
||||
// fill result
|
||||
task.Status.HostResults[i] = kkcorev1alpha1.TaskHostResult{
|
||||
Host: h,
|
||||
Stdout: stdout,
|
||||
StdErr: stderr,
|
||||
}
|
||||
}()
|
||||
// task log
|
||||
// placeholder format task log
|
||||
var placeholder string
|
||||
if hostNameMaxLen, err := e.variable.Get(variable.GetHostMaxLength()); err == nil {
|
||||
placeholder = strings.Repeat(" ", hostNameMaxLen.(int)-len(h))
|
||||
}
|
||||
// progress bar for task
|
||||
var bar = progressbar.NewOptions(-1,
|
||||
progressbar.OptionSetWriter(e.logOutput),
|
||||
progressbar.OptionSpinnerCustom([]string{" "}),
|
||||
progressbar.OptionEnableColorCodes(true),
|
||||
progressbar.OptionSetDescription(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[36mrunning\033[0m", h, placeholder)),
|
||||
progressbar.OptionOnCompletion(func() {
|
||||
if _, err := os.Stdout.WriteString("\n"); err != nil {
|
||||
klog.ErrorS(err, "failed to write output", "host", h)
|
||||
}
|
||||
}),
|
||||
)
|
||||
go func() {
|
||||
for !bar.IsFinished() {
|
||||
if err := bar.Add(1); err != nil {
|
||||
return
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
switch {
|
||||
case stderr != "":
|
||||
if task.Spec.IgnoreError != nil && *task.Spec.IgnoreError { // ignore
|
||||
bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34mignore \033[0m", h, placeholder))
|
||||
} else { // failed
|
||||
bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[31mfailed \033[0m", h, placeholder))
|
||||
}
|
||||
case stdout == "skip": // skip
|
||||
bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34mskip \033[0m", h, placeholder))
|
||||
default: //success
|
||||
bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34msuccess\033[0m", h, placeholder))
|
||||
}
|
||||
if err := bar.Finish(); err != nil {
|
||||
klog.ErrorS(err, "finish bar error")
|
||||
}
|
||||
}()
|
||||
// task execute
|
||||
ha, err := e.variable.Get(variable.GetAllVariable(h))
|
||||
if err != nil {
|
||||
stderr = fmt.Sprintf("get variable error: %v", err)
|
||||
return
|
||||
}
|
||||
// check when condition
|
||||
if len(task.Spec.When) > 0 {
|
||||
ok, err := tmpl.ParseBool(ha.(map[string]any), task.Spec.When)
|
||||
if err != nil {
|
||||
stderr = fmt.Sprintf("parse when condition error: %v", err)
|
||||
return
|
||||
}
|
||||
if !ok {
|
||||
stdout = "skip"
|
||||
return
|
||||
}
|
||||
}
|
||||
// execute module with loop
|
||||
// if loop is empty. execute once, and the item is null
|
||||
for _, item := range e.parseLoop(ctx, ha.(map[string]any), task) {
|
||||
// set item to runtime variable
|
||||
if err := e.variable.Merge(variable.MergeRuntimeVariable(h, map[string]any{
|
||||
_const.VariableItem: item,
|
||||
})); err != nil {
|
||||
stderr = fmt.Sprintf("set loop item to variable error: %v", err)
|
||||
return
|
||||
}
|
||||
stdout, stderr = e.executeModule(ctx, task, modules.ExecOptions{
|
||||
Args: task.Spec.Module.Args,
|
||||
Host: h,
|
||||
Variable: e.variable,
|
||||
Task: *task,
|
||||
Pipeline: *e.pipeline,
|
||||
})
|
||||
// delete item
|
||||
if err := e.variable.Merge(variable.MergeRuntimeVariable(h, map[string]any{
|
||||
_const.VariableItem: nil,
|
||||
})); err != nil {
|
||||
stderr = fmt.Sprintf("clean loop item to variable error: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
wg.Wait()
|
||||
// host result for task
|
||||
task.Status.Phase = kkcorev1alpha1.TaskPhaseSuccess
|
||||
for _, data := range task.Status.HostResults {
|
||||
if data.StdErr != "" {
|
||||
if task.Spec.IgnoreError != nil && *task.Spec.IgnoreError {
|
||||
task.Status.Phase = kkcorev1alpha1.TaskPhaseIgnored
|
||||
} else {
|
||||
task.Status.Phase = kkcorev1alpha1.TaskPhaseFailed
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseLoop parse loop to slice. if loop contains template string. convert it.
|
||||
// loop is json string. try convertor to string slice by json.
|
||||
// loop is normal string. set it to empty slice and return.
|
||||
// loop is string slice. return it.
|
||||
func (e executor) parseLoop(ctx context.Context, ha map[string]any, task *kkcorev1alpha1.Task) []any {
|
||||
switch {
|
||||
case task.Spec.Loop.Raw == nil:
|
||||
// loop is not set. add one element to execute once module.
|
||||
return []any{nil}
|
||||
default:
|
||||
return variable.Extension2Slice(ha, task.Spec.Loop)
|
||||
}
|
||||
}
|
||||
|
||||
// executeModule find register module and execute it.
|
||||
func (e executor) executeModule(ctx context.Context, task *kkcorev1alpha1.Task, opts modules.ExecOptions) (string, string) {
|
||||
// get all variable. which contains item.
|
||||
lg, err := opts.Variable.Get(variable.GetAllVariable(opts.Host))
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(err, "get location variable error", "task", ctrlclient.ObjectKeyFromObject(task))
|
||||
return "", err.Error()
|
||||
}
|
||||
// check failed when condition
|
||||
if len(task.Spec.FailedWhen) > 0 {
|
||||
ok, err := tmpl.ParseBool(lg.(map[string]any), task.Spec.FailedWhen)
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(err, "validate FailedWhen condition error", "task", ctrlclient.ObjectKeyFromObject(task))
|
||||
return "", err.Error()
|
||||
}
|
||||
if ok {
|
||||
return "", "failed by failedWhen"
|
||||
}
|
||||
}
|
||||
|
||||
return modules.FindModule(task.Spec.Module.Name)(ctx, opts)
|
||||
}
|
||||
|
||||
// mergeVariable to runtime variable
|
||||
func (e executor) mergeVariable(ctx context.Context, v variable.Variable, vd map[string]any, hosts ...string) error {
|
||||
if len(vd) == 0 {
|
||||
// skip
|
||||
return nil
|
||||
}
|
||||
for _, host := range hosts {
|
||||
if err := v.Merge(variable.MergeRuntimeVariable(host, vd)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,73 @@
|
|||
package executor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
|
||||
kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/variable"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/variable/source"
|
||||
)
|
||||
|
||||
func newTestOption() (*option, error) {
|
||||
var err error
|
||||
|
||||
o := &option{
|
||||
client: fake.NewClientBuilder().WithScheme(_const.Scheme).WithStatusSubresource(&kkcorev1.Pipeline{}, &kkcorev1alpha1.Task{}).Build(),
|
||||
pipeline: &kkcorev1.Pipeline{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: corev1.NamespaceDefault,
|
||||
},
|
||||
Spec: kkcorev1.PipelineSpec{
|
||||
InventoryRef: &corev1.ObjectReference{
|
||||
Name: "test",
|
||||
Namespace: corev1.NamespaceDefault,
|
||||
},
|
||||
ConfigRef: &corev1.ObjectReference{
|
||||
Name: "test",
|
||||
Namespace: corev1.NamespaceDefault,
|
||||
},
|
||||
},
|
||||
Status: kkcorev1.PipelineStatus{},
|
||||
},
|
||||
logOutput: os.Stdout,
|
||||
}
|
||||
|
||||
if err := o.client.Create(context.TODO(), &kkcorev1.Inventory{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: corev1.NamespaceDefault,
|
||||
},
|
||||
Spec: kkcorev1.InventorySpec{},
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := o.client.Create(context.TODO(), &kkcorev1.Config{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: corev1.NamespaceDefault,
|
||||
},
|
||||
Spec: runtime.RawExtension{},
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o.variable, err = variable.New(context.TODO(), o.client, *o.pipeline, source.MemorySource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
|
@ -0,0 +1,276 @@
|
|||
/*
|
||||
Copyright 2024 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package executor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
|
||||
kkprojectv1 "github.com/kubesphere/kubekey/v4/pkg/apis/project/v1"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/connector"
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/converter"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/project"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/variable"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/variable/source"
|
||||
)
|
||||
|
||||
// NewPipelineExecutor return a new pipelineExecutor
|
||||
func NewPipelineExecutor(ctx context.Context, client ctrlclient.Client, pipeline *kkcorev1.Pipeline, logOutput io.Writer) Executor {
|
||||
// get variable
|
||||
v, err := variable.New(ctx, client, *pipeline, source.FileSource)
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(nil, "convert playbook error", "pipeline", ctrlclient.ObjectKeyFromObject(pipeline))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return &pipelineExecutor{
|
||||
option: &option{
|
||||
client: client,
|
||||
pipeline: pipeline,
|
||||
variable: v,
|
||||
logOutput: logOutput,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// executor for pipeline
|
||||
type pipelineExecutor struct {
|
||||
*option
|
||||
}
|
||||
|
||||
// Exec executes the pipeline: convert the playbook to blocks and execute them.
|
||||
func (e pipelineExecutor) Exec(ctx context.Context) error {
|
||||
klog.V(5).InfoS("deal project", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
pj, err := project.New(ctx, *e.pipeline, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deal project error: %w", err)
|
||||
}
|
||||
|
||||
// convert to transfer.Playbook struct
|
||||
pb, err := pj.MarshalPlaybook()
|
||||
if err != nil {
|
||||
return fmt.Errorf("convert playbook error: %w", err)
|
||||
}
|
||||
|
||||
for _, play := range pb.Play {
|
||||
// check tags
|
||||
if !play.Taggable.IsEnabled(e.pipeline.Spec.Tags, e.pipeline.Spec.SkipTags) {
|
||||
// skip the play if its tags do not match
|
||||
continue
|
||||
}
|
||||
// hosts should contain every host's name and must not be empty.
|
||||
var hosts []string
|
||||
if err := e.dealHosts(play.PlayHost, &hosts); err != nil {
|
||||
klog.V(4).ErrorS(err, "deal hosts error, skip this playbook", "hosts", play.PlayHost)
|
||||
|
||||
continue
|
||||
}
|
||||
// when gather_fact is set. get host's information from remote.
|
||||
if err := e.dealGatherFacts(ctx, play.GatherFacts, hosts); err != nil {
|
||||
return fmt.Errorf("deal gather_facts argument error: %w", err)
|
||||
}
|
||||
// Batch execution, with each batch being a group of hosts run in serial.
|
||||
var batchHosts [][]string
|
||||
if err := e.dealSerial(play.Serial.Data, hosts, &batchHosts); err != nil {
|
||||
return fmt.Errorf("deal serial argument error: %w", err)
|
||||
}
|
||||
e.dealRunOnce(play.RunOnce, hosts, &batchHosts)
|
||||
// exec the play on each batch of hosts
|
||||
if err := e.execBatchHosts(ctx, play, batchHosts); err != nil {
|
||||
return fmt.Errorf("exec batch hosts error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// execBatchHosts executes the blocks of a play in order: "pre_tasks" > "roles" > "tasks" > "post_tasks"
|
||||
func (e pipelineExecutor) execBatchHosts(ctx context.Context, play kkprojectv1.Play, batchHosts [][]string) any {
|
||||
// generate and execute task.
|
||||
for _, serials := range batchHosts {
|
||||
// each batch hosts should not be empty.
|
||||
if len(serials) == 0 {
|
||||
klog.V(5).ErrorS(nil, "Host is empty", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
|
||||
return errors.New("host is empty")
|
||||
}
|
||||
|
||||
if err := e.variable.Merge(variable.MergeRuntimeVariable(play.Vars, serials...)); err != nil {
|
||||
return fmt.Errorf("merge variable error: %w", err)
|
||||
}
|
||||
// generate task from pre tasks
|
||||
if err := (blockExecutor{
|
||||
option: e.option,
|
||||
hosts: serials,
|
||||
ignoreErrors: play.IgnoreErrors,
|
||||
blocks: play.PreTasks,
|
||||
tags: play.Taggable,
|
||||
}.Exec(ctx)); err != nil {
|
||||
return fmt.Errorf("execute pre-tasks from play error: %w", err)
|
||||
}
|
||||
// generate task from role
|
||||
for _, role := range play.Roles {
|
||||
if err := e.variable.Merge(variable.MergeRuntimeVariable(role.Vars, serials...)); err != nil {
|
||||
return fmt.Errorf("merge variable error: %w", err)
|
||||
}
|
||||
// use the most closely scoped configuration
|
||||
ignoreErrors := role.IgnoreErrors
|
||||
if ignoreErrors == nil {
|
||||
ignoreErrors = play.IgnoreErrors
|
||||
}
|
||||
// role is block.
|
||||
if err := (blockExecutor{
|
||||
option: e.option,
|
||||
hosts: serials,
|
||||
ignoreErrors: ignoreErrors,
|
||||
blocks: role.Block,
|
||||
role: role.Role,
|
||||
when: role.When.Data,
|
||||
tags: kkprojectv1.JoinTag(role.Taggable, play.Taggable),
|
||||
}.Exec(ctx)); err != nil {
|
||||
return fmt.Errorf("execute role-tasks error: %w", err)
|
||||
}
|
||||
}
|
||||
// generate task from tasks
|
||||
if err := (blockExecutor{
|
||||
option: e.option,
|
||||
hosts: serials,
|
||||
ignoreErrors: play.IgnoreErrors,
|
||||
blocks: play.Tasks,
|
||||
tags: play.Taggable,
|
||||
}.Exec(ctx)); err != nil {
|
||||
return fmt.Errorf("execute tasks error: %w", err)
|
||||
}
|
||||
// generate task from post tasks
|
||||
if err := (blockExecutor{
|
||||
option: e.option,
|
||||
hosts: serials,
|
||||
ignoreErrors: play.IgnoreErrors,
|
||||
blocks: play.Tasks,
|
||||
tags: play.Taggable,
|
||||
}.Exec(ctx)); err != nil {
|
||||
return fmt.Errorf("execute post-tasks error: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dealHosts "hosts" argument in playbook. get hostname from kkprojectv1.PlayHost
|
||||
func (e pipelineExecutor) dealHosts(host kkprojectv1.PlayHost, i *[]string) error {
|
||||
ahn, err := e.variable.Get(variable.GetHostnames(host.Hosts))
|
||||
if err != nil {
|
||||
return fmt.Errorf("getHostnames error: %w", err)
|
||||
}
|
||||
|
||||
if h, ok := ahn.([]string); ok {
|
||||
*i = h
|
||||
}
|
||||
if len(*i) == 0 { // if hosts is empty skip this playbook
|
||||
return errors.New("hosts is empty")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dealGatherFacts "gather_facts" argument in playbook. get host remote info and merge to variable
|
||||
func (e pipelineExecutor) dealGatherFacts(ctx context.Context, gatherFacts bool, hosts []string) error {
|
||||
if !gatherFacts {
|
||||
// skip
|
||||
return nil
|
||||
}
|
||||
|
||||
dealGatherFactsInHost := func(hostname string) error {
|
||||
v, err := e.variable.Get(variable.GetParamVariable(hostname))
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(err, "get host variable error", "hostname", hostname)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
connectorVars := make(map[string]any)
|
||||
if c1, ok := v.(map[string]any)[_const.VariableConnector]; ok {
|
||||
if c2, ok := c1.(map[string]any); ok {
|
||||
connectorVars = c2
|
||||
}
|
||||
}
|
||||
// get host connector
|
||||
conn, err := connector.NewConnector(hostname, connectorVars)
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(err, "new connector error", "hostname", hostname)
|
||||
|
||||
return err
|
||||
}
|
||||
if err := conn.Init(ctx); err != nil {
|
||||
klog.V(5).ErrorS(err, "init connection error", "hostname", hostname)
|
||||
|
||||
return err
|
||||
}
|
||||
defer conn.Close(ctx)
|
||||
|
||||
if gf, ok := conn.(connector.GatherFacts); ok {
|
||||
remoteInfo, err := gf.HostInfo(ctx)
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(err, "gatherFacts from connector error", "hostname", hostname)
|
||||
|
||||
return err
|
||||
}
|
||||
if err := e.variable.Merge(variable.MergeRemoteVariable(remoteInfo, hostname)); err != nil {
|
||||
klog.V(5).ErrorS(err, "merge gather fact error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "host", hostname)
|
||||
|
||||
return fmt.Errorf("merge gather fact error: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hostname := range hosts {
|
||||
if err := dealGatherFactsInHost(hostname); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
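// Minimal standalone sketch of the optional-capability check used above: facts are only
// gathered when the connector also implements a GatherFacts-style interface, so the code
// type-asserts before calling it. The interface below is illustrative, not kubekey's.
type factGatherer interface {
	HostInfo(ctx context.Context) (map[string]any, error)
}

func hostInfoIfSupported(ctx context.Context, conn any) (map[string]any, error) {
	if gf, ok := conn.(factGatherer); ok {
		return gf.HostInfo(ctx)
	}

	// connector cannot gather facts; the caller simply merges nothing
	return nil, nil
}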
|
||||
|
||||
// dealSerial "serial" argument in playbook.
|
||||
func (e pipelineExecutor) dealSerial(serial []any, hosts []string, batchHosts *[][]string) error {
|
||||
var err error
|
||||
*batchHosts, err = converter.GroupHostBySerial(hosts, serial)
|
||||
if err != nil {
|
||||
return fmt.Errorf("group host by serial error: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
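// Minimal sketch of the batching idea behind dealSerial, assuming every serial entry is
// a plain integer batch size. The real converter.GroupHostBySerial accepts richer values;
// this only shows how hosts end up grouped into batches that run one after another.
func groupBySize(hosts []string, sizes []int) [][]string {
	var batches [][]string
	i := 0
	for _, n := range sizes {
		if i >= len(hosts) || n <= 0 {
			break
		}
		end := i + n
		if end > len(hosts) {
			end = len(hosts)
		}
		batches = append(batches, hosts[i:end])
		i = end
	}
	if i < len(hosts) {
		// any hosts not covered by the serial entries go into a final batch here;
		// the real helper may split them differently
		batches = append(batches, hosts[i:])
	}

	return batches
}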
|
||||
|
||||
// dealRunOnce deals with the "run_once" argument in a playbook. If runOnce is true, the play only runs on the first host.
|
||||
func (e pipelineExecutor) dealRunOnce(runOnce bool, hosts []string, batchHosts *[][]string) {
|
||||
if runOnce {
|
||||
// runOnce only runs on the first node
|
||||
*batchHosts = [][]string{{hosts[0]}}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
package executor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPipelineExecutor_DealRunOnce(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
runOnce bool
|
||||
hosts []string
|
||||
batchHosts [][]string
|
||||
except [][]string
|
||||
}{
|
||||
{
|
||||
name: "runonce is false",
|
||||
runOnce: false,
|
||||
batchHosts: [][]string{{"node1", "node2"}},
|
||||
except: [][]string{{"node1", "node2"}},
|
||||
},
|
||||
{
|
||||
name: "runonce is true",
|
||||
runOnce: true,
|
||||
hosts: []string{"node1"},
|
||||
batchHosts: [][]string{{"node1", "node2"}},
|
||||
except: [][]string{{"node1"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
pipelineExecutor{}.dealRunOnce(tc.runOnce, tc.hosts, &tc.batchHosts)
|
||||
assert.Equal(t, tc.batchHosts, tc.except)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,342 @@
|
|||
package executor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/schollz/progressbar/v3"
|
||||
"k8s.io/apimachinery/pkg/util/json"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/klog/v2"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
|
||||
kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
|
||||
_const "github.com/kubesphere/kubekey/v4/pkg/const"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/modules"
|
||||
"github.com/kubesphere/kubekey/v4/pkg/variable"
|
||||
)
|
||||
|
||||
type taskExecutor struct {
|
||||
*option
|
||||
task *kkcorev1alpha1.Task
|
||||
}
|
||||
|
||||
// Exec creates the task, executes it until complete, and records its result in the pipeline status.
|
||||
func (e taskExecutor) Exec(ctx context.Context) error {
|
||||
// create task
|
||||
if err := e.client.Create(ctx, e.task); err != nil {
|
||||
klog.V(5).ErrorS(err, "create task error", "task", ctrlclient.ObjectKeyFromObject(e.task), "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
e.pipeline.Status.TaskResult.Total++
|
||||
switch e.task.Status.Phase {
|
||||
case kkcorev1alpha1.TaskPhaseSuccess:
|
||||
e.pipeline.Status.TaskResult.Success++
|
||||
case kkcorev1alpha1.TaskPhaseIgnored:
|
||||
e.pipeline.Status.TaskResult.Ignored++
|
||||
case kkcorev1alpha1.TaskPhaseFailed:
|
||||
e.pipeline.Status.TaskResult.Failed++
|
||||
}
|
||||
}()
|
||||
|
||||
for !e.task.IsComplete() {
|
||||
var roleLog string
|
||||
if e.task.Annotations[kkcorev1alpha1.TaskAnnotationRole] != "" {
|
||||
roleLog = "[" + e.task.Annotations[kkcorev1alpha1.TaskAnnotationRole] + "] "
|
||||
}
|
||||
klog.V(5).InfoS("begin run task", "task", ctrlclient.ObjectKeyFromObject(e.task))
|
||||
fmt.Fprintf(e.logOutput, "%s %s%s\n", time.Now().Format(time.TimeOnly+" MST"), roleLog, e.task.Spec.Name)
|
||||
// exec task
|
||||
e.task.Status.Phase = kkcorev1alpha1.TaskPhaseRunning
|
||||
if err := e.client.Status().Update(ctx, e.task); err != nil {
|
||||
klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(e.task), "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
}
|
||||
e.execTask(ctx)
|
||||
if err := e.client.Status().Update(ctx, e.task); err != nil {
|
||||
klog.V(5).ErrorS(err, "update task status error", "task", ctrlclient.ObjectKeyFromObject(e.task), "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline))
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
// exit when task run failed
|
||||
if e.task.IsFailed() {
|
||||
var hostReason []kkcorev1.PipelineFailedDetailHost
|
||||
for _, tr := range e.task.Status.HostResults {
|
||||
hostReason = append(hostReason, kkcorev1.PipelineFailedDetailHost{
|
||||
Host: tr.Host,
|
||||
Stdout: tr.Stdout,
|
||||
StdErr: tr.StdErr,
|
||||
})
|
||||
}
|
||||
e.pipeline.Status.FailedDetail = append(e.pipeline.Status.FailedDetail, kkcorev1.PipelineFailedDetail{
|
||||
Task: e.task.Spec.Name,
|
||||
Hosts: hostReason,
|
||||
})
|
||||
e.pipeline.Status.Phase = kkcorev1.PipelinePhaseFailed
|
||||
|
||||
return fmt.Errorf("task %s run failed", e.task.Spec.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// execTask runs the task on all hosts in parallel and aggregates the per-host results into the task phase.
|
||||
func (e taskExecutor) execTask(ctx context.Context) {
|
||||
// check task host results
|
||||
wg := &wait.Group{}
|
||||
e.task.Status.HostResults = make([]kkcorev1alpha1.TaskHostResult, len(e.task.Spec.Hosts))
|
||||
for i, h := range e.task.Spec.Hosts {
|
||||
wg.StartWithContext(ctx, e.execTaskHost(i, h))
|
||||
}
|
||||
wg.Wait()
|
||||
// host result for task
|
||||
e.task.Status.Phase = kkcorev1alpha1.TaskPhaseSuccess
|
||||
for _, data := range e.task.Status.HostResults {
|
||||
if data.StdErr != "" {
|
||||
if e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError {
|
||||
e.task.Status.Phase = kkcorev1alpha1.TaskPhaseIgnored
|
||||
} else {
|
||||
e.task.Status.Phase = kkcorev1alpha1.TaskPhaseFailed
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// execTaskHost deals with the module on each host in parallel.
|
||||
func (e taskExecutor) execTaskHost(i int, h string) func(ctx context.Context) {
|
||||
return func(ctx context.Context) {
|
||||
// task result
|
||||
var stdout, stderr string
|
||||
defer func() {
|
||||
if err := e.dealRegister(stdout, stderr, h); err != nil {
|
||||
stderr = err.Error()
|
||||
}
|
||||
if stderr != "" && e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError {
|
||||
klog.V(5).ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(e.task))
|
||||
} else if stderr != "" {
|
||||
klog.ErrorS(nil, "task run failed", "host", h, "stdout", stdout, "stderr", stderr, "task", ctrlclient.ObjectKeyFromObject(e.task))
|
||||
}
|
||||
// fill result
|
||||
e.task.Status.HostResults[i] = kkcorev1alpha1.TaskHostResult{
|
||||
Host: h,
|
||||
Stdout: stdout,
|
||||
StdErr: stderr,
|
||||
}
|
||||
}()
|
||||
// task log
|
||||
deferFunc := e.execTaskHostLogs(ctx, h, &stdout, &stderr)
|
||||
defer deferFunc()
|
||||
// task execute
|
||||
ha, err := e.variable.Get(variable.GetAllVariable(h))
|
||||
if err != nil {
|
||||
stderr = fmt.Sprintf("get variable error: %v", err)
|
||||
|
||||
return
|
||||
}
|
||||
// convert hostVariable to map
|
||||
had, ok := ha.(map[string]any)
|
||||
if !ok {
|
||||
stderr = fmt.Sprintf("variable is not map error: %v", err)
|
||||
}
|
||||
// check when condition
|
||||
if skip := e.dealWhen(had, &stdout, &stderr); skip {
|
||||
return
|
||||
}
|
||||
// execute the module in a loop, once per loop item.
// if the loop is empty, execute once with a null item.
|
||||
for _, item := range e.dealLoop(had) {
|
||||
// set item to runtime variable
|
||||
if err := e.variable.Merge(variable.MergeRuntimeVariable(map[string]any{
|
||||
_const.VariableItem: item,
|
||||
}, h)); err != nil {
|
||||
stderr = fmt.Sprintf("set loop item to variable error: %v", err)
|
||||
|
||||
return
|
||||
}
|
||||
e.executeModule(ctx, e.task, h, &stdout, &stderr)
|
||||
// delete item
|
||||
if err := e.variable.Merge(variable.MergeRuntimeVariable(map[string]any{
|
||||
_const.VariableItem: nil,
|
||||
}, h)); err != nil {
|
||||
stderr = fmt.Sprintf("clean loop item to variable error: %v", err)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// execTaskHostLogs renders the progress log for a single host and returns a function that finalizes it.
|
||||
func (e taskExecutor) execTaskHostLogs(ctx context.Context, h string, stdout, stderr *string) func() {
|
||||
// placeholder format task log
|
||||
var placeholder string
|
||||
if hostNameMaxLen, err := e.variable.Get(variable.GetHostMaxLength()); err == nil {
|
||||
if hl, ok := hostNameMaxLen.(int); ok {
|
||||
placeholder = strings.Repeat(" ", hl-len(h))
|
||||
}
|
||||
}
|
||||
// progress bar for task
|
||||
var bar = progressbar.NewOptions(-1,
|
||||
progressbar.OptionSetWriter(e.logOutput),
|
||||
progressbar.OptionSpinnerCustom([]string{" "}),
|
||||
progressbar.OptionEnableColorCodes(true),
|
||||
progressbar.OptionSetDescription(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[36mrunning\033[0m", h, placeholder)),
|
||||
progressbar.OptionOnCompletion(func() {
|
||||
if _, err := os.Stdout.WriteString("\n"); err != nil {
|
||||
klog.ErrorS(err, "failed to write output", "host", h)
|
||||
}
|
||||
}),
|
||||
)
|
||||
// run progress
|
||||
go func() {
|
||||
err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(context.Context) (bool, error) {
|
||||
if bar.IsFinished() {
|
||||
return true, nil
|
||||
}
|
||||
if err := bar.Add(1); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "failed to wait for task run to finish", "host", h)
|
||||
}
|
||||
}()
|
||||
|
||||
return func() {
|
||||
switch {
|
||||
case *stderr != "":
|
||||
if e.task.Spec.IgnoreError != nil && *e.task.Spec.IgnoreError { // ignore
|
||||
bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34mignore \033[0m", h, placeholder))
|
||||
} else { // failed
|
||||
bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[31mfailed \033[0m", h, placeholder))
|
||||
}
|
||||
case *stdout == modules.StdoutSkip: // skip
|
||||
bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34mskip \033[0m", h, placeholder))
|
||||
default: // success
|
||||
bar.Describe(fmt.Sprintf("[\033[36m%s\033[0m]%s \033[34msuccess\033[0m", h, placeholder))
|
||||
}
|
||||
if err := bar.Finish(); err != nil {
|
||||
klog.ErrorS(err, "finish bar error")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dealLoop parses the loop into an item slice. If the loop contains a template string, it is rendered first.
// If the loop is a JSON string, try to convert it to a string slice via JSON.
// If the loop is a plain string, it is treated as an empty slice.
|
||||
func (e taskExecutor) dealLoop(ha map[string]any) []any {
|
||||
var items []any
|
||||
switch {
|
||||
case e.task.Spec.Loop.Raw == nil:
|
||||
// loop is not set. add one element to execute once module.
|
||||
items = []any{nil}
|
||||
default:
|
||||
items = variable.Extension2Slice(ha, e.task.Spec.Loop)
|
||||
}
|
||||
|
||||
return items
|
||||
}
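// Tiny usage-style sketch of the contract dealLoop gives its caller: when no loop is set
// the slice still holds one nil element, so the module always executes at least once and
// the "item" variable is simply null on that run. Purely illustrative, not part of the file.
func runPerItem(items []any, run func(item any)) {
	if len(items) == 0 {
		// mirror dealLoop: no loop means a single null item
		items = []any{nil}
	}
	for _, item := range items {
		run(item)
	}
}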
|
||||
|
||||
// executeModule finds the registered module and executes it on a single host.
|
||||
func (e taskExecutor) executeModule(ctx context.Context, task *kkcorev1alpha1.Task, host string, stdout, stderr *string) {
|
||||
// get all variable. which contains item.
|
||||
ha, err := e.variable.Get(variable.GetAllVariable(host))
|
||||
if err != nil {
|
||||
*stderr = fmt.Sprintf("failed to get host %s variable: %v", host, err)
|
||||
|
||||
return
|
||||
}
|
||||
// convert hostVariable to map
|
||||
had, ok := ha.(map[string]any)
|
||||
if !ok {
|
||||
*stderr = fmt.Sprintf("host: %s variable is not a map", host)
|
||||
|
||||
return
|
||||
}
|
||||
// check failed when condition
|
||||
if skip := e.dealFailedWhen(had, stdout, stderr); skip {
|
||||
return
|
||||
}
|
||||
*stdout, *stderr = modules.FindModule(task.Spec.Module.Name)(ctx, modules.ExecOptions{
|
||||
Args: e.task.Spec.Module.Args,
|
||||
Host: host,
|
||||
Variable: e.variable,
|
||||
Task: *e.task,
|
||||
Pipeline: *e.pipeline,
|
||||
})
|
||||
}
|
||||
|
||||
// dealWhen "when" argument in task.
|
||||
func (e taskExecutor) dealWhen(had map[string]any, stdout, stderr *string) bool {
|
||||
if len(e.task.Spec.When) > 0 {
|
||||
ok, err := tmpl.ParseBool(had, e.task.Spec.When)
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(err, "validate when condition error", "task", ctrlclient.ObjectKeyFromObject(e.task))
|
||||
*stderr = fmt.Sprintf("parse when condition error: %v", err)
|
||||
|
||||
return true
|
||||
}
|
||||
if !ok {
|
||||
*stdout = modules.StdoutSkip
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// dealFailedWhen "failed_when" argument in task.
|
||||
func (e taskExecutor) dealFailedWhen(had map[string]any, stdout, stderr *string) bool {
|
||||
if len(e.task.Spec.FailedWhen) > 0 {
|
||||
ok, err := tmpl.ParseBool(had, e.task.Spec.FailedWhen)
|
||||
if err != nil {
|
||||
klog.V(5).ErrorS(err, "validate failed_when condition error", "task", ctrlclient.ObjectKeyFromObject(e.task))
|
||||
*stderr = fmt.Sprintf("parse failed_when condition error: %v", err)
|
||||
|
||||
return true
|
||||
}
|
||||
if ok {
|
||||
*stdout = modules.StdoutSkip
|
||||
*stderr = "reach failed_when, failed"
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// dealRegister "register" argument in task.
|
||||
func (e taskExecutor) dealRegister(stdout, stderr, host string) error {
|
||||
if e.task.Spec.Register != "" {
|
||||
var stdoutResult any = stdout
|
||||
var stderrResult any = stderr
|
||||
// try to convert by json
|
||||
_ = json.Unmarshal([]byte(stdout), &stdoutResult)
|
||||
// try to convert by json
|
||||
_ = json.Unmarshal([]byte(stderr), &stderrResult)
|
||||
// set variable to parent location
|
||||
if err := e.variable.Merge(variable.MergeRuntimeVariable(map[string]any{
|
||||
e.task.Spec.Register: map[string]any{
|
||||
"stdout": stdoutResult,
|
||||
"stderr": stderrResult,
|
||||
},
|
||||
}, host)); err != nil {
|
||||
return fmt.Errorf("register task result to variable error: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
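// Standalone sketch of the decode-or-keep-raw trick dealRegister uses above: unmarshalling
// into an `any` that already holds the raw string only replaces it when the input is valid
// JSON, otherwise the original string survives. Uses the same json helper imported here.
func decodeOrRaw(s string) any {
	var v any = s
	// on invalid JSON, Unmarshal returns an error and leaves v holding the raw string
	_ = json.Unmarshal([]byte(s), &v)

	return v
}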
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
package executor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1"
|
||||
)
|
||||
|
||||
func TestTaskExecutor(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
task *kkcorev1alpha1.Task
|
||||
}{
|
||||
{
|
||||
name: "debug module in single host",
|
||||
task: &kkcorev1alpha1.Task{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: corev1.NamespaceDefault,
|
||||
},
|
||||
Spec: kkcorev1alpha1.TaskSpec{
|
||||
Hosts: []string{"node1"},
|
||||
Module: kkcorev1alpha1.Module{
|
||||
Name: "debug",
|
||||
Args: runtime.RawExtension{Raw: []byte(`{"msg":"hello"}`)},
|
||||
},
|
||||
},
|
||||
Status: kkcorev1alpha1.TaskStatus{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "debug module in multiple hosts",
|
||||
task: &kkcorev1alpha1.Task{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: corev1.NamespaceDefault,
|
||||
},
|
||||
Spec: kkcorev1alpha1.TaskSpec{
|
||||
Hosts: []string{"node1", "n2"},
|
||||
Module: kkcorev1alpha1.Module{
|
||||
Name: "debug",
|
||||
Args: runtime.RawExtension{Raw: []byte(`{"msg":"hello"}`)},
|
||||
},
|
||||
},
|
||||
Status: kkcorev1alpha1.TaskStatus{},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
o, err := newTestOption()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := (&taskExecutor{
|
||||
option: o,
|
||||
task: tc.task,
|
||||
}).Exec(context.TODO()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -41,6 +41,7 @@ type commandManager struct {
|
|||
logOutput io.Writer
|
||||
}
|
||||
|
||||
// Run the command manager: print the log banner and run the pipeline executor.
|
||||
func (m *commandManager) Run(ctx context.Context) error {
|
||||
fmt.Fprint(m.logOutput, `
|
||||
|
||||
|
|
@ -66,7 +67,8 @@ func (m *commandManager) Run(ctx context.Context) error {
|
|||
klog.ErrorS(err, "clean runtime directory error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline), "runtime_dir", _const.GetRuntimeDir())
|
||||
}
|
||||
}
|
||||
if m.Pipeline.Spec.JobSpec.Schedule != "" { // if pipeline is cornJob. it's always running.
|
||||
// if the pipeline is a cronJob, it's always running.
|
||||
if m.Pipeline.Spec.JobSpec.Schedule != "" {
|
||||
m.Pipeline.Status.Phase = kkcorev1.PipelinePhaseRunning
|
||||
}
|
||||
// update pipeline status
|
||||
|
|
@ -76,10 +78,11 @@ func (m *commandManager) Run(ctx context.Context) error {
|
|||
}()
|
||||
|
||||
m.Pipeline.Status.Phase = kkcorev1.PipelinePhaseSucceed
|
||||
if err := executor.NewTaskExecutor(m.Client, m.Pipeline, m.logOutput).Exec(ctx); err != nil {
|
||||
if err := executor.NewPipelineExecutor(ctx, m.Client, m.Pipeline, m.logOutput).Exec(ctx); err != nil {
|
||||
klog.ErrorS(err, "executor tasks error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline))
|
||||
m.Pipeline.Status.Phase = kkcorev1.PipelinePhaseFailed
|
||||
m.Pipeline.Status.Reason = err.Error()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -34,6 +34,7 @@ type controllerManager struct {
	LeaderElection bool
}

// Run the controller manager: run the controllers inside Kubernetes.
func (c controllerManager) Run(ctx context.Context) error {
	ctrl.SetLogger(klog.NewKlogr())
	restconfig, err := ctrl.GetConfig()
@@ -45,6 +46,7 @@ func (c controllerManager) Run(ctx context.Context) error {
	if err != nil {
		return fmt.Errorf("could not get rest config: %w", err)
	}

	mgr, err := ctrl.NewManager(restconfig, ctrl.Options{
		Scheme:         _const.Scheme,
		LeaderElection: c.LeaderElection,
@@ -61,6 +63,7 @@ func (c controllerManager) Run(ctx context.Context) error {
		MaxConcurrentReconciles: c.MaxConcurrentReconciles,
	}).SetupWithManager(mgr); err != nil {
		klog.ErrorS(err, "create pipeline controller error")

		return err
	}
@@ -31,6 +31,7 @@ type Manager interface {
	Run(ctx context.Context) error
}

// CommandManagerOptions for NewCommandManager
type CommandManagerOptions struct {
	*kkcorev1.Pipeline
	*kkcorev1.Config
@@ -39,6 +40,7 @@ type CommandManagerOptions struct {
	ctrlclient.Client
}

// NewCommandManager returns a new commandManager
func NewCommandManager(o CommandManagerOptions) Manager {
	return &commandManager{
		Pipeline: o.Pipeline,
@@ -49,11 +51,13 @@ func NewCommandManager(o CommandManagerOptions) Manager {
	}
}

// ControllerManagerOptions for NewControllerManager
type ControllerManagerOptions struct {
	MaxConcurrentReconciles int
	LeaderElection          bool
}

// NewControllerManager returns a new controllerManager
func NewControllerManager(o ControllerManagerOptions) Manager {
	return &controllerManager{
		MaxConcurrentReconciles: o.MaxConcurrentReconciles,
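For orientation, a minimal usage sketch of the two constructors above; the pipeline, config, and client values are assumed to be built elsewhere, and any options fields not visible in these hunks are omitted:

	// Sketch only (same package); not part of this commit.
	func exampleRunManagers(ctx context.Context, pipeline *kkcorev1.Pipeline, config *kkcorev1.Config, client ctrlclient.Client) error {
		// command mode: execute the given pipeline directly
		m := NewCommandManager(CommandManagerOptions{
			Pipeline: pipeline,
			Config:   config,
			Client:   client,
		})
		if err := m.Run(ctx); err != nil {
			return err
		}
		// controller mode: reconcile pipelines from inside a cluster
		return NewControllerManager(ControllerManagerOptions{
			MaxConcurrentReconciles: 1,
			LeaderElection:          false,
		}).Run(ctx)
	}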
@@ -18,54 +18,89 @@ package modules

import (
	"context"
	"errors"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/klog/v2"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
	"github.com/kubesphere/kubekey/v4/pkg/variable"
)

type assertArgs struct {
	that       []string
	successMsg string
	failMsg    string // higher priority than msg
	msg        string
}

func newAssertArgs(_ context.Context, raw runtime.RawExtension, vars map[string]any) (*assertArgs, error) {
	var err error
	aa := &assertArgs{}
	args := variable.Extension2Variables(raw)
	if aa.that, err = variable.StringSliceVar(vars, args, "that"); err != nil {
		return nil, errors.New("\"that\" should be []string or string")
	}
	aa.successMsg, _ = variable.StringVar(vars, args, "success_msg")
	if aa.successMsg == "" {
		aa.successMsg = StdoutTrue
	}
	aa.failMsg, _ = variable.StringVar(vars, args, "fail_msg")
	aa.msg, _ = variable.StringVar(vars, args, "msg")
	if aa.msg == "" {
		aa.msg = StdoutFalse
	}

	return aa, nil
}

// ModuleAssert handles the "assert" module
func ModuleAssert(ctx context.Context, options ExecOptions) (string, string) {
	// get host variable
	ha, err := options.Variable.Get(variable.GetAllVariable(options.Host))
	ha, err := options.getAllVariables()
	if err != nil {
		return "", fmt.Sprintf("failed to get host variable: %v", err)
		return "", err.Error()
	}

	args := variable.Extension2Variables(options.Args)
	thatParam, err := variable.StringSliceVar(ha.(map[string]any), args, "that")
	aa, err := newAssertArgs(ctx, options.Args, ha)
	if err != nil {
		return "", "\"that\" should be []string or string"
		klog.V(4).ErrorS(err, "get assert args error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))

		return "", err.Error()
	}

	ok, err := tmpl.ParseBool(ha.(map[string]any), thatParam)
	ok, err := tmpl.ParseBool(ha, aa.that)
	if err != nil {
		return "", fmt.Sprintf("parse \"that\" error: %v", err)
	}

	// condition is true
	if ok {
		if successMsgParam, err := variable.StringVar(ha.(map[string]any), args, "success_msg"); err == nil {
			if r, err := tmpl.ParseString(ha.(map[string]any), successMsgParam); err != nil {
				return "", fmt.Sprintf("parse \"success_msg\" error: %v", err)
			} else {
				return r, ""
			}
		r, err := tmpl.ParseString(ha, aa.successMsg)
		if err == nil {
			return r, ""
		}
		return stdoutTrue, ""
	} else {
		if failMsgParam, err := variable.StringVar(ha.(map[string]any), args, "fail_msg"); err == nil {
			if r, err := tmpl.ParseString(ha.(map[string]any), failMsgParam); err != nil {
				return "", fmt.Sprintf("parse \"fail_msg\" error: %v", err)
			} else {
				return stdoutFalse, r
			}
		}
		if msgParam, err := variable.StringVar(ha.(map[string]any), args, "msg"); err == nil {
			if r, err := tmpl.ParseString(ha.(map[string]any), msgParam); err != nil {
				return "", fmt.Sprintf("parse \"msg\" error: %v", err)
			} else {
				return stdoutFalse, r
			}
		}
		return stdoutFalse, "False"
		klog.V(4).ErrorS(err, "parse \"success_msg\" error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))

		return StdoutTrue, ""
	}
	// condition is false and fail_msg is not empty
	if aa.failMsg != "" {
		r, err := tmpl.ParseString(ha, aa.failMsg)
		if err == nil {
			return StdoutFalse, r
		}
		klog.V(4).ErrorS(err, "parse \"fail_msg\" error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))
	}
	// condition is false and msg is not empty
	if aa.msg != "" {
		r, err := tmpl.ParseString(ha, aa.msg)
		if err == nil {
			return StdoutFalse, r
		}
		klog.V(4).ErrorS(err, "parse \"msg\" error", "task", ctrlclient.ObjectKeyFromObject(&options.Task))
	}

	return StdoutFalse, "False"
}
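To make the new argument handling concrete, a small sketch of newAssertArgs follows; the condition string, template, and variable map are illustrative only and not taken from this commit:

	// Sketch only (same package as the module above); not part of this commit.
	func exampleAssertArgs(ctx context.Context) {
		raw := runtime.RawExtension{Raw: []byte(`{"that": ".ok", "fail_msg": "not ok: {{ .reason }}"}`)}
		aa, err := newAssertArgs(ctx, raw, map[string]any{"ok": true, "reason": "example"})
		if err != nil {
			return // only reached when "that" is neither a string nor a []string
		}
		// "that" is normalized to a string slice; success_msg defaults to StdoutTrue and
		// msg defaults to StdoutFalse, while a non-empty fail_msg takes priority over msg.
		_ = aa
	}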
@@ -54,7 +54,7 @@ func TestAssert(t *testing.T) {
					},
				},
			},
			exceptStdout: stdoutTrue,
			exceptStdout: StdoutTrue,
		},
		{
			name: "success with success_msg",
@@ -86,7 +86,7 @@ func TestAssert(t *testing.T) {
					},
				},
			},
			exceptStdout: stdoutFalse,
			exceptStdout: StdoutFalse,
			exceptStderr: "False",
		},
		{
@@ -103,7 +103,7 @@ func TestAssert(t *testing.T) {
					},
				},
			},
			exceptStdout: stdoutFalse,
			exceptStdout: StdoutFalse,
			exceptStderr: "failed v2",
		},
	}

@@ -112,6 +112,7 @@ func TestAssert(t *testing.T) {
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
			defer cancel()

			acStdout, acStderr := ModuleAssert(ctx, tc.opt)
			assert.Equal(t, tc.exceptStdout, acStdout)
			assert.Equal(t, tc.exceptStderr, acStderr)
Some files were not shown because too many files have changed in this diff.