Mirror of https://github.com/kubesphere/kubekey.git
1. Support Docker's data-root config.
2. Add a timeout and ignore errors when draining a node.

Signed-off-by: wwt <wenwutang@yunify.com>
commit a71c98b6e5
parent 2222db2bd6
@@ -176,6 +176,7 @@ type RegistryConfig struct {
 	RegistryMirrors    []string             `yaml:"registryMirrors" json:"registryMirrors,omitempty"`
 	InsecureRegistries []string             `yaml:"insecureRegistries" json:"insecureRegistries,omitempty"`
 	PrivateRegistry    string               `yaml:"privateRegistry" json:"privateRegistry,omitempty"`
+	DataRoot           string               `yaml:"dataRoot" json:"dataRoot,omitempty"`
 	NamespaceOverride  string               `yaml:"namespaceOverride" json:"namespaceOverride,omitempty"`
 	Auths              runtime.RawExtension `yaml:"auths" json:"auths,omitempty"`
 }
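For context, a minimal sketch of how the new dataRoot field could be set in a KubeKey cluster configuration. Only the registry.dataRoot key comes from this diff; apiVersion, kind, and the example values are assumptions for illustration.

# Illustrative cluster config fragment; only registry.dataRoot is introduced by this commit.
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
  name: sample
spec:
  registry:
    registryMirrors: []
    insecureRegistries: []
    privateRegistry: ""
    dataRoot: /var/lib/docker    # passed through to Docker's daemon.json as "data-root"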
@@ -102,6 +102,7 @@ func InstallDocker(m *InstallContainerModule) []task.Interface {
 			Data: util.Data{
 				"Mirrors":            templates.Mirrors(m.KubeConf),
 				"InsecureRegistries": templates.InsecureRegistries(m.KubeConf),
+				"DataRoot":           templates.DataRoot(m.KubeConf),
 			},
 		},
 		Parallel: true,
@@ -31,6 +31,9 @@ var DockerConfig = template.Must(template.New("daemon.json").Parse(
     "max-size": "5m",
     "max-file":"3"
   },
+  {{- if .DataRoot }}
+  "data-root": {{ .DataRoot }},
+  {{- end}}
   {{- if .Mirrors }}
   "registry-mirrors": [{{ .Mirrors }}],
   {{- end}}
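Assuming a data root of /var/lib/docker and a single mirror, the template fragment above would render roughly like this (the values are made up; only the keys and structure come from the template):

    "max-size": "5m",
    "max-file": "3"
  },
  "data-root": "/var/lib/docker",
  "registry-mirrors": ["https://mirror.example.com"],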
@@ -64,3 +67,11 @@ func InsecureRegistries(kubeConf *common.KubeConf) string {
 	}
 	return insecureRegistries
 }
+
+func DataRoot(kubeConf *common.KubeConf) string {
+	var dataRoot string
+	if kubeConf.Cluster.Registry.DataRoot != "" {
+		dataRoot = fmt.Sprintf("\"%s\"", kubeConf.Cluster.Registry.DataRoot)
+	}
+	return dataRoot
+}
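A standalone sketch, not part of this commit, of why the helper returns either a pre-quoted string or an empty one: an empty result makes the template's {{- if .DataRoot }} guard drop the data-root line entirely, while a non-empty result is already valid JSON. The quoteDataRoot name and the trimmed-down template are hypothetical stand-ins for the real helper and daemon.json template.

package main

import (
	"fmt"
	"os"
	"text/template"
)

// quoteDataRoot mimics the quoting done by the new DataRoot helper:
// empty input stays empty, anything else is wrapped in double quotes.
func quoteDataRoot(path string) string {
	if path == "" {
		return ""
	}
	return fmt.Sprintf("%q", path)
}

// frag is a trimmed-down stand-in for the daemon.json template fragment above.
var frag = template.Must(template.New("daemon.json").Parse(`{
  {{- if .DataRoot }}
  "data-root": {{ .DataRoot }},
  {{- end }}
  "registry-mirrors": []
}
`))

func main() {
	// With a path set, the rendered JSON contains a quoted data-root entry.
	_ = frag.Execute(os.Stdout, map[string]string{"DataRoot": quoteDataRoot("/var/lib/docker")})
	// With no path, the whole data-root line is omitted and the JSON stays valid.
	_ = frag.Execute(os.Stdout, map[string]string{"DataRoot": quoteDataRoot("")})
}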
@@ -386,7 +386,7 @@ func (d *DeleteKubeNodeModule) Init() {
 		Hosts:   d.Runtime.GetHostsByRole(common.Master),
 		Prepare: new(common.OnlyFirstMaster),
 		Action:  new(DrainNode),
-		Retry:   5,
+		Retry:   2,
 	}
 
 	deleteNode := &task.RemoteTask{
@@ -533,7 +533,7 @@ func (d *DrainNode) Execute(runtime connector.Runtime) error {
 		return errors.New("get dstNode failed by pipeline cache")
 	}
 	if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf(
-		"/usr/local/bin/kubectl drain %s --delete-emptydir-data --ignore-daemonsets", nodeName),
+		"/usr/local/bin/kubectl drain %s --delete-emptydir-data --ignore-daemonsets --timeout=2m --force", nodeName),
 		true); err != nil {
 		return errors.Wrap(err, "drain the node failed")
 	}
@@ -287,7 +287,7 @@ func GetKubeletConfiguration(runtime connector.Runtime, kubeConf *common.KubeConf
 	}
 
 	if kubeConf.Arg.Debug {
-		logger.Log.Debug("Set kubeletConfiguration: %v", kubeletConfiguration)
+		logger.Log.Debugf("Set kubeletConfiguration: %v", kubeletConfiguration)
 	}
 
 	return kubeletConfiguration