fix: join control-plane error (#2321)
* fix: var_files defined error
* fix: rescue is not exec.
* fix: join control-plane error.

Signed-off-by: joyceliu <joyceliu@yunify.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
parent b6c7ea291f
commit de9a34ff38
@@ -17,7 +17,7 @@ kubernetes:
   # the first value is ipv4_cidr, the last value is ipv6_cidr.
   pod_cidr: 10.233.64.0/18
   service_cidr: 10.233.0.0/18
-  dns_image: "{{ k8s_registry }}/coredns/coredns:v1.11.1"
+  dns_image: "{{ k8s_registry }}/coredns/coredns:v1.8.6"
   dns_cache_image: "{{ dockerio_registry }}/kubesphere/k8s-dns-node-cache:1.22.20"
   dns_service_ip: "{{ kubernetes.networking.service_cidr|ip_range|slice:':3'|last }}"
   # Specify a stable IP address or DNS name for the control plane.
@@ -75,3 +75,14 @@
     cp -f /etc/kubernetes/admin.conf /root/.kube/config
   when: kube_node_info_important.stderr != ""
 
+- name: Set to worker node
+  when: inventory_name in groups["kube_worker"]
+  block:
+    - name: Remote master taint
+      ignore_errors: true
+      command: |
+        /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
+        /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
+    - name: Add work label
+      command: |
+        /usr/local/bin/kubectl label --overwrite node {{ inventory_name }} node-role.kubernetes.io/worker=
@@ -17,7 +17,7 @@
       command: |
         /usr/local/bin/kubeadm join --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull
   rescue:
-    - name: Reset kubeadm if init failed
+    - name: Reset kubeadm if join failed
      command: kubeadm reset -f {% if (cri.cri_socket|defined && cri.cri_socket != "") %}--cri-socket {{ cri.cri_socket }}{% endif %}
 
 - name: Sync kubeconfig to remote
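The block/rescue pairing above is the playbook-level form of "try to join, reset on failure". A minimal standalone Go sketch of that control flow (not KubeKey code; it simply shells out to the same kubeadm commands, and assumes kubeadm is on PATH):

// join_or_reset.go: run "kubeadm join"; if it fails, fall back to
// "kubeadm reset -f" so the node is left clean for a retry.
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	join := exec.Command("kubeadm", "join",
		"--config=/etc/kubernetes/kubeadm-config.yaml",
		"--ignore-preflight-errors=FileExisting-crictl,ImagePull")
	if out, err := join.CombinedOutput(); err != nil {
		fmt.Printf("join failed: %v\n%s\n", err, out)
		// rescue: reset the partially joined node
		if out, err := exec.Command("kubeadm", "reset", "-f").CombinedOutput(); err != nil {
			fmt.Printf("reset failed: %v\n%s\n", err, out)
		}
	}
}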
@@ -25,14 +25,14 @@
     src: "{{ work_dir }}/kubekey/kubeconfig"
     dest: /root/.kube/config
 
-- name: Remote master taint
-  ignore_errors: true
-  command: |
-    /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
-    /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
-- name: Add work label
-  command: |
-    /usr/local/bin/kubectl label --overwrite node {{ inventory_name }} node-role.kubernetes.io/worker=
-  when: inventory_name in groups['kube_worker']
+- name: Set to worker node
+  when: inventory_name in groups["kube_worker"]
+  block:
+    - name: Remote master taint
+      ignore_errors: true
+      command: |
+        /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
+        /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
+    - name: Add work label
+      command: |
+        /usr/local/bin/kubectl label --overwrite node {{ inventory_name }} node-role.kubernetes.io/worker=
@@ -26,14 +26,14 @@
       fetch:
         src: /etc/kubernetes/admin.conf
         dest: "{{ work_dir }}/kubekey/kubeconfig"
 
 - name: Generate certificate key
   block:
-    - name: Generate certificate key by kubeadm
-      command: /usr/local/bin/kubeadm certs certificate-key
-      register: kubeadm_cert_result
-    - name: Set_Fact certificate key to all hosts
-      set_fact:
-        kubeadm_cert: "{{ kubeadm_cert_result.stdout }}"
+    - name: Generate certificate key by kubeadm
+      command: |
+        /usr/local/bin/kubeadm init phase upload-certs --upload-certs --config /etc/kubernetes/kubeadm-config.yaml 2>&1 \
+        | awk '/Using certificate key:/{getline; print}'
+      register: kubeadm_cert_result
+    - name: Set_Fact certificate key to all hosts
+      set_fact:
+        kubeadm_cert: "{{ kubeadm_cert_result.stdout }}"
 
 - name: Generate kubeadm token
   block:
     - name: Generate token by kubeadm
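The replacement command pipes the kubeadm output through awk, printing the line that follows the "Using certificate key:" marker. A hypothetical Go helper illustrating the same extraction (the sample output string below is made up, not real kubeadm output):

// certkey.go: mimic awk '/Using certificate key:/{getline; print}'.
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// certificateKey returns the line that follows the marker line, or "".
func certificateKey(output string) string {
	sc := bufio.NewScanner(strings.NewReader(output))
	for sc.Scan() {
		if strings.Contains(sc.Text(), "Using certificate key:") && sc.Scan() {
			return sc.Text() // the key is printed on the following line
		}
	}
	return ""
}

func main() {
	out := "[upload-certs] Using certificate key:\nd8f1e7a2deadbeef\n" // hypothetical sample
	fmt.Println(certificateKey(out))
}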
@@ -6,7 +6,6 @@ discovery:
     apiServerEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}:{{ kubernetes.apiserver.port }}
     token: "{{ kubeadm_token }}"
     unsafeSkipCAVerification: true
   tlsBootstrapToken: "{{ kubeadm_token }}"
 {% if (inventory_name in groups['kube_control_plane']) %}
 controlPlane:
   localAPIEndpoint:
@@ -6,7 +6,6 @@ discovery:
     apiServerEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}:{{ kubernetes.apiserver.port }}
     token: "{{ kubeadm_token }}"
     unsafeSkipCAVerification: true
   tlsBootstrapToken: "{{ kubeadm_token }}"
 {% if (inventory_name in groups['kube_control_plane']) %}
 controlPlane:
   localAPIEndpoint:
@@ -58,15 +58,7 @@ type Module struct {
 type TaskStatus struct {
 	RestartCount int       `json:"restartCount,omitempty"`
 	Phase        TaskPhase `json:"phase,omitempty"`
-	Conditions   []TaskCondition  `json:"conditions,omitempty"`
-	HostResults  []TaskHostResult `json:"failedDetail,omitempty"`
-}
-
-type TaskCondition struct {
-	StartTimestamp metav1.Time `json:"startTimestamp,omitempty"`
-	EndTimestamp   metav1.Time `json:"endTimestamp,omitempty"`
 	// HostResults of runtime.RawExtension host. the key is host name. value is host result
-	HostResults []TaskHostResult `json:"hostResults,omitempty"`
+	HostResults []TaskHostResult `json:"hostResults,omitempty"`
 }
 
 type TaskHostResult struct {
@@ -104,28 +104,6 @@ func (in *Task) DeepCopyObject() runtime.Object {
 	return nil
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TaskCondition) DeepCopyInto(out *TaskCondition) {
-	*out = *in
-	in.StartTimestamp.DeepCopyInto(&out.StartTimestamp)
-	in.EndTimestamp.DeepCopyInto(&out.EndTimestamp)
-	if in.HostResults != nil {
-		in, out := &in.HostResults, &out.HostResults
-		*out = make([]TaskHostResult, len(*in))
-		copy(*out, *in)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskCondition.
-func (in *TaskCondition) DeepCopy() *TaskCondition {
-	if in == nil {
-		return nil
-	}
-	out := new(TaskCondition)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *TaskHostResult) DeepCopyInto(out *TaskHostResult) {
 	*out = *in
@@ -176,13 +154,6 @@ func (in *TaskList) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *TaskStatus) DeepCopyInto(out *TaskStatus) {
 	*out = *in
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]TaskCondition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
 	if in.HostResults != nil {
 		in, out := &in.HostResults, &out.HostResults
 		*out = make([]TaskHostResult, len(*in))
@@ -18,11 +18,11 @@ package executor
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"os"
 
 	"github.com/schollz/progressbar/v3"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/json"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -252,6 +252,7 @@ func (e executor) execBlock(ctx context.Context, options execBlockOptions) error
 
 	switch {
 	case len(at.Block) != 0:
+		var errs error
 		// exec block
 		if err := e.execBlock(ctx, execBlockOptions{
 			hosts: hosts,
@@ -262,7 +263,7 @@ func (e executor) execBlock(ctx context.Context, options execBlockOptions) error
 			tags:  tags,
 		}); err != nil {
 			klog.V(4).ErrorS(err, "execute tasks from block error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
-			return err
+			errs = errors.Join(errs, err)
 		}
 
 		// if block exec failed exec rescue
@@ -276,7 +277,7 @@ func (e executor) execBlock(ctx context.Context, options execBlockOptions) error
 				tags:  tags,
 			}); err != nil {
 				klog.V(4).ErrorS(err, "execute tasks from rescue error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
-				return err
+				errs = errors.Join(errs, err)
 			}
 		}
 
@@ -291,10 +292,15 @@ func (e executor) execBlock(ctx context.Context, options execBlockOptions) error
 				tags:  tags,
 			}); err != nil {
 				klog.V(4).ErrorS(err, "execute tasks from always error", "pipeline", ctrlclient.ObjectKeyFromObject(e.pipeline), "block", at.Name)
-				return err
+				errs = errors.Join(errs, err)
 			}
 		}
 
+		// when execute error. return
+		if errs != nil {
+			return errs
+		}
+
 	case at.IncludeTasks != "":
 		// include tasks has converted to blocks.
 		// do nothing
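The switch from return err to errs = errors.Join(errs, err) is what makes the rescue and always sections run even when the block fails ("fix: rescue is not exec."). A minimal sketch of that accumulation pattern, using hypothetical stage names rather than the real executor types:

// block_rescue.go: accumulate stage errors instead of returning early,
// so rescue and always still execute; the joined error is returned last.
package main

import (
	"errors"
	"fmt"
)

func runStage(name string, fail bool) error {
	fmt.Println("running", name)
	if fail {
		return fmt.Errorf("%s failed", name)
	}
	return nil
}

func execBlock() error {
	var errs error
	if err := runStage("block", true); err != nil {
		errs = errors.Join(errs, err)
	}
	if errs != nil {
		// block failed: rescue still executes
		if err := runStage("rescue", false); err != nil {
			errs = errors.Join(errs, err)
		}
	}
	if err := runStage("always", false); err != nil {
		errs = errors.Join(errs, err)
	}
	return errs
}

func main() {
	if err := execBlock(); err != nil {
		fmt.Println("combined error:", err)
	}
}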
@@ -384,19 +390,10 @@ func (e executor) execBlock(ctx context.Context, options execBlockOptions) error
 }
 
 func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, options execBlockOptions) error {
-	cd := kubekeyv1alpha1.TaskCondition{
-		StartTimestamp: metav1.Now(),
-	}
-	defer func() {
-		cd.EndTimestamp = metav1.Now()
-		task.Status.Conditions = append(task.Status.Conditions, cd)
-	}()
-
 	// check task host results
 	wg := &wait.Group{}
-	dataChan := make(chan kubekeyv1alpha1.TaskHostResult, len(task.Spec.Hosts))
-	for _, h := range task.Spec.Hosts {
-		host := h
+	task.Status.HostResults = make([]kubekeyv1alpha1.TaskHostResult, len(task.Spec.Hosts))
+	for i, h := range task.Spec.Hosts {
 		wg.StartWithContext(ctx, func(ctx context.Context) {
 			var stdout, stderr string
 
@@ -421,7 +418,7 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, options execBlockOptions) error
 			// try to convert by json
 			_ = json.Unmarshal([]byte(stderr), &stderrResult)
 			// set variable to parent location
-			if err := e.variable.Merge(variable.MergeRuntimeVariable(host, map[string]any{
+			if err := e.variable.Merge(variable.MergeRuntimeVariable(h, map[string]any{
 				task.Spec.Register: map[string]any{
 					"stdout": stdoutResult,
 					"stderr": stderrResult,
@@ -452,14 +449,19 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, options execBlockOptions) error
 			}
 
 			// fill result
-			dataChan <- kubekeyv1alpha1.TaskHostResult{
-				Host:   host,
+			//dataChan <- kubekeyv1alpha1.TaskHostResult{
+			//	Host:   host,
+			//	Stdout: stdout,
+			//	StdErr: stderr,
+			//}
+			task.Status.HostResults[i] = kubekeyv1alpha1.TaskHostResult{
+				Host:   h,
 				Stdout: stdout,
 				StdErr: stderr,
 			}
 		}()
 
-		ha, err := e.variable.Get(variable.GetAllVariable(host))
+		ha, err := e.variable.Get(variable.GetAllVariable(h))
 		if err != nil {
 			stderr = fmt.Sprintf("get variable error: %v", err)
 			return
@@ -487,7 +489,7 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, options execBlockOptions) error
 
 		for _, item := range loop {
 			// set item to runtime variable
-			if err := e.variable.Merge(variable.MergeRuntimeVariable(host, map[string]any{
+			if err := e.variable.Merge(variable.MergeRuntimeVariable(h, map[string]any{
 				"item": item,
 			})); err != nil {
 				stderr = fmt.Sprintf("set loop item to variable error: %v", err)
@@ -498,7 +500,7 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, options execBlockOptions) error
 			}
 			stdout, stderr = e.executeModule(ctx, task, modules.ExecOptions{
 				Args:     task.Spec.Module.Args,
-				Host:     host,
+				Host:     h,
 				Variable: e.variable,
 				Task:     *task,
 				Pipeline: *e.pipeline,
@@ -507,7 +509,7 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, options execBlockOptions) error
 				klog.ErrorS(err, "fail to add bar")
 			}
 			// delete item
-			if err := e.variable.Merge(variable.MergeRuntimeVariable(host, map[string]any{
+			if err := e.variable.Merge(variable.MergeRuntimeVariable(h, map[string]any{
 				"item": nil,
 			})); err != nil {
 				stderr = fmt.Sprintf("clean loop item to variable error: %v", err)
@@ -519,26 +521,18 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, options execBlockOptions) error
 			}
 		})
 	}
-	go func() {
-		wg.Wait()
-		close(dataChan)
-	}()
+	wg.Wait()
 
 	task.Status.Phase = kubekeyv1alpha1.TaskPhaseSuccess
-	for data := range dataChan {
+	for _, data := range task.Status.HostResults {
 		if data.StdErr != "" {
 			if task.Spec.IgnoreError != nil && *task.Spec.IgnoreError {
 				task.Status.Phase = kubekeyv1alpha1.TaskPhaseIgnored
 			} else {
 				task.Status.Phase = kubekeyv1alpha1.TaskPhaseFailed
-				task.Status.HostResults = append(task.Status.HostResults, kubekeyv1alpha1.TaskHostResult{
-					Host:   data.Host,
-					Stdout: data.Stdout,
-					StdErr: data.StdErr,
-				})
 			}
 			break
 		}
-		cd.HostResults = append(cd.HostResults, data)
 	}
 
 	return nil
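Replacing the buffered dataChan with a preallocated task.Status.HostResults slice works because each goroutine writes only its own index; once wg.Wait() returns, the slice can be read without further synchronization. A minimal sketch of the pattern with standard-library types (hypothetical hostResult struct; sync.WaitGroup stands in for k8s.io/apimachinery's wait.Group):

// host_results.go: collect per-host results by index instead of a channel.
package main

import (
	"fmt"
	"sync"
)

type hostResult struct {
	Host   string
	Stdout string
}

func main() {
	hosts := []string{"node1", "node2", "node3"}
	results := make([]hostResult, len(hosts)) // one slot per host

	var wg sync.WaitGroup
	for i, h := range hosts {
		wg.Add(1)
		go func(i int, h string) {
			defer wg.Done()
			// each goroutine owns exactly one element, so no mutex is needed
			results[i] = hostResult{Host: h, Stdout: "ok"}
		}(i, h)
	}
	wg.Wait()

	for _, r := range results {
		fmt.Printf("%s: %s\n", r.Host, r.Stdout)
	}
}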
@@ -22,9 +22,10 @@ import (
 	"os"
 	"path/filepath"
 
+	"gopkg.in/yaml.v3"
+
 	kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1"
 	_const "github.com/kubesphere/kubekey/v4/pkg/const"
-	"gopkg.in/yaml.v3"
 )
 
 // marshalPlaybook kkcorev1.Playbook from a playbook file