add node delete
Signed-off-by: Forest-L <lilin@yunify.com>
parent a11d8e906c, commit 5e52c083fe
@@ -0,0 +1,22 @@
package cmd

import (
	"github.com/kubesphere/kubekey/pkg/delete"
	"github.com/kubesphere/kubekey/pkg/util"
	"github.com/spf13/cobra"
	"strings"
)

// deleteNodeCmd registers "node" under the existing delete command and
// passes the node name from the CLI arguments to delete.ResetNode.
var deleteNodeCmd = &cobra.Command{
	Use:   "node",
	Short: "delete a node",
	Run: func(cmd *cobra.Command, args []string) {
		logger := util.InitLogger(opt.Verbose)
		delete.ResetNode(opt.ClusterCfgFile, logger, opt.Verbose, strings.Join(args, ""))
	},
}

func init() {
	deleteCmd.AddCommand(deleteNodeCmd)
	deleteNodeCmd.Flags().StringVarP(&opt.ClusterCfgFile, "config", "f", "", "Path to a config")
}
@@ -26,7 +26,11 @@ import (
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
	"unicode"
)

func ResetCluster(clusterCfgFile string, logger *log.Logger, verbose bool) error {
@@ -38,6 +42,74 @@ func ResetCluster(clusterCfgFile string, logger *log.Logger, verbose bool) error
	return Execute(executor.NewExecutor(&cfg.Spec, logger, verbose, false, true))
}

// ResetNode removes a single worker node: it drops the node from the cluster
// config file, rebuilds the worker list, and then runs the node reset tasks.
func ResetNode(clusterCfgFile string, logger *log.Logger, verbose bool, nodeName string) error {
	if nodeName == "" {
		return errors.New("node name is empty")
	}
	fp, _ := filepath.Abs(clusterCfgFile)

	// Count how many lines of the config mention the node name.
	cmd0 := fmt.Sprintf("cat %s | grep %s | wc -l", fp, nodeName)
	nodeNameNum, err0 := exec.Command("/bin/sh", "-c", cmd0).CombinedOutput()
	if err0 != nil {
		return errors.Wrap(err0, "Failed to get node num")
	}
	if string(nodeNameNum) == "0\n" {
		fmt.Println("Please check the node name in the config-sample.yaml")
		os.Exit(0)
	}

	// The node name appears on two lines of the config: delete them and run
	// the node reset tasks against the re-parsed config.
	if string(nodeNameNum) == "2\n" {
		cmd := fmt.Sprintf("sed -i /%s/d %s", nodeName, fp)
		_ = exec.Command("/bin/sh", "-c", cmd).Run()
		cfg, _ := config.ParseClusterCfg(clusterCfgFile, "", "", false, logger)
		return Execute1(executor.NewExecutor(&cfg.Spec, logger, verbose, false, true))
	}

	// The node name appears on a single line only, so the worker role group
	// presumably references it through a range such as node[1:3]: delete the
	// line, then rebuild the worker entries from the remaining worker nodes.
	if string(nodeNameNum) == "1\n" {
		cmd := fmt.Sprintf("sed -i /%s/d %s", nodeName, fp)
		_ = exec.Command("/bin/sh", "-c", cmd).Run()
		cfg, err := config.ParseClusterCfg(clusterCfgFile, "", "", false, logger)
		if err != nil {
			return errors.Wrap(err, "Failed to download cluster config")
		}
		mgr, err1 := executor.NewExecutor(&cfg.Spec, logger, verbose, false, true).CreateManager()
		if err1 != nil {
			return errors.Wrap(err1, "Failed to get cluster config")
		}

		// Collect the remaining worker names.
		var newNodeName []string
		for i := 0; i < len(mgr.WorkerNodes); i++ {
			nodename := mgr.WorkerNodes[i].Name
			if nodeName == nodename {
				continue
			}
			newNodeName = append(newNodeName, nodename)
		}

		// Collapse runs of adjacent names (node1, node2, node3) back into
		// range notation (node[1:3]).
		var connNodeName []string
		for j := 0; j < len(newNodeName); j++ {
			t := j
			nodename1 := newNodeName[t]
			for t+1 < len(newNodeName) && Isadjoin(newNodeName[t], newNodeName[t+1]) {
				t++
			}
			if t == j {
				connNodeName = append(connNodeName, nodename1)
			} else {
				connNodeName = append(connNodeName, Merge(nodename1, newNodeName[t]))
				j = t
			}
		}

		// Drop everything between the worker line and controlPlaneEndpoint,
		// then re-insert the merged worker entries after the worker line.
		cmd1 := fmt.Sprintf("sed -i -n '1,/worker/p;/controlPlaneEndpoint/,$p' %s", fp)
		_ = exec.Command("/bin/sh", "-c", cmd1).Run()
		for k := 0; k < len(connNodeName); k++ {
			cmd2 := fmt.Sprintf("sed -i '/worker/a\\ \\ \\ \\ \\- %s' %s", connNodeName[k], fp)
			_ = exec.Command("/bin/sh", "-c", cmd2).Run()
		}

		cfg1, _ := config.ParseClusterCfg(clusterCfgFile, "", "", false, logger)
		return Execute1(executor.NewExecutor(&cfg1.Spec, logger, verbose, false, true))
	}

	return nil
}
func Execute(executor *executor.Executor) error {
	mgr, err := executor.CreateManager()
	if err != nil {
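ResetNode probes the cluster config by shelling out to cat/grep/wc to count how many lines mention the node name. Purely as an illustration, a minimal pure-Go sketch of the same occurrence check could look like the snippet below; countNodeName is a hypothetical helper, not part of this commit, and it assumes the pkg/delete package shown above.

package delete

import (
	"os"
	"strings"
)

// countNodeName mirrors the shell probe in ResetNode: it counts how many
// lines of the cluster config file contain nodeName.
func countNodeName(clusterCfgFile, nodeName string) (int, error) {
	data, err := os.ReadFile(clusterCfgFile)
	if err != nil {
		return 0, err
	}
	count := 0
	for _, line := range strings.Split(string(data), "\n") {
		if strings.Contains(line, nodeName) {
			count++
		}
	}
	return count, nil
}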
@@ -45,7 +117,13 @@ func Execute(executor *executor.Executor) error {
	}
	return ExecTasks(mgr)
}

func Execute1(executor *executor.Executor) error {
	mgr, err := executor.CreateManager()
	if err != nil {
		return err
	}
	return ExecTasks1(mgr)
}
func ExecTasks(mgr *manager.Manager) error {
	resetTasks := []manager.Task{
		{Task: ResetKubeCluster, ErrMsg: "Failed to reset kube cluster"},
@@ -61,6 +139,21 @@ func ExecTasks(mgr *manager.Manager) error {

	return nil
}
func ExecTasks1(mgr *manager.Manager) error {
	resetNodeTasks := []manager.Task{
		{Task: ResetKubeNode, ErrMsg: "Failed to reset kube node"},
	}

	for _, step := range resetNodeTasks {
		if err := step.Run(mgr); err != nil {
			return errors.Wrap(err, step.ErrMsg)
		}
	}

	mgr.Logger.Infoln("Successful.")

	return nil
}

func ResetKubeCluster(mgr *manager.Manager) error {
	reader := bufio.NewReader(os.Stdin)
@@ -76,6 +169,86 @@ func ResetKubeCluster(mgr *manager.Manager) error {

	return mgr.RunTaskOnK8sNodes(resetKubeCluster, true)
}
func ResetKubeNode(mgr *manager.Manager) error {
	reader := bufio.NewReader(os.Stdin)
	input, err := Confirm1(reader)
	if err != nil {
		return err
	}
	if input == "no" {
		os.Exit(0)
	}

	mgr.Logger.Infoln("Resetting kubernetes node ...")

	return mgr.RunTaskOnMasterNodes(resetKubeNode, true)
}

// resetKubeNode runs on the master nodes: it lists the non-master nodes that
// kubectl still knows about, compares them with the worker list in the updated
// config, and drains and deletes the one that is no longer in the config.
func resetKubeNode(mgr *manager.Manager, _ *kubekeyapi.HostCfg) error {
	var deletenodename string
	var tmp []string
	output1, _ := mgr.Runner.ExecuteCmd("sudo -E /usr/local/bin/kubectl get nodes | grep -v NAME | grep -v 'master' | awk '{print $1}'", 0, true)
	if !strings.Contains(output1, "\r\n") {
		tmp = append(tmp, output1)
	} else {
		tmp = strings.Split(output1, "\r\n")
	}
	var tmp1 string
	for j := 0; j < len(mgr.WorkerNodes); j++ {
		tmp1 += mgr.WorkerNodes[j].Name + "\r\n"
	}
	for i := 0; i < len(tmp); i++ {
		if strings.Contains(tmp1, tmp[i]) {
			continue
		}
		deletenodename = tmp[i]
		break
	}
	return DrainAndDeleteNode(mgr, deletenodename)
}
func DrainAndDeleteNode(mgr *manager.Manager, deleteNodeName string) error {
	_, err := mgr.Runner.ExecuteCmd(fmt.Sprintf("sudo -E /bin/sh -c \"/usr/local/bin/kubectl drain %s --delete-local-data --ignore-daemonsets\"", deleteNodeName), 5, true)
	if err != nil {
		return errors.Wrap(err, "Failed to drain the node")
	}
	_, err1 := mgr.Runner.ExecuteCmd(fmt.Sprintf("sudo -E /bin/sh -c \"/usr/local/bin/kubectl delete node %s\"", deleteNodeName), 5, true)
	if err1 != nil {
		return errors.Wrap(err1, "Failed to delete the node")
	}
	return nil
}

// Merge collapses two node names that bound a consecutive run into the range
// notation used in the config, e.g. Merge("node1", "node3") == "node[1:3]".
func Merge(name1, name2 string) string {
	par1, par2 := SplitNum(name1)
	_, par4 := SplitNum(name2)
	return fmt.Sprintf("%s[%s:%s]", par1, strconv.Itoa(par2), strconv.Itoa(par4))
}

// Isadjoin reports whether two node names share a prefix and have consecutive
// numeric suffixes, e.g. node1 and node2.
func Isadjoin(name1, name2 string) bool {
	par1, par2 := SplitNum(name1)
	par3, par4 := SplitNum(name2)
	return par1 == par3 && par4 == par2+1
}

// SplitNum splits a node name such as "node10" into its prefix ("node") and
// numeric suffix (10).
func SplitNum(nodename string) (name string, num int) {
	for i := len(nodename) - 1; i >= 0; i-- {
		if !unicode.IsDigit(rune(nodename[i])) {
			num, _ := strconv.Atoi(nodename[i+1:])
			return nodename[:i+1], num
		}
	}
	return "", 0
}

var clusterFiles = []string{
	"/usr/local/bin/etcd",
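SplitNum, Isadjoin, and Merge drive the range handling in ResetNode: consecutive worker names are collapsed back into the node[1:3]-style notation before being written to the config. Below is a small illustrative test sketch of their behaviour; it is not part of this commit and assumes the pkg/delete package builds as shown.

package delete

import "testing"

// Illustrative only: exercises the node-name helpers defined in this commit.
func TestNodeNameHelpers(t *testing.T) {
	if name, num := SplitNum("node3"); name != "node" || num != 3 {
		t.Fatalf("SplitNum(\"node3\") = %q, %d", name, num)
	}
	if !Isadjoin("node1", "node2") {
		t.Fatal("expected node1 and node2 to be adjacent")
	}
	if got := Merge("node1", "node3"); got != "node[1:3]" {
		t.Fatalf("Merge(\"node1\", \"node3\") = %q", got)
	}
}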
@@ -134,3 +307,18 @@ func Confirm(reader *bufio.Reader) (string, error) {
		}
	}
}

func Confirm1(reader *bufio.Reader) (string, error) {
	for {
		fmt.Printf("Are you sure you want to delete this node? [yes/no]: ")
		input, err := reader.ReadString('\n')
		if err != nil {
			return "", err
		}
		input = strings.TrimSpace(input)

		if input == "yes" || input == "no" {
			return input, nil
		}
	}
}