Merge pull request #270 from pixiake/master

update docs
KubeSphere CI Bot 2020-08-29 17:27:00 +08:00 committed by GitHub
commit c53c06c3e2
7 changed files with 111 additions and 43 deletions

---

@@ -196,29 +196,40 @@ You can delete the cluster by the following command:
```shell script
./kk delete cluster [-f config-sample.yaml]
```
### Upgrade Cluster
#### Allinone
Upgrade the cluster to a specified version.
```shell script
./kk upgrade [--with-kubernetes version] [--with-kubesphere version]
```
* Supports upgrading Kubernetes only.
* Supports upgrading KubeSphere only.
* Supports upgrading both Kubernetes and KubeSphere.
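For example, a combined upgrade of both components might look like this (the version numbers below are only illustrative, not mandated by KubeKey):
```shell script
# Upgrade Kubernetes and KubeSphere in one run (illustrative versions)
./kk upgrade --with-kubernetes v1.17.9 --with-kubesphere v3.0.0
```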
#### Multi-nodes
Upgrade the cluster with a specified configuration file.
```shell script
./kk upgrade [--with-kubernetes version] [--with-kubesphere version] [(-f | --file) path]
```
* If `--with-kubernetes` or `--with-kubesphere` is specified, the configuration file will also be updated.
* Use `-f` to specify the configuration file that was generated for cluster creation.
> Note: Upgrading a multi-node cluster requires a specified configuration file. If the cluster was installed without KubeKey, or if the configuration file used for installation cannot be found, you need to create the configuration file yourself or generate it with the following command.
Get cluster info and generate KubeKey's configuration file (optional):
```shell script
./kk create config [--from-cluster] [(-f | --file) path] [--kubeconfig path]
```
* `--from-cluster` means fetching the cluster's information from an existing cluster.
* `-f` refers to the path where the configuration file will be generated.
* `--kubeconfig` refers to the path of the kubeconfig file.
* After the configuration file is generated, some parameters still need to be filled in, such as the SSH information of the nodes (see the sketch below).
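A minimal sketch of the whole flow for a cluster that was not created by KubeKey (file paths and version numbers are illustrative):
```shell script
# Generate a configuration file from the running cluster
./kk create config --from-cluster -f ./config-sample.yaml --kubeconfig ~/.kube/config
# Fill in the SSH information (address, user, password or key) for each
# node in config-sample.yaml, then upgrade against that file
./kk upgrade --with-kubernetes v1.17.9 --with-kubesphere v3.0.0 -f ./config-sample.yaml
```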
## Documents
* [Configuration example](docs/config-example.md)
* [Addons](docs/addons.md)
* [Network access](docs/network-access.md)
* [Storage clients](docs/storage-client.md)
* [Kubectl autocompletion](docs/kubectl-autocompletion.md)
* [Roadmap](docs/roadmap.md)

---

@@ -195,6 +195,35 @@ KubeSphere has multiple pluggable components; an introduction to the components can be found in the [Conf
./kk delete cluster [-f config-sample.yaml]
```
### Upgrade Cluster
#### Single-node cluster
Upgrade the cluster to a specified version.
```shell script
./kk upgrade [--with-kubernetes version] [--with-kubesphere version]
```
* `--with-kubernetes` specifies the target Kubernetes version.
* `--with-kubesphere` specifies the target KubeSphere version.
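Either flag can also be used on its own. For example, to upgrade only KubeSphere (the version is illustrative):
```shell script
# Upgrade KubeSphere only, leaving Kubernetes at its current version
./kk upgrade --with-kubesphere v3.0.0
```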
#### Multi-node cluster
Upgrade the cluster with a specified configuration file.
```shell script
./kk upgrade [--with-kubernetes version] [--with-kubesphere version] [(-f | --file) path]
```
* `--with-kubernetes` specifies the target Kubernetes version.
* `--with-kubesphere` specifies the target KubeSphere version.
* `-f` specifies the configuration file that was created during cluster installation.
> Note: Upgrading a multi-node cluster requires a specified configuration file. If the cluster was not created by KubeKey, or if the configuration file generated at creation time has been lost, the configuration file needs to be regenerated, which can be done as follows.
Get cluster info and generate KubeKey's configuration file (optional):
```shell script
./kk create config [--from-cluster] [(-f | --file) path] [--kubeconfig path]
```
* `--from-cluster` generates the configuration file from an existing cluster.
* `-f` specifies the path where the configuration file is generated.
* `--kubeconfig` specifies the cluster's kubeconfig file.
* Since the cluster configuration cannot be fully retrieved, please complete the generated configuration file with the actual information of the cluster (see the sketch below).
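For example (the file path is illustrative; `~/.kube/config` is the usual kubeconfig location):
```shell script
./kk create config --from-cluster -f ./config-sample.yaml --kubeconfig ~/.kube/config
# Edit config-sample.yaml and supply each node's SSH address, user, and
# password (or private key) before running ./kk upgrade with -f
```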
### Enable kubectl autocompletion
KubeKey does not enable kubectl autocompletion. Refer to the guide below to turn it on:
@@ -217,6 +246,7 @@ kubectl completion bash >/etc/bash_completion.d/kubectl
## Documents
* [Configuration example](docs/config-example.md)
* [Addons](docs/addons.md)
* [Network access](docs/network-access.md)
* [Storage clients](docs/storage-client.md)
* [Roadmap](docs/roadmap.md)

---

@@ -0,0 +1,19 @@
Enable kubectl autocompletion
------------
KubeKey doesn't enable kubectl autocompletion. Refer to the guide below to turn it on:
**Prerequisite**: make sure bash-completion is installed and working.
```shell script
# Install bash-completion
apt-get install bash-completion
# Source the completion script in your ~/.bashrc file
echo 'source <(kubectl completion bash)' >>~/.bashrc
# Add the completion script to the /etc/bash_completion.d directory
kubectl completion bash >/etc/bash_completion.d/kubectl
```
More details can be found [here](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion).
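To verify that completion works, reload your shell configuration (a quick check, assuming bash):
```shell script
# Load the completion script into the current shell
source ~/.bashrc
# Typing "kubectl get nod" and pressing Tab should now complete to "nodes"
```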

---

@@ -25,7 +25,6 @@ import (
"github.com/kubesphere/kubekey/pkg/util"
"github.com/kubesphere/kubekey/pkg/util/manager"
"github.com/pkg/errors"
"os"
"os/exec"
"path/filepath"
"regexp"
@@ -170,11 +169,8 @@ func removeMasterTaint(mgr *manager.Manager, node *kubekeyapi.HostCfg) error {
func addWorkerLabel(mgr *manager.Manager, node *kubekeyapi.HostCfg) error {
if node.IsWorker {
- addWorkerLabelCmd := fmt.Sprintf("sudo -E /bin/sh -c \"/usr/local/bin/kubectl label node %s node-role.kubernetes.io/worker=\"", node.Name)
- output, err := mgr.Runner.ExecuteCmd(addWorkerLabelCmd, 5, true)
- if err != nil && !strings.Contains(output, "already") {
- return errors.Wrap(errors.WithStack(err), "Failed to add worker label")
- }
+ addWorkerLabelCmd := fmt.Sprintf("sudo -E /bin/sh -c \"/usr/local/bin/kubectl label --overwrite node %s node-role.kubernetes.io/worker=\"", node.Name)
+ _, _ = mgr.Runner.ExecuteCmd(addWorkerLabelCmd, 5, true)
}
return nil
}
@@ -222,13 +218,6 @@ func getJoinCmd(mgr *manager.Manager) error {
}
clusterStatus["kubeConfig"] = output
- currentDir, err := filepath.Abs(filepath.Dir(os.Args[0]))
- if err != nil {
- return errors.Wrap(err, "Failed to get current dir")
- }
- _ = exec.Command("/bin/sh", "-c", fmt.Sprintf("mkdir -p %s/kubekey", currentDir)).Run()
- _ = exec.Command("sudo", "-E", fmt.Sprintf("/bin/sh -c \"echo %s | base64 -d > %s/kubekey/kubeconfig.yaml\"", clusterStatus["kubeConfig"], currentDir)).Run()
return nil
}
@@ -314,12 +303,16 @@ func addWorker(mgr *manager.Manager) error {
}
createConfigDirCmd := "mkdir -p /root/.kube && mkdir -p $HOME/.kube"
- chownKubeConfig := "chown $(id -u):$(id -g) $HOME/.kube/config"
+ chownKubeConfig := "chown $(id -u):$(id -g) -R $HOME/.kube"
if _, err := mgr.Runner.ExecuteCmd(fmt.Sprintf("sudo -E /bin/sh -c \"%s\"", createConfigDirCmd), 1, false); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to create kube dir")
}
- syncKubeconfigCmd := fmt.Sprintf("echo %s | base64 -d > %s && echo %s | base64 -d > %s && %s", clusterStatus["kubeConfig"], "/root/.kube/config", clusterStatus["kubeConfig"], "$HOME/.kube/config", chownKubeConfig)
- if _, err := mgr.Runner.ExecuteCmd(fmt.Sprintf("sudo -E /bin/sh -c \"%s\"", syncKubeconfigCmd), 1, false); err != nil {
+ syncKubeconfigForRootCmd := fmt.Sprintf("echo %s | base64 -d > %s", clusterStatus["kubeConfig"], "/root/.kube/config")
+ syncKubeconfigForUserCmd := fmt.Sprintf("echo %s | base64 -d > %s && %s", clusterStatus["kubeConfig"], "$HOME/.kube/config", chownKubeConfig)
+ if _, err := mgr.Runner.ExecuteCmd(fmt.Sprintf("sudo -E /bin/sh -c \"%s\"", syncKubeconfigForRootCmd), 1, false); err != nil {
return errors.Wrap(errors.WithStack(err), "Failed to sync kube config")
}
+ if _, err := mgr.Runner.ExecuteCmd(fmt.Sprintf("sudo -E /bin/sh -c \"%s\"", syncKubeconfigForUserCmd), 1, false); err != nil {
+ return errors.Wrap(errors.WithStack(err), "Failed to sync kube config")
+ }
return nil

---

@@ -28,13 +28,13 @@ type PrecheckResults struct {
}
var (
- checkResults = make(map[string]interface{})
- baseSoftwares = []string{"sudo", "curl", "openssl", "ebtables", "socat", "ipset", "conntrack", "docker", "showmount", "rbd", "glusterfs"}
+ CheckResults = make(map[string]interface{})
+ BaseSoftwares = []string{"sudo", "curl", "openssl", "ebtables", "socat", "ipset", "conntrack", "docker", "showmount", "rbd", "glusterfs"}
)
func Precheck(mgr *manager.Manager) error {
if !mgr.SkipCheck {
- if err := mgr.RunTaskOnAllNodes(precheck, true); err != nil {
+ if err := mgr.RunTaskOnAllNodes(PrecheckNodes, true); err != nil {
return err
}
PrecheckConfirm(mgr)
@@ -42,10 +42,10 @@ func Precheck(mgr *manager.Manager) error {
return nil
}
- func precheck(mgr *manager.Manager, node *kubekeyapi.HostCfg) error {
+ func PrecheckNodes(mgr *manager.Manager, node *kubekeyapi.HostCfg) error {
var results = make(map[string]interface{})
results["name"] = node.Name
- for _, software := range baseSoftwares {
+ for _, software := range BaseSoftwares {
_, err := mgr.Runner.ExecuteCmd(fmt.Sprintf("sudo -E /bin/sh -c \"which %s\"", software), 0, false)
switch software {
case "showmount":
@@ -68,16 +68,16 @@ func precheck(mgr *manager.Manager, node *kubekeyapi.HostCfg) error {
results["time"] = strings.TrimSpace(output)
}
- checkResults[node.Name] = results
+ CheckResults[node.Name] = results
return nil
}
func PrecheckConfirm(mgr *manager.Manager) {
var results []PrecheckResults
- for node := range checkResults {
+ for node := range CheckResults {
var result PrecheckResults
- _ = mapstructure.Decode(checkResults[node], &result)
+ _ = mapstructure.Decode(CheckResults[node], &result)
results = append(results, result)
}
table.OutputA(results)

---

@@ -250,9 +250,10 @@ Loop:
}
func DeployLocalVolume(mgr *manager.Manager) error {
- if err := mgr.RunTaskOnMasterNodes(deployLocalVolume, true); err != nil {
- return err
+ if mgr.Cluster.KubeSphere.Enabled {
+ if err := mgr.RunTaskOnMasterNodes(deployLocalVolume, true); err != nil {
+ return err
+ }
}
return nil

---

@@ -20,7 +20,10 @@ import (
"bufio"
"fmt"
kubekeyapi "github.com/kubesphere/kubekey/pkg/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/pkg/cluster/preinstall"
"github.com/kubesphere/kubekey/pkg/util/manager"
"github.com/mitchellh/mapstructure"
"github.com/modood/table"
"github.com/pkg/errors"
versionutil "k8s.io/apimachinery/pkg/util/version"
"os"
@@ -50,6 +53,17 @@ var versionCheck = map[string]map[string]map[string]bool{
}
func GetClusterInfo(mgr *manager.Manager) error {
+ if err := mgr.RunTaskOnAllNodes(preinstall.PrecheckNodes, true); err != nil {
+ return err
+ }
+ var results []preinstall.PrecheckResults
+ for node := range preinstall.CheckResults {
+ var result preinstall.PrecheckResults
+ _ = mapstructure.Decode(preinstall.CheckResults[node], &result)
+ results = append(results, result)
+ }
+ table.OutputA(results)
+ fmt.Println()
return mgr.RunTaskOnMasterNodes(getClusterInfo, true)
}