Mirror of https://github.com/kubesphere/website.git (synced 2025-12-26 00:12:48 +00:00)
Commit b2dc44d10e

@@ -3,8 +3,13 @@
     {
       "pattern": "^/[^l][^o][^g].*$",
       "function": "go-get",
-      "name": "go module"
+      "name": "go-get for go module"
     },
+    {
+      "pattern": "^(/zh/docs/|/docs/).*$",
+      "function": "redirect",
+      "name": "redirect"
+    }
   ],
   "version": 1
  }
 }

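The `go-get` pattern above is easy to misread: `^/[^l][^o][^g].*$` does not mean "any path that does not start with `/log`"; it rejects every path whose first character after the slash is `l`, or whose second is `o`, or whose third is `g`. A quick standalone check (a sketch; the sample paths are invented, not from this commit) shows which rule captures which request, and that `/zh/docs/…` paths satisfy both patterns, so the order of the rules in this file presumably decides which function runs:

```js
// Sketch only: the sample paths below are hypothetical.
const goGet = /^\/[^l][^o][^g].*$/;             // crude "not /log…" filter
const redirect = /^(\/zh\/docs\/|\/docs\/).*$/; // docs paths to be versioned

for (const path of ["/kubesphere", "/log/access", "/docs", "/docs/", "/zh/docs/faq/"]) {
  console.log(path, { goGet: goGet.test(path), redirect: redirect.test(path) });
}
// "/docs/" fails the go-get pattern only because its second character is "o";
// "/zh/docs/faq/" matches both patterns.
```
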
@@ -0,0 +1,24 @@
+export default async (req, context) => {
+  const { url } = req;
+  const parsedUrl = new URL(url);
+  const path = parsedUrl.pathname;
+  const regex = /^(\/zh\/docs|\/docs)\/v[0-9]+\.[0-9]+.*$/;
+
+  if (regex.test(path)) {
+    return;
+  } else {
+    let hasContentAfterDocs = false;
+    const newPath = path.replace(/(\/docs\/)(.*)$/, (match, p1, p2) => {
+      if (p2.trim() === "") {
+        return `${p1}v3.4/`;
+      } else {
+        hasContentAfterDocs = true;
+        return `${p1}v3.4/${p2}`;
+      }
+    });
+
+    const redirectStatusCode = hasContentAfterDocs ? 301 : 302;
+    const redirectUrl = new URL(newPath, req.url);
+    return Response.redirect(redirectUrl, redirectStatusCode);
+  }
+};

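The function above leaves already-versioned paths alone (it returns `undefined`, so the request falls through to normal serving) and pins everything else under `/docs/` or `/zh/docs/` to `v3.4`, using a temporary 302 for the bare docs root and a permanent 301 once there is content after `/docs/`. A minimal local harness for it (a sketch: the `./redirect.js` file name and sample URLs are assumptions, and it relies on the Fetch API globals available in Node 18+):

```js
// Hypothetical harness; "./redirect.js" is assumed to hold the function above.
import handler from "./redirect.js";

const samples = [
  "/docs/",                      // bare root        -> 302 /docs/v3.4/
  "/docs/pluggable-components/", // unversioned page -> 301 /docs/v3.4/pluggable-components/
  "/zh/docs/faq/",               // zh unversioned   -> 301 /zh/docs/v3.4/faq/
  "/docs/v3.3/introduction/",    // versioned        -> pass through (undefined)
];

for (const path of samples) {
  const res = await handler(new Request(`https://kubesphere.io${path}`));
  console.log(path, "->", res ? `${res.status} ${res.headers.get("location")}` : "pass-through");
}
```
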
@@ -116,45 +116,51 @@ name = "Documentation"
 hasChildren = true
 [[languages.en.menu.main]]
 parent = "Documentation"
-name = "v3.3 <img src='/images/header/star.svg' alt='star'>"
-URL = "/docs/v3.3"
+name = "v3.4 <img src='/images/header/star.svg' alt='star'>"
+URL = "/docs/v3.4"
 weight = 1

+[[languages.en.menu.main]]
+parent = "Documentation"
+name = "v3.3"
+URL = "/docs/v3.3"
+weight = 2
+
 [[languages.en.menu.main]]
 parent = "Documentation"
 name = "v3.2"
 URL = "https://v3-2.docs.kubesphere.io/docs"
-weight = 2
+weight = 3

 [[languages.en.menu.main]]
 parent = "Documentation"
 name = "v3.1"
 URL = "https://v3-1.docs.kubesphere.io/docs"
-weight = 3
+weight = 4

 [[languages.en.menu.main]]
 parent = "Documentation"
 name = "v3.0"
 URL = "https://v3-0.docs.kubesphere.io/docs"
-weight = 4
+weight = 5

 [[languages.en.menu.main]]
 parent = "Documentation"
 name = "v2.1"
 URL = "https://v2-1.docs.kubesphere.io/docs"
-weight = 5
+weight = 6

 [[languages.en.menu.main]]
 parent = "Documentation"
 name = "v2.0"
 URL = "https://v2-0.docs.kubesphere.io/docs/"
-weight = 6
+weight = 7

 [[languages.en.menu.main]]
 parent = "Documentation"
 name = "v1.0"
 URL = "https://v1-0.docs.kubesphere.io/docs/"
-weight = 7
+weight = 8

 [[languages.en.menu.main]]
 weight = 5

@@ -277,45 +283,51 @@ hasChildren = true
 name = "文档中心"
 [[languages.zh.menu.main]]
 parent = "文档中心"
-name = "v3.3 <img src='/images/header/star.svg' alt='star'>"
-URL = "/docs/v3.3"
+name = "v3.4 <img src='/images/header/star.svg' alt='star'>"
+URL = "/docs/v3.4"
 weight = 1

+[[languages.zh.menu.main]]
+parent = "文档中心"
+name = "v3.3"
+URL = "/docs/v3.3"
+weight = 2
+
 [[languages.zh.menu.main]]
 parent = "文档中心"
 name = "v3.2"
 URL = "https://v3-2.docs.kubesphere.io/zh/docs/"
-weight = 2
+weight = 3

 [[languages.zh.menu.main]]
 parent = "文档中心"
 name = "v3.1"
 URL = "https://v3-1.docs.kubesphere.io/zh/docs/"
-weight = 3
+weight = 4

 [[languages.zh.menu.main]]
 parent = "文档中心"
 name = "v3.0"
 URL = "https://v3-0.docs.kubesphere.io/zh/docs/"
-weight = 4
+weight = 5

 [[languages.zh.menu.main]]
 parent = "文档中心"
 name = "v2.1"
 URL = "https://v2-1.docs.kubesphere.io/docs/zh-CN/"
-weight = 5
+weight = 6

 [[languages.zh.menu.main]]
 parent = "文档中心"
 name = "v2.0"
 URL = "https://v2-0.docs.kubesphere.io/docs/zh-CN/"
-weight = 6
+weight = 7

 [[languages.zh.menu.main]]
 parent = "文档中心"
 name = "v1.0"
 URL = "https://v1-0.docs.kubesphere.io/docs/zh-CN/"
-weight = 7
+weight = 8

 [[languages.zh.menu.main]]
 weight = 5

@@ -5,8 +5,8 @@ _build:

 | Installation Tool | KubeSphere version | Supported Kubernetes versions |
 | ----------------- | ------------------ | ----------------------------- |
-| KubeKey           | 3.3.2              | v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, * v1.24.x |
-| ks-installer      | 3.3.2              | v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, * v1.24.x |
+| KubeKey           | 3.3.2              | v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, * v1.24.x |
+| ks-installer      | 3.3.2              | v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, * v1.24.x |

 {{< notice note >}}

@@ -9,7 +9,7 @@ LinkTitle: "Documentation"
 section1:
   title: KubeSphere Documentation
   content: Learn how to build and manage cloud-native applications using KubeSphere Container Platform. Get documentation, example code, tutorials, and more.
-  image: /images/docs/v3.3/banner.png
+  image: /images/docs/v3.x/banner.png

 sectionLink:
   docs:

@@ -46,13 +46,13 @@ section3:
   title: Run KubeSphere and Kubernetes Stack from the Cloud Service
   description: Cloud providers offer KubeSphere as a cloud-hosted service, helping you create a highly available Kubernetes cluster managed by KubeSphere within minutes in a few clicks. It enables you to use the cloud-hosted Kubernetes services out of the box.
   list:
-    - image: /images/docs/v3.3/aws.jpg
+    - image: /images/docs/v3.x/aws.jpg
       content: AWS Quickstart
       link: https://aws.amazon.com/quickstart/architecture/qingcloud-kubesphere/
-    - image: /images/docs/v3.3/microsoft-azure.jpg
+    - image: /images/docs/v3.x/microsoft-azure.jpg
      content: Azure Marketplace
      link: https://market.azure.cn/marketplace/apps/qingcloud.kubesphere
-    - image: /images/docs/v3.3/qingcloud.svg
+    - image: /images/docs/v3.x/qingcloud.svg
      content: QingCloud QKE
      link: https://www.qingcloud.com/products/kubesphereqke/

@@ -6,7 +6,7 @@ layout: "second"
 linkTitle: "Access Control and Account Management"
 weight: 12000

-icon: "/images/docs/v3.3/docs.svg"
+icon: "/images/docs/v3.x/docs.svg"

 ---

@@ -17,7 +17,7 @@ You need to deploy a Kubernetes cluster and install KubeSphere in the cluster. F

 ## Procedure

-1. Log in to KubeSphere as `admin`, move the cursor to <img src="/images/docs/v3.3/access-control-and-account-management/external-authentication/set-up-external-authentication/toolbox.png" width="20px" height="20px" alt="icon"> in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`:
+1. Log in to KubeSphere as `admin`, move the cursor to <img src="/images/docs/v3.x/access-control-and-account-management/external-authentication/set-up-external-authentication/toolbox.png" width="20px" height="20px" alt="icon"> in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`:

    ```bash
    kubectl -n kubesphere-system edit cc ks-installer

@@ -18,7 +18,7 @@ You need to deploy a Kubernetes cluster and install KubeSphere in the cluster. F

 ## Procedure

-1. Log in to KubeSphere as `admin`, move the cursor to <img src="/images/docs/v3.3/access-control-and-account-management/external-authentication/set-up-external-authentication/toolbox.png" width="20px" height="20px" alt="icon"> in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`:
+1. Log in to KubeSphere as `admin`, move the cursor to <img src="/images/docs/v3.x/access-control-and-account-management/external-authentication/set-up-external-authentication/toolbox.png" width="20px" height="20px" alt="icon"> in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`:

    ```bash
    kubectl -n kubesphere-system edit cc ks-installer

@@ -16,7 +16,7 @@ This document describes how to use an LDAP service as an external identity provi

 ## Procedure

-1. Log in to KubeSphere as `admin`, move the cursor to <img src="/images/docs/v3.3/access-control-and-account-management/external-authentication/set-up-external-authentication/toolbox.png" width="20px" height="20px" alt="icon"> in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`:
+1. Log in to KubeSphere as `admin`, move the cursor to <img src="/images/docs/v3.x/access-control-and-account-management/external-authentication/set-up-external-authentication/toolbox.png" width="20px" height="20px" alt="icon"> in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`:

    ```bash
    kubectl -n kubesphere-system edit cc ks-installer

@@ -10,7 +10,7 @@ This document describes how to use an external identity provider based on the OA

 The following figure shows the authentication process between KubeSphere and an external OAuth 2.0 identity provider.

-
+

 ## Prerequisites

@@ -81,7 +81,7 @@ KubeSphere provides two built-in OAuth 2.0 plugins: [GitHubIdentityProvider](htt

 ## Integrate an Identity Provider with KubeSphere

-1. Log in to KubeSphere as `admin`, move the cursor to <img src="/images/docs/v3.3/access-control-and-account-management/external-authentication/set-up-external-authentication/toolbox.png" width="20px" height="20px" alt="icon"> in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`:
+1. Log in to KubeSphere as `admin`, move the cursor to <img src="/images/docs/v3.x/access-control-and-account-management/external-authentication/set-up-external-authentication/toolbox.png" width="20px" height="20px" alt="icon"> in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`:

    ```bash
    kubectl -n kubesphere-system edit cc ks-installer

@@ -126,5 +126,5 @@ KubeSphere provides two built-in OAuth 2.0 plugins: [GitHubIdentityProvider](htt

 6. On the login page of the external identity provider, enter the username and password of a user configured at the identity provider to log in to KubeSphere.

-
+

@@ -24,7 +24,7 @@ The isolation of physical resources includes nodes and networks, while it also r

 To solve the issues above, KubeSphere provides a multi-tenant management solution based on Kubernetes.

-
+

 In KubeSphere, the [workspace](../../workspace-administration/what-is-workspace/) is the smallest tenant unit. A workspace enables users to share resources across clusters and projects. Workspace members can create projects in an authorized cluster and invite other members to cooperate in the same project.

@@ -54,4 +54,4 @@ KubeSphere also provides [auditing logs](../../pluggable-components/auditing-log

 For a complete authentication and authorization chain in KubeSphere, see the following diagram. KubeSphere has expanded RBAC rules using the Open Policy Agent (OPA). The KubeSphere team looks to integrate [Gatekeeper](https://github.com/open-policy-agent/gatekeeper) to provide more security management policies.

-
+

@@ -7,7 +7,7 @@ layout: "second"
 linkTitle: "App Store"
 weight: 14000

-icon: "/images/docs/v3.3/docs.svg"
+icon: "/images/docs/v3.x/docs.svg"

 ---

@@ -132,7 +132,7 @@ After the app is approved, `isv` can release the Redis application to the App St

 `app-reviewer` can create multiple categories for different types of applications based on their function and usage. It is similar to setting tags, and categories can be used in the App Store as filters, such as Big Data, Middleware, and IoT.

-1. Log in to KubeSphere as `app-reviewer`. To create a category, go to the **App Store Management** page and click <img src="/images/docs/v3.3/appstore/application-lifecycle-management/plus.png" height="20px"> in **App Categories**.
+1. Log in to KubeSphere as `app-reviewer`. To create a category, go to the **App Store Management** page and click <img src="/images/docs/v3.x/appstore/application-lifecycle-management/plus.png" height="20px"> in **App Categories**.

 2. Set a name and icon for the category in the dialog, then click **OK**. For Redis, you can enter `Database` for the field **Name**.

@@ -8,7 +8,7 @@ linkTitle: "Deploy Chaos Mesh on KubeSphere"

 [Chaos Mesh](https://github.com/chaos-mesh/chaos-mesh) is a cloud-native Chaos Engineering platform that orchestrates chaos in Kubernetes environments. With Chaos Mesh, you can test your system's resilience and robustness on Kubernetes by injecting various types of faults into Pods, network, file system, and even the kernel.

-
+

 ## Enable App Store on KubeSphere

@@ -22,34 +22,34 @@ linkTitle: "Deploy Chaos Mesh on KubeSphere"

 1. Log in to KubeSphere as `project-regular`, search for **chaos-mesh** in the **App Store**, and click on the search result to enter the app.

-
+

 2. In the **App Information** page, click **Install** on the upper right corner.

-
+

 3. In the **App Settings** page, set the application **Name**, **Location** (as your Namespace), and **App Version**, and then click **Next** on the upper right corner.

-
+

 4. Configure the `values.yaml` file as needed, or click **Install** to use the default configuration.

-
+

 5. Wait for the deployment to be finished. Upon completion, Chaos Mesh will be shown as **Running** in KubeSphere.

-
+


 ### Step 2: Visit Chaos Dashboard

 1. In the **Resource Status** page, copy the **NodePort** of `chaos-dashboard`.

-
+

 2. Access the Chaos Dashboard by entering `${NodeIP}:${NODEPORT}` in your browser. Refer to [Manage User Permissions](https://chaos-mesh.org/docs/manage-user-permissions/) to generate a token and log in to Chaos Dashboard.

-
+

 ### Step 3: Create a chaos experiment

@@ -63,20 +63,20 @@ curl -sSL https://mirrors.chaos-mesh.org/latest/web-show/deploy.sh | bash

 1. From your web browser, visit ${NodeIP}:8081 to access the **Web Show** application.

-
+

 2. Log in to Chaos Dashboard to create a chaos experiment. To observe the effect of network latency on the application, we set the **Target** as "Network Attack" to simulate a network delay scenario.

-
+

    The **Scope** of the experiment is set to `app: web-show`.

-
+

 3. Start the chaos experiment by submitting it.

-
+

    Now, you should be able to visit **Web Show** to observe experiment results:

-
+

@@ -48,7 +48,7 @@ This tutorial walks you through an example of deploying [Harbor](https://goharbo

 1. Based on the field `expose.type` you set in the configuration file, the access method may be different. As this example uses `nodePort` to access Harbor, visit `http://<NodeIP>:30002` as set in the previous step.

-
+

 {{< notice note >}}

@@ -58,7 +58,7 @@ This tutorial walks you through an example of deploying [Harbor](https://goharbo

 2. Log in to Harbor using the default account and password (`admin/Harbor12345`). The password is defined in the field `harborAdminPassword` in the configuration file.

-
+

 ## FAQ

@@ -21,33 +21,33 @@ This tutorial walks you through an example of deploying Meshery from the App Sto
 1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner.
 2. Search for **Meshery** in the App Store, and click on the search result to enter the app.

-
+

 3. In the **App Information** page, click **Install** on the upper right corner.

-
+

 4. In the **App Settings** page, set the application **Name**, **Location** (as your Namespace), and **App Version**, and then click **Next** on the upper right corner.

-
+

 5. Configure the **values.yaml** file as needed, or click **Install** to use the default configuration.

-
+

 6. Wait for the deployment to be finished. Upon completion, **Meshery** will be shown as **Running** in KubeSphere.

-
+

 ### Step 2: Access the Meshery Dashboard

 1. Go to **Services** and click the service name of Meshery.
 2. In the **Resource Status** page, copy the **NodePort** of Meshery.

-
+

 3. Access the Meshery Dashboard by entering **${NodeIP}:${NODEPORT}** in your browser.

-
+

 4. For more information about Meshery, refer to [the official documentation of Meshery](https://docs.meshery.io/).

@@ -44,9 +44,9 @@ To access MinIO outside the cluster, you need to expose the app through a NodePo

 6. Access the MinIO browser through `<NodeIP>:<NodePort>` using `accessKey` and `secretKey`.

-
+

-
+

 {{< notice note >}}

@@ -43,7 +43,7 @@ This tutorial walks you through an example of deploying MongoDB from the App Sto

 3. In the pop-up window, enter commands in the terminal directly to use the app.

-
+

 {{< notice note >}}

@@ -37,7 +37,7 @@ This tutorial walks you through an example of deploying MySQL from the App Store

 3. In the terminal, execute `mysql -uroot -ptesting` to log in to MySQL as the root user.

-
+

 ### Step 3: Access the MySQL database outside the cluster

@@ -53,9 +53,9 @@ To access MySQL outside the cluster, you need to expose the app through a NodePo

 5. To access your MySQL database, you need to use the MySQL client or install a third-party application such as SQLPro Studio for the connection. The following example demonstrates how to access the MySQL database through SQLPro Studio.

-
+

-
+

 {{< notice note >}}

@@ -49,7 +49,7 @@ To access NGINX outside the cluster, you need to expose the app through a NodePo

 5. Access NGINX through `<NodeIP>:<NodePort>`.

-
+

 {{< notice note >}}

@@ -49,7 +49,7 @@ To access PostgreSQL outside the cluster, you need to expose the app through a N

 5. Expand the Pod menu under **Pods** and click the **Terminal** icon. In the pop-up window, enter commands directly to access the database.

-
+

 {{< notice note >}}

@@ -48,9 +48,9 @@ To access RabbitMQ outside the cluster, you need to expose the app through a Nod

 5. Access RabbitMQ **management** through `<NodeIP>:<NodePort>`. Note that the username and password are those you set in **Step 1**.

-
+

-
+

 {{< notice note >}}

@@ -45,6 +45,6 @@ We recommend you use the latest version of RadonDB MySQL. For deployment inst

 3. In the pop-up window, enter commands in the terminal directly to use the app.

-
+

 4. If you want to access RadonDB MySQL outside the cluster, see [the open-source project of RadonDB MySQL](https://github.com/radondb/radondb-mysql-kubernetes) for details.

@@ -57,6 +57,6 @@ This tutorial demonstrates how to deploy RadonDB PostgreSQL from the App Store o
    psql -h <Pod name> -p 5432 -U postgres -d postgres
    ```

-
+

 4. If you want to access RadonDB PostgreSQL outside the cluster, see [the open-source project of RadonDB PostgreSQL](https://github.com/radondb/radondb-postgresql-kubernetes) for details.

@@ -43,6 +43,6 @@ This tutorial walks you through an example of deploying Redis from the App Store

 3. In the pop-up window, use the `redis-cli` command in the terminal to use the app.

-
+

 4. For more information, see [the official documentation of Redis](https://redis.io/documentation).

@@ -36,7 +36,7 @@ This tutorial walks you through an example of deploying Tomcat from the App Stor

 3. You can view deployed projects in `/usr/local/tomcat/webapps`.

-
+

 ### Step 3: Access a Tomcat project from your browser

@@ -52,7 +52,7 @@ To access a Tomcat project outside the cluster, you need to expose the app throu

 5. Access the sample Tomcat project through `<NodeIP>:<NodePort>/sample` in your browser.

-
+

 {{< notice note >}}

@@ -121,7 +121,7 @@ This tutorial demonstrates how to deploy ClickHouse Operator and a ClickHouse Cl
    $ kubectl edit chi clickho-749j8s -n demo-project
    ```

-
+

 {{< notice note >}}

@@ -135,7 +135,7 @@ This tutorial demonstrates how to deploy ClickHouse Operator and a ClickHouse Cl
    $ kubectl exec -it chi-clickho-749j8s-all-nodes-0-0-0 -n demo-project -- clickhouse-client --user=clickhouse --password=c1ickh0use0perator
    ```

-
+

 {{< notice note >}}

@@ -71,7 +71,7 @@ This tutorial demonstrates how to deploy GitLab on KubeSphere.

 1. Go to **Secrets** under **Configuration**, enter `gitlab-initial-root-password` in the search box, and then press **Enter** on your keyboard to search for the Secret.

-2. Click the Secret to go to its detail page, and then click <img src="/images/docs/v3.3/appstore/external-apps/deploy-gitlab/eye-icon.png" width="20px" alt="icon" /> in the upper-right corner to view the password. Make sure you copy it.
+2. Click the Secret to go to its detail page, and then click <img src="/images/docs/v3.x/appstore/external-apps/deploy-gitlab/eye-icon.png" width="20px" alt="icon" /> in the upper-right corner to view the password. Make sure you copy it.

 ### Step 4: Edit the hosts file

@@ -108,9 +108,9 @@ This tutorial demonstrates how to deploy GitLab on KubeSphere.

 2. Access GitLab through `http://gitlab.demo-project.svc.cluster.local:31246` using the root account and its initial password (`root/ojPWrWECLWN0XFJkGs7aAqtitGMJlVfS0fLEDE03P9S0ji34XDoWmxs2MzgZRRWF`).

-
+

-
+

 {{< notice note >}}

@@ -59,9 +59,9 @@ This tutorial demonstrates how to deploy Litmus on KubeSphere and create chaos e

 2. You can access Litmus `Portal` through `${NodeIP}:${NODEPORT}` using the default username and password (`admin`/`litmus`).

-
+

-
+

 {{< notice note >}}
 You may need to open the port in your security groups and configure port forwarding rules depending on where your Kubernetes cluster is deployed. Make sure you use your own `NodeIP`.

@@ -107,7 +107,7 @@ For details about how to deploy External Agent, see [Litmus Docs](https://litmus

 On the Litmus `Portal`, you can see that the experiment is successful.

-
+

 You can click a specific workflow node to view its detailed logs.

@@ -131,7 +131,7 @@ For details about how to deploy External Agent, see [Litmus Docs](https://litmus

 You can ping the Pod IP address to test the packet loss rate.

-
+

 {{< notice note >}}

@@ -55,7 +55,7 @@ This tutorial demonstrates how to deploy MeterSphere on KubeSphere.

 2. You can access MeterSphere through `<NodeIP>:<NodePort>` using the default account and password (`admin/metersphere`).

-
+

 {{< notice note >}}

@@ -33,7 +33,7 @@ This tutorial demonstrates how to deploy RadonDB MySQL Operator and a RadonDB My

 Your repository is displayed in the list after it is successfully imported to KubeSphere.

-
+

 ### Step 2: Deploy RadonDB MySQL Operator

@@ -48,7 +48,7 @@ This tutorial demonstrates how to deploy RadonDB MySQL Operator and a RadonDB My
    On the **Chart Files** tab, you can view the configuration and edit the `.yaml` files.
    On the **Version** list, you can view the app versions and select a version.

-
+

 5. Click **Deploy** to go to the **Basic Information** page.

@@ -139,7 +139,7 @@ Go to the `demo-project` project management page, access RadonDB MySQL through t

 4. In the terminal window, run the following command to access the RadonDB MySQL cluster.

-
+

 **Method 2**

@@ -157,4 +157,4 @@ In the below command, `sample-mysql-0` is the Pod name and `demo-project` is the

 {{</ notice >}}

-
+

@@ -132,7 +132,7 @@ The process of deploying a TiDB cluster is similar to deploying TiDB Operator.

 2. TiDB integrates Prometheus and Grafana to monitor performance of the database cluster. For example, you can access Grafana through `<NodeIP>:<NodePort>` to view metrics.

-
+

 {{< notice note >}}

@@ -7,7 +7,7 @@ linkTitle: "Cluster Administration"

 weight: 8000

-icon: "/images/docs/v3.3/docs.svg"
+icon: "/images/docs/v3.x/docs.svg"

 ---

@@ -61,14 +61,14 @@ You need to prepare a user with the `platform-admin` role, for example, `admin`.
 3. Click the **Monitoring** tab to view the monitoring metrics of the cluster gateway.
 4. Click the **Configuration Options** tab to view configuration options of the cluster gateway.
 5. Click the **Gateway Logs** tab to view logs of the cluster gateway.
-6. Click the **Resource Status** tab to view workload status of the cluster gateway. Click <img src="/images/docs/v3.3/common-icons/replica-plus-icon.png" width="15" alt="icon" /> or <img src="/images/docs/v3.3/common-icons/replica-minus-icon.png" width="15" /> to scale up or scale down the number of replicas.
+6. Click the **Resource Status** tab to view workload status of the cluster gateway. Click <img src="/images/docs/v3.x/common-icons/replica-plus-icon.png" width="15" alt="icon" /> or <img src="/images/docs/v3.x/common-icons/replica-minus-icon.png" width="15" /> to scale up or scale down the number of replicas.
 7. Click the **Metadata** tab to view annotations of the cluster gateway.

 ## View Project Gateways

 On the **Gateway Settings** page, click the **Project Gateway** tab to view project gateways.

-Click <img src="/images/docs/v3.3/project-administration/role-and-member-management/three-dots.png" width="20px" alt="icon"> on the right of a project gateway to select an operation from the drop-down menu:
+Click <img src="/images/docs/v3.x/project-administration/role-and-member-management/three-dots.png" width="20px" alt="icon"> on the right of a project gateway to select an operation from the drop-down menu:

 - **Edit**: Edit configurations of the project gateway.
 - **Disable**: Disable the project gateway.

@@ -48,7 +48,7 @@ KubeSphere also has built-in policies which will trigger alerts if conditions de

 ## Edit an Alerting Policy

-To edit an alerting policy after it is created, on the **Alerting Policies** page, click <img src="/images/docs/v3.3/cluster-administration/cluster-wide-alerting-and-notification/alerting-policies-node-level/edit-policy.png" height="25px" alt="icon"> on the right of the alerting policy.
+To edit an alerting policy after it is created, on the **Alerting Policies** page, click <img src="/images/docs/v3.x/cluster-administration/cluster-wide-alerting-and-notification/alerting-policies-node-level/edit-policy.png" height="25px" alt="icon"> on the right of the alerting policy.

 1. Click **Edit** from the drop-down list and edit the alerting policy following the same steps as you create it. Click **OK** on the **Message Settings** page to save it.

@@ -62,8 +62,8 @@ Under **Monitoring**, the **Alert Monitoring** chart shows the actual usage or a

 {{< notice note >}}

-You can click <img src="/images/docs/v3.3/cluster-administration/cluster-wide-alerting-and-notification/alerting-policies-node-level/drop-down-list.png" width='20' alt="icon" /> in the upper-right corner to select or customize a time range for the alert monitoring chart.
+You can click <img src="/images/docs/v3.x/cluster-administration/cluster-wide-alerting-and-notification/alerting-policies-node-level/drop-down-list.png" width='20' alt="icon" /> in the upper-right corner to select or customize a time range for the alert monitoring chart.

-You can also click <img src="/images/docs/v3.3/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/refresh.png" width='25' alt="icon" /> in the upper-right corner to manually refresh the alert monitoring chart.
+You can also click <img src="/images/docs/v3.x/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/refresh.png" width='25' alt="icon" /> in the upper-right corner to manually refresh the alert monitoring chart.

 {{</ notice >}}

@@ -10,7 +10,7 @@ Alertmanager handles alerts sent by client applications such as the Prometheus s

 KubeSphere has been using Prometheus as its monitoring service's backend from the first release. Starting from v3.0, KubeSphere adds Alertmanager to its monitoring stack to manage alerts sent from Prometheus as well as other components such as [kube-events](https://github.com/kubesphere/kube-events) and kube-auditing.

-
+

 ## Use Alertmanager to Manage Prometheus Alerts

@@ -41,9 +41,9 @@ Cluster nodes are only accessible to cluster administrators. Some node metrics a
 ## Node Management
 On the **Cluster Nodes** page, you can perform the following operations:

-- **Cordon/Uncordon**: Click <img src="/images/docs/v3.3/common-icons/three-dots.png" width="15" alt="icon" /> on the right of the cluster node, and then click **Cordon** or **Uncordon**. Marking a node as unschedulable is very useful during a node reboot or other maintenance. The Kubernetes scheduler will not schedule new Pods to this node if it's been marked unschedulable. Besides, this does not affect existing workloads already on the node.
+- **Cordon/Uncordon**: Click <img src="/images/docs/v3.x/common-icons/three-dots.png" width="15" alt="icon" /> on the right of the cluster node, and then click **Cordon** or **Uncordon**. Marking a node as unschedulable is very useful during a node reboot or other maintenance. The Kubernetes scheduler will not schedule new Pods to this node if it's been marked unschedulable. Besides, this does not affect existing workloads already on the node.

-- **Open Terminal**: Click <img src="/images/docs/v3.3/common-icons/three-dots.png" width="15" alt="icon" /> on the right of the cluster node, and then click **Open Terminal**. This makes it convenient for you to manage nodes, such as modifying node configurations and downloading images.
+- **Open Terminal**: Click <img src="/images/docs/v3.x/common-icons/three-dots.png" width="15" alt="icon" /> on the right of the cluster node, and then click **Open Terminal**. This makes it convenient for you to manage nodes, such as modifying node configurations and downloading images.

 - **Edit Taints**: Taints allow a node to repel a set of pods. To edit a taint, select the check box before the target node. In the **Edit Taints** dialog box that is displayed, you can add, delete, or modify taints.

@@ -34,7 +34,7 @@ This tutorial demonstrates how to configure an email server and add recipients to

 2. After it is added, the email address of a recipient will be listed under **Recipient Settings**. You can add up to 50 recipients and all of them will be able to receive email notifications.

-3. To remove a recipient, hover over the email address you want to remove, then click <img src="/images/docs/v3.3/common-icons/trashcan.png" width="25" height="25" alt="icon" />.
+3. To remove a recipient, hover over the email address you want to remove, then click <img src="/images/docs/v3.x/common-icons/trashcan.png" width="25" height="25" alt="icon" />.

 ### Set notification conditions

@@ -52,7 +52,7 @@ This tutorial demonstrates how to configure an email server and add recipients to

 2. You can click **Add** to add notification conditions.

-3. You can click <img src="/images/docs/v3.3/common-icons/trashcan.png" width='25' height='25' alt="icon" /> on the right of a notification condition to delete the condition.
+3. You can click <img src="/images/docs/v3.x/common-icons/trashcan.png" width='25' height='25' alt="icon" /> on the right of a notification condition to delete the condition.

 4. After the configurations are complete, you can click **Send Test Message** for verification.

|
@ -62,7 +62,7 @@ You must provide the Slack token on the console for authentication so that KubeS
|
|||
|
||||
{{</ notice >}}
|
||||
|
||||
9. You can click **Add** to add notification conditions, or click <img src="/images/docs/v3.3/common-icons/trashcan.png" width='25' height='25' alt="icon" /> on the right of a notification condition to delete the condition.
|
||||
9. You can click **Add** to add notification conditions, or click <img src="/images/docs/v3.x/common-icons/trashcan.png" width='25' height='25' alt="icon" /> on the right of a notification condition to delete the condition.
|
||||
|
||||
10. After the configurations are complete, you can click **Send Test Message** for verification.
|
||||
|
||||
|
|
|
|||
|
|
@ -47,7 +47,7 @@ You need to prepare a user granted the `platform-admin` role. For more informati
|
|||
|
||||
{{</ notice >}}
|
||||
|
||||
6. You can click **Add** to add notification conditions, or click <img src="/images/docs/v3.3/common-icons/trashcan.png" width='25' height='25' alt="icon" /> on the right of a notification condition to delete the condition.
|
||||
6. You can click **Add** to add notification conditions, or click <img src="/images/docs/v3.x/common-icons/trashcan.png" width='25' height='25' alt="icon" /> on the right of a notification condition to delete the condition.
|
||||
|
||||
7. After the configurations are complete, you can click **Send Test Message** for verification.
|
||||
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ You need to have a user with the `platform-admin` role, for example, the `admin`
|
|||
|
||||
1. Log in to the KubeSphere console as `admin`.
|
||||
|
||||
2. Click <img src="/images/docs/v3.3/common-icons/hammer.png" width="15" alt="icon" /> in the lower-right corner and select **Kubectl**.
|
||||
2. Click <img src="/images/docs/v3.x/common-icons/hammer.png" width="15" alt="icon" /> in the lower-right corner and select **Kubectl**.
|
||||
|
||||
3. In the displayed dialog box, run the following command:
|
||||
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ layout: "single"
|
|||
linkTitle: "Cluster Shutdown and Restart"
|
||||
weight: 8800
|
||||
|
||||
icon: "/images/docs/v3.3/docs.svg"
|
||||
icon: "/images/docs/v3.x/docs.svg"
|
||||
---
|
||||
This document describes the process of gracefully shutting down your Kubernetes cluster and how to restart it. You might need to temporarily shut down your cluster for maintenance reasons.
|
||||
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ layout: "second"
|
|||
linkTitle: "DevOps User Guide"
|
||||
weight: 11000
|
||||
|
||||
icon: "/images/docs/v3.3/docs.svg"
|
||||
icon: "/images/docs/v3.x/docs.svg"
|
||||
---
|
||||
|
||||
To deploy and manage your CI/CD tasks and related workloads on your Kubernetes clusters, you use the KubeSphere DevOps system. This chapter demonstrates how to manage and work in DevOps projects, including running pipelines, creating credentials, and integrating tools.
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ weight: 11430
|
|||
|
||||
As is shown in the graph below, there is the workflow for a Maven project in KubeSphere DevOps, which uses a Jenkins pipeline to build and deploy the Maven project. All steps are defined in the pipeline.
|
||||
|
||||

|
||||

|
||||
|
||||
At first, the Jenkins Master creates a Pod to run the pipeline. Kubernetes creates the Pod as the agent of Jenkins Master, and the Pod will be destroyed after the pipeline finished. The main process includes cloning code, building and pushing an image, and deploying the workload.
|
||||
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ This tutorial demonstrates how to create a multi-cluster pipeline on KubeSphere.
|
|||
|
||||
This tutorial uses three clusters to serve as three isolated environments in the workflow. See the diagram as below.
|
||||
|
||||

|
||||

|
||||
|
||||
The three clusters are used for development, testing, and production respectively. Once codes get submitted to a Git repository, a pipeline will be triggered to run through the following stages—`Unit Test`, `SonarQube Analysis`, `Build & Push`, and `Deploy to Development Cluster`. Developers use the development cluster for self-testing and validation. When developers give approval, the pipeline will proceed to the stage of `Deploy to Testing Cluster` for stricter validation. Finally, the pipeline, with necessary approval ready, will reach the stage of `Deploy to Production Cluster` to provide services externally.
|
||||
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere.
|
|||
|
||||
### Step 1: Get a Repository URL on Nexus
|
||||
|
||||
1. Log in to the Nexus console as `admin` and click <img src="/images/docs/v3.3/devops-user-guide/examples/use-nexus-in-pipeline/gear.png" height="18px" alt="icon" /> on the top navigation bar.
|
||||
1. Log in to the Nexus console as `admin` and click <img src="/images/docs/v3.x/devops-user-guide/examples/use-nexus-in-pipeline/gear.png" height="18px" alt="icon" /> on the top navigation bar.
|
||||
|
||||
2. Go to the **Repositories** page and you can see that Nexus provides three types of repository.
|
||||
|
||||
|
|
@ -37,9 +37,9 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere.
|
|||
|
||||
2. In your own GitHub repository of **learn-pipeline-java**, click the file `pom.xml` in the root directory.
|
||||
|
||||
3. Click <img src="/images/docs/v3.3/devops-user-guide/examples/use-nexus-in-pipeline/github-edit-icon.png" height="18px" alt="icon" /> to modify the code segment of `<distributionManagement>` in the file. Set the `<id>` and use the URLs of your own Nexus repositories.
|
||||
3. Click <img src="/images/docs/v3.x/devops-user-guide/examples/use-nexus-in-pipeline/github-edit-icon.png" height="18px" alt="icon" /> to modify the code segment of `<distributionManagement>` in the file. Set the `<id>` and use the URLs of your own Nexus repositories.
|
||||
|
||||

|
||||

|
||||
|
||||
4. When you finish, click **Commit changes** at the bottom of the page.
|
||||
|
||||
|
|
@ -63,7 +63,7 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere.
|
|||
</servers>
|
||||
```
|
||||
|
||||

|
||||

|
||||
|
||||
{{< notice note >}}
|
||||
|
||||
|
|
@ -84,7 +84,7 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere.
|
|||
</mirrors>
|
||||
```
|
||||
|
||||

|
||||

|
||||
|
||||
{{< notice note >}}
|
||||
|
||||
|
|
@ -160,11 +160,11 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere.
|
|||
|
||||
4. Log in to Nexus and click **Browse**. Click **maven-public** and you can see all the dependencies have been downloaded.
|
||||
|
||||

|
||||

|
||||
|
||||
5. Go back to the **Browse** page and click **maven-snapshots**. You can see the JAR package has been uploaded to the repository.
|
||||
|
||||

|
||||

|
||||
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -52,7 +52,7 @@ To integrate SonarQube into your pipeline, you must install SonarQube Server fir
|
|||
|
||||
3. You will get this prompt:
|
||||
|
||||

|
||||

|
||||
|
||||
## Get the SonarQube Console Address
|
||||
|
||||
|
|
@ -99,15 +99,15 @@ To integrate SonarQube into your pipeline, you must install SonarQube Server fir
|
|||
|
||||
1. Click the letter **A** and select **My Account** from the menu to go to the **Profile** page.
|
||||
|
||||

|
||||

|
||||
|
||||
2. Click **Security** and enter a token name, such as `kubesphere`.
|
||||
|
||||

|
||||

|
||||
|
||||
3. Click **Generate** and copy the token.
|
||||
|
||||

|
||||

|
||||
|
||||
{{< notice warning >}}
|
||||
|
||||
|
|
@ -133,15 +133,15 @@ To integrate SonarQube into your pipeline, you must install SonarQube Server fir
|
|||
|
||||
3. Click **Administration**, **Configuration** and **Webhooks** in turn to create a webhook.
|
||||
|
||||

|
||||

|
||||
|
||||
4. Click **Create**.
|
||||
|
||||

|
||||

|
||||
|
||||
5. Enter **Name** and **Jenkins Console URL** (for example, the SonarQube Webhook address) in the displayed dialog box. Click **Create** to finish.
|
||||
|
||||

|
||||

|
||||
|
||||
### Step 4: Add the SonarQube configuration to ks-installer
|
||||
|
||||
|
|
@ -201,9 +201,9 @@ To integrate SonarQube into your pipeline, you must install SonarQube Server fir
|
|||
|
||||
7. Enter **Name** and **Server URL** (`http://<Node IP>:<NodePort>`). Click **Add**, select **Jenkins**, and then create the credentials with the SonarQube admin token in the displayed dialog box as shown in the second image below. After adding the credentials, select it from the drop-down list for **Server authentication token** and then click **Apply** to finish.
|
||||
|
||||

|
||||

|
||||
|
||||

|
||||

|
||||
|
||||
{{< notice note >}}
|
||||
|
||||
|
|
@ -250,23 +250,23 @@ You need a SonarQube token so that your pipeline can communicate with SonarQube
|
|||
|
||||
1. On the SonarQube console, click **Create new project**.
|
||||
|
||||

|
||||

|
||||
|
||||
2. Enter a project key, such as `java-demo`, and click **Set Up**.
|
||||
|
||||

|
||||

|
||||
|
||||
3. Enter a project name, such as `java-sample`, and click **Generate**.
|
||||
|
||||

|
||||

|
||||
|
||||
4. After the token is created, click **Continue**.
|
||||
|
||||

|
||||

|
||||
|
||||
5. Choose **Java** and **Maven** respectively. Copy the serial number within the green box in the image below, which needs to be added in the [Credentials](../../../devops-user-guide/how-to-use/devops-settings/credential-management/#create-credentials) section if it is to be used in pipelines.
|
||||
|
||||

|
||||

|
||||
|
||||
## View Results on the KubeSphere Console
|
||||
|
||||
|
|
|
|||
|
|
@ -90,7 +90,7 @@ In KubeSphere 3.3, you can import a GitHub, GitLab, Bitbucket, or Git-based repo
|
|||
{{</ notice >}}
|
||||
|
||||
7. In the GitHub repositories that are displayed, select a repository, and click **OK**.
|
||||
8. Click <img src="/images/docs/v3.3/common-icons/three-dots.png" width="15" alt="icon" /> on the right of the imported code repository, and you can perform the following operations:
|
||||
8. Click <img src="/images/docs/v3.x/common-icons/three-dots.png" width="15" alt="icon" /> on the right of the imported code repository, and you can perform the following operations:
|
||||
|
||||
- **Edit**: Edits the alias and description of the code repository and reselects a code repository.
|
||||
- **Edit YAML**: Edits the YAML file of the code repository.
|
||||
|
|
|
|||
|
|
@ -374,7 +374,7 @@ This section walks you through the process of deploying an application using a c
|
|||
</tbody>
|
||||
</table>
|
||||
|
||||
2. Click <img src="/images/docs/v3.3/common-icons/three-dots.png" width="15" alt="icon" /> on the right of the continuous deployment, and you can perform the following:
|
||||
2. Click <img src="/images/docs/v3.x/common-icons/three-dots.png" width="15" alt="icon" /> on the right of the continuous deployment, and you can perform the following:
|
||||
- **Edit Information**: edits the alias and description.
|
||||
- **Edit YAML**: edits the YAML file.
|
||||
- **Sync**: triggers resources synchronization.
|
||||
|
|
@ -393,7 +393,7 @@ This section walks you through the process of deploying an application using a c
|
|||
|
||||
1. Go to the project where the continuous deployment resides, in the left-side navigation pane, click **Services**.
|
||||
|
||||
2. On the **Services** page on the left, click <img src="/images/docs/v3.3/common-icons/three-dots.png" width="15" alt="icon" /> on the right of the deployed application, and click **Edit External Access**.
|
||||
2. On the **Services** page on the left, click <img src="/images/docs/v3.x/common-icons/three-dots.png" width="15" alt="icon" /> on the right of the deployed application, and click **Edit External Access**.
|
||||
|
||||
3. In **Access Mode**, select **NodePort**, and click **OK**.
|
||||
|
||||
|
|
|
|||
|
|
@ -49,7 +49,7 @@ In **DevOps Project Roles**, there are three available built-in roles as shown b
|
|||
|
||||
{{</ notice >}}
|
||||
|
||||
4. Newly created roles will be listed in **DevOps Project Roles**. You can click <img src="/images/docs/v3.3/common-icons/three-dots.png" height="15px" alt="icon"> on the right to edit it.
|
||||
4. Newly created roles will be listed in **DevOps Project Roles**. You can click <img src="/images/docs/v3.x/common-icons/three-dots.png" height="15px" alt="icon"> on the right to edit it.
|
||||
|
||||
{{< notice note >}}
|
||||
|
||||
|
|
@ -61,7 +61,7 @@ In **DevOps Project Roles**, there are three available built-in roles as shown b
|
|||
|
||||
1. In **DevOps Project Settings**, select **DevOps Project Members** and click **Invite**.
|
||||
|
||||
2. Click <img src="/images/docs/v3.3/common-icons/invite-member-button.png" height="15px" alt="icon"> to invite a user to the DevOps project. Grant the role of `pipeline-creator` to the account.
|
||||
2. Click <img src="/images/docs/v3.x/common-icons/invite-member-button.png" height="15px" alt="icon"> to invite a user to the DevOps project. Grant the role of `pipeline-creator` to the account.
|
||||
|
||||
{{< notice note >}}
|
||||
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ In the current version, there are 4 types of built-in podTemplates, i.e. `base`,
|
|||
|
||||
You can use the built-in podTemplate by specifying the label for an agent. For example, to use the nodejs podTemplate, you can set the label to `nodejs` when creating the Pipeline, as shown in the example below.
|
||||
|
||||

|
||||

|
||||
|
||||
```groovy
|
||||
pipeline {
|
||||
|
|
|
|||
|
|
@ -115,11 +115,11 @@ Pipelines include [declarative pipelines](https://www.jenkins.io/doc/book/pipeli
|
|||
|
||||
{{</ notice >}}
|
||||
|
||||

|
||||

|
||||
|
||||
2. To add a stage, click the plus icon on the left. Click the box above the **Add Step** area and set a name (for example, `Checkout SCM`) for the stage in the field **Name** on the right.
|
||||
|
||||

|
||||

|
||||
|
||||
3. Click **Add Step**. Select **git** from the list as the example code is pulled from GitHub. In the displayed dialog box, fill in the required field. Click **OK** to finish.
|
||||
|
||||
|
|
@ -127,21 +127,21 @@ Pipelines include [declarative pipelines](https://www.jenkins.io/doc/book/pipeli
|
|||
- **Name**. You do not need to enter the Credential ID for this tutorial.
|
||||
- **Branch**. It defaults to the master branch if you leave it blank. Enter `sonarqube` or leave it blank if you do not need the code analysis stage.
|
||||
|
||||

|
||||

|
||||
|
||||
4. The first stage is now set.
|
||||
|
||||

|
||||

|
||||
|
||||
#### Stage 2: Unit test
|
||||
|
||||
1. Click the plus icon on the right of stage 1 to add a new stage to perform a unit test in the container. Name it `Unit Test`.
|
||||
|
||||

|
||||

|
||||
|
||||
2. Click **Add Step** and select **container** from the list. Name it `maven` and then click **OK**.
|
||||
|
||||

|
||||

|
||||
|
||||
3. Click **Add Nesting Steps** to add a nested step under the `maven` container. Select **shell** from the list and enter the following command in the command line. Click **OK** to save it.
|
||||
|
||||
|
|
@ -162,27 +162,27 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n
|
|||
|
||||
1. Click the plus icon on the right of the `Unit Test` stage to add a stage for SonarQube code analysis in the container. Name it `Code Analysis`.
|
||||
|
||||

|
||||

|
||||
|
||||
2. Click **Add Step** under **Task** in **Code Analysis** and select **container**. Name it `maven` and click **OK**.
|
||||
|
||||

|
||||

|
||||
|
||||
3. Click **Add Nesting Steps** under the `maven` container to add a nested step. Click **withCredentials** and select the SonarQube token (`sonar-token`) from the **Name** list. Enter `SONAR_TOKEN` for **Text Variable**, then click **OK**.
|
||||
|
||||

|
||||

|
||||
|
||||
4. Under the **withCredentials** step, click **Add Nesting Steps** to add a nested step for it.
|
||||
|
||||

|
||||

|
||||
|
||||
5. Click **withSonarQubeEnv**. In the displayed dialog box, do not change the default name `sonar` and click **OK** to save it.
|
||||
|
||||

|
||||

|
||||
|
||||
6. Under the **withSonarQubeEnv** step, click **Add Nesting Steps** to add a nested step for it.
|
||||
|
||||

|
||||

|
||||
|
||||
7. Click **shell** and enter the following command in the command line for the sonarqube branch and authentication. Click **OK** to finish.
|
||||
|
||||
|
|
@ -190,29 +190,29 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n
|
|||
mvn sonar:sonar -Dsonar.login=$SONAR_TOKEN
|
||||
```
|
||||
|
||||

|
||||

|
||||
|
||||
8. Click **Add Nesting Steps** (the third one) for the **container** step directly and select **timeout**. Enter `1` for time and select **Hours** for unit. Click **OK** to finish.
|
||||
|
||||

|
||||

|
||||
|
||||

|
||||

|
||||
|
||||
9. Click **Add Nesting Steps** for the **timeout** step and select **waitForQualityGate**. Select **Start the follow-up task after the inspection** in the displayed dialog box. Click **OK** to save it.
|
||||
|
||||

|
||||

|
||||
|
||||

|
||||

|
||||
|
||||
#### Stage 4: Build and push the image
|
||||
|
||||
1. Click the plus icon on the right of the previous stage to add a new stage to build and push images to Docker Hub. Name it `Build and Push`.
|
||||
|
||||

|
||||

|
||||
|
||||
2. Click **Add Step** under **Task** and select **container**. Name it `maven`, and then click **OK**.
|
||||
|
||||

|
||||

|
||||
|
||||
3. Click **Add Nesting Steps** under the `maven` container to add a nested step. Select **shell** from the list, and enter the following command in the displayed dialog box. Click **OK** to finish.
|
||||
|
||||
|
|
@ -220,7 +220,7 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n
|
|||
mvn -Dmaven.test.skip=true clean package
|
||||
```
|
||||
|
||||

|
||||

|
||||
|
||||
4. Click **Add Nesting Steps** again and select **shell**. Enter the following command in the command line to build a Docker image based on the [Dockerfile](https://github.com/kubesphere/devops-maven-sample/blob/sonarqube/Dockerfile-online). Click **OK** to confirm.
|
||||
|
||||
|
|
@ -234,7 +234,7 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n
|
|||
docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BUILD_NUMBER .
|
||||
```
|
||||
|
||||

|
||||

|
||||
|
||||
5. Click **Add Nesting Steps** again and select **withCredentials**. Fill in the following fields in the displayed dialog box. Click **OK** to confirm.
|
||||
|
||||
|
|
@ -248,7 +248,7 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n
|
|||
|
||||
{{</ notice >}}
|
||||
|
||||

|
||||

|
||||
|
||||
6. Click **Add Nesting Steps** (the first one) in the **withCredentials** step created above. Select **shell** and enter the following command in the displayed dialog box, which is used to log in to Docker Hub. Click **OK** to confirm.
|
||||
|
||||
|
|
@ -256,7 +256,7 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n
|
|||
echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin
|
||||
```
|
||||
|
||||

|
||||

|
||||
|
||||
7. Click **Add nesting steps** in the **withCredentials** step. Select **shell** and enter the following command to push the SNAPSHOT image to Docker Hub. Click **OK** to finish.
|
||||
|
||||
|
|
@ -264,27 +264,27 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n
|
|||
docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BUILD_NUMBER
|
||||
```
|
||||
|
||||

|
||||

|
||||
|
||||
#### Stage 5: Generate the artifact
|
||||
|
||||
1. Click the plus icon on the right of the **Build and Push** stage to add a new stage to save artifacts and name it `Artifacts`. This example uses a JAR package.
|
||||
|
||||

|
||||

|
||||
|
||||
2. With the **Artifacts** stage selected, click **Add Step** under **Task** and select **archiveArtifacts**. Enter `target/*.jar` in the displayed dialog box, which is used to set the archive path of artifacts in Jenkins. Click **OK** to finish.
|
||||
|
||||

|
||||

|
||||
|
||||
#### Stage 6: Deploy to development
|
||||
|
||||
1. Click the plus icon on the right of the stage **Artifacts** to add the last stage. Name it `Deploy to Dev`. This stage is used to deploy resources to your development environment (namely, the project of `kubesphere-sample-dev`).
|
||||
|
||||

|
||||

|
||||
|
||||
2. Click **Add Step** under the **Deploy to Dev** stage. Select **input** from the list and enter `@project-admin` in the **Message** field, which means the account `project-admin` will review this pipeline when it runs to this stage. Click **OK** to save it.
|
||||
|
||||

|
||||

|
||||
|
||||
{{< notice note >}}
|
||||
|
||||
|
|
@ -320,7 +320,7 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n
|
|||
|
||||
{{< notice note >}}
|
||||
|
||||
On the **Pipelines** page, you can click <img src="/images/docs/v3.3/common-icons/three-dots.png" width="15" alt="icon" /> on the right side of the pipeline and then select **Copy** to create a copy of it. If you need to concurrently run multiple pipelines that don't contain multiple branches, you can select all of these pipelines and then click **Run** to run them in a batch.
|
||||
On the **Pipelines** page, you can click <img src="/images/docs/v3.x/common-icons/three-dots.png" width="15" alt="icon" /> on the right side of the pipeline and then select **Copy** to create a copy of it. If you need to concurrently run multiple pipelines that don't contain multiple branches, you can select all of these pipelines and then click **Run** to run them in a batch.
|
||||
|
||||
{{</ notice >}}
|
||||
|
||||
|
|
@@ -328,13 +328,13 @@ This stage uses SonarQube to test your code. You can skip this stage if you do not need it.

1. You need to manually run the pipeline that is created through the graphical editing panel. Click **Run**, and you can see the three string parameters defined in Step 3. Click **OK** to run the pipeline.



2. To see the status of a pipeline, go to the **Run Records** tab and click the record you want to view.

3. Wait for a while, and the pipeline stops at the **Deploy to Dev** stage if it runs successfully. As the reviewer of the pipeline, `project-admin` needs to approve it before resources are deployed to the development environment.



4. Log out of KubeSphere and log back in to the console as `project-admin`. Go to your DevOps project and click the pipeline `graphical-pipeline`. Under the **Run Records** tab, click the record to be reviewed. To approve the pipeline, click **Proceed**.
@@ -350,13 +350,13 @@ This stage uses SonarQube to test your code. You can skip this stage if you do not need it.

Click the **Artifacts** tab and then click the icon on the right to download the artifact.



### Step 8: View code analysis results

On the **Code Check** page, view the code analysis result of this example pipeline, which is provided by SonarQube. If you did not configure SonarQube in advance, this section is not available. For more information, see [Integrate SonarQube into Pipelines](../../../how-to-integrate/sonarqube/).



### Step 9: Verify Kubernetes resources
@@ -374,7 +374,7 @@ On the **Code Check** page, view the code analysis result of this example pipeline

4. Now that the pipeline has run successfully, an image will be pushed to Docker Hub. Log in to Docker Hub and check the result.



5. The app is named `devops-sample` because that is the value of `APP_NAME`, and the tag is the value of `SNAPSHOT-$BUILD_NUMBER`. `$BUILD_NUMBER` is the serial number of a record under the **Run Records** tab.
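If you prefer to double-check from a terminal rather than the Docker Hub UI, pulling the freshly pushed tag is a quick test. A minimal sketch, assuming your own Docker Hub namespace and run number `5` (both are placeholders):

```bash
# Hypothetical values: substitute your own namespace and run number
docker pull <dockerhub-namespace>/devops-sample:SNAPSHOT-5
```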
@@ -65,7 +65,7 @@ There are eight stages as shown below in this example pipeline.

3. You also need to create a GitHub personal access token with the permissions shown in the image below, and then use the generated token to create Account Credentials (for example, `github-token`) for GitHub authentication in your DevOps project.



{{< notice note >}}
@@ -83,7 +83,7 @@ There are eight stages as shown below in this example pipeline.

3. Click the edit icon on the right to edit environment variables.



| Items | Value | Description |
| :--- | :--- | :--- |
@@ -185,7 +185,7 @@ The account `project-admin` needs to be created in advance since it is the reviewer.

{{< notice note >}}

- You can click <img src="/images/docs/v3.3/common-icons/three-dots.png" width="15" alt="icon" /> on the right side of the pipeline and then select **Copy** to create a copy of it. If you need to concurrently run multiple pipelines that don't contain multiple branches, you can select all of these pipelines and then click **Run** to run them in a batch.
- You can click <img src="/images/docs/v3.x/common-icons/three-dots.png" width="15" alt="icon" /> on the right side of the pipeline and then select **Copy** to create a copy of it. If you need to concurrently run multiple pipelines that don't contain multiple branches, you can select all of these pipelines and then click **Run** to run them in a batch.

- The pipeline details page shows **Sync Status**, which reflects the synchronization result between KubeSphere and Jenkins; you can see the **Successful** icon if the synchronization is successful.

{{</ notice >}}
@@ -20,7 +20,7 @@ The built-in Jenkins cannot share the same email configuration with the platform

2. If you have enabled the [multi-cluster feature](../../../../multicluster-management/) with member clusters imported, you can select a specific cluster to view its nodes. If you have not enabled the feature, skip to the next step.

3. Go to **Workloads** under **Application Workloads**, and select the project **kubesphere-devops-system** from the drop-down list. Click <img src="/images/docs/v3.3/common-icons/three-dots.png" height="15" alt="icon" /> on the right of `devops-jenkins` and select **Edit YAML** to edit its YAML.
3. Go to **Workloads** under **Application Workloads**, and select the project **kubesphere-devops-system** from the drop-down list. Click <img src="/images/docs/v3.x/common-icons/three-dots.png" height="15" alt="icon" /> on the right of `devops-jenkins` and select **Edit YAML** to edit its YAML.

4. Scroll down to the fields shown in the image below, which you need to specify. Click **OK** when you finish to save your changes.
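The same YAML can be opened from a terminal instead of the console. A sketch, assuming kubectl access to the cluster:

```bash
# Opens the devops-jenkins Deployment in your $EDITOR
kubectl -n kubesphere-devops-system edit deployment devops-jenkins
```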
@@ -30,7 +30,7 @@ The built-in Jenkins cannot share the same email configuration with the platform

{{</ notice >}}



| Environment Variable Name | Description |
| ------------------------- | -------------------------------- |
@@ -22,7 +22,7 @@ KubeSphere has the Jenkins Configuration as Code plugin installed by default to

Besides, you can find the `formula.yaml` file in the [ks-jenkins](https://github.com/kubesphere/ks-jenkins) repository, where you can view plugin versions and customize them based on your needs.



## Modify the ConfigMap
@@ -45,7 +45,7 @@ This tutorial demonstrates how to trigger a pipeline by using a webhook.

2. Go to `/deploy/dev-ol/` and click the file `devops-sample.yaml`.

3. Click <img src="/images/docs/v3.3/devops-user-guide/using-devops/pipeline-webhook/edit-btn.png" width="20px" alt="icon" /> to edit the file. For example, change the value of `spec.replicas` to `3`.
3. Click <img src="/images/docs/v3.x/devops-user-guide/using-devops/pipeline-webhook/edit-btn.png" width="20px" alt="icon" /> to edit the file. For example, change the value of `spec.replicas` to `3`.

4. Click **Commit changes** at the bottom of the page.
@@ -89,16 +89,16 @@ The following briefly introduces the CI and CI & CD pipeline templates.

- CI pipeline template





The CI pipeline template contains two stages. The **clone code** stage checks out code, and the **build & push** stage builds an image and pushes it to Docker Hub. You need to create credentials for your code repository and your Docker Hub registry in advance, and then set the URL of your repository and these credentials in the corresponding steps. After you finish editing, the pipeline is ready to run.

- CI & CD pipeline template





The CI & CD pipeline template contains six stages. For more information about each stage, refer to [Create a Pipeline Using a Jenkinsfile](../create-a-pipeline-using-jenkinsfile/#pipeline-overview), where you can find similar stages and their descriptions. You need to create credentials for your code repository, your Docker Hub registry, and the kubeconfig of your cluster in advance, and then set the URL of your repository and these credentials in the corresponding steps. After you finish editing, the pipeline is ready to run.
@@ -6,7 +6,7 @@ layout: "second"
linkTitle: "FAQ"
weight: 16000

icon: "/images/docs/v3.3/docs.svg"
icon: "/images/docs/v3.x/docs.svg"
---

This chapter answers and summarizes the questions users ask most frequently about KubeSphere. You can find these questions and answers in their respective sections, which are grouped by KubeSphere function.
@@ -30,7 +30,7 @@ For more information about creating a Kubernetes namespace, see [Namespaces Walk

1. Log in to the KubeSphere console as `admin` and go to the **Cluster Management** page. Click **Projects**, and you can see all the projects running on the current cluster, including the one just created.

2. The namespace created through kubectl does not belong to any workspace. Click <img src="/images/docs/v3.3/faq/access-control-and-account-management/add-exisiting-namespaces-to-a-kubesphere-workspace/three-dots.png" height="20px"> on the right and select **Assign Workspace**.
2. The namespace created through kubectl does not belong to any workspace. Click <img src="/images/docs/v3.x/faq/access-control-and-account-management/add-exisiting-namespaces-to-a-kubesphere-workspace/three-dots.png" height="20px"> on the right and select **Assign Workspace**.

3. In the dialog that appears, select a **Workspace** and a **Project Administrator** for the project, and click **OK**.
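Under the hood, assigning a workspace sets a label on the namespace, so the same result can likely be achieved with kubectl. A sketch with placeholder names, assuming `kubesphere.io/workspace` is the label the console writes:

```bash
# demo-namespace and demo-workspace are placeholders
kubectl label namespace demo-namespace kubesphere.io/workspace=demo-workspace
```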
@@ -14,7 +14,7 @@ Here are some of the frequently asked questions about user login failure.

You may see the image below when login fails. To find out the reason and solve the issue, perform the following steps:



1. Execute the following command to check the status of the user.
@@ -86,7 +86,7 @@ kubectl -n kubesphere-system get deploy ks-controller-manager -o jsonpath='{.spe

## Wrong Username or Password



Run the following command to verify that the username and the password are correct.
|
|||
|
|
@ -22,4 +22,4 @@ You have installed KubeSphere.
|
|||
|
||||
3. On the **Basic Information** page, select a desired language from the **Language** drop-down list.
|
||||
|
||||
4. Click <img src="/images/docs/v3.3/faq/kubesphere-web-console/change-console-language/check-mark.png" width='25' alt="icon" /> to save it.
|
||||
4. Click <img src="/images/docs/v3.x/faq/kubesphere-web-console/change-console-language/check-mark.png" width='25' alt="icon" /> to save it.
|
||||
|
|
@@ -8,4 +8,4 @@ Weight: 16510

The KubeSphere web console supports major web browsers, including **Chrome, Firefox, Safari, Opera, and Edge**. You only need to check the supported versions of these browsers, listed in the green box of the table below:


|
|||
|
|
@ -18,7 +18,7 @@ Editing resources in `system-workspace` may cause unexpected results, such as Ku
|
|||
|
||||
## Edit the Console Configuration
|
||||
|
||||
1. Log in to KubeSphere as `admin`. Click <img src="/images/docs/v3.3/common-icons/hammer.png" height="25" width="25" alt="icon" /> in the lower-right corner and select **Kubectl**.
|
||||
1. Log in to KubeSphere as `admin`. Click <img src="/images/docs/v3.x/common-icons/hammer.png" height="25" width="25" alt="icon" /> in the lower-right corner and select **Kubectl**.
|
||||
|
||||
2. Execute the following command:
|
||||
|
||||
|
|
|
|||
|
|
@ -73,7 +73,7 @@ If you have trouble deploying applications into your project when running a pipe
|
|||
|
||||
2. The output is similar to the following:
|
||||
|
||||

|
||||

|
||||
|
||||
### Step 3: Create a DevOps kubeconfig
|
||||
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ If you have enabled [the multi-cluster feature](../../../multicluster-management
|
|||
|
||||
3. Enter `clusterconfiguration` in the search bar and click the result to go to its detail page.
|
||||
|
||||
4. Click <img src="/images/docs/v3.3/faq/installation/telemetry-in-kubesphere/three-dots.png" height="20px" alt="icon"> on the right of `ks-installer` and select **Edit YAML**.
|
||||
4. Click <img src="/images/docs/v3.x/faq/installation/telemetry-in-kubesphere/three-dots.png" height="20px" alt="icon"> on the right of `ks-installer` and select **Edit YAML**.
|
||||
|
||||
5. Scroll down to the bottom of the file, add `telemetry_enabled: false`, and then click **OK**.
|
||||
|
||||
|
|
|
|||
|
|
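If you prefer the command line over the console, the same change can be applied as a patch. A sketch, assuming `telemetry_enabled` sits at the top level of the `ClusterConfiguration` spec as the step above suggests:

```bash
# cc is the short name for clusterconfiguration in KubeSphere
kubectl -n kubesphere-system patch cc ks-installer --type merge \
  -p '{"spec":{"telemetry_enabled":false}}'
```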
@@ -6,7 +6,7 @@ layout: "second"
linkTitle: "Installing on Kubernetes"
weight: 4000

icon: "/images/docs/v3.3/docs.svg"
icon: "/images/docs/v3.x/docs.svg"
---

This chapter demonstrates how to deploy KubeSphere on existing Kubernetes clusters hosted in the cloud or on premises. As a highly flexible solution for container orchestration, KubeSphere can be deployed across various Kubernetes engines.
@@ -15,14 +15,14 @@ This chapter demonstrates how to deploy KubeSphere on existing Kubernetes clusters

Below you will find some of the most viewed and helpful pages in this chapter. It is highly recommended that you refer to them first.

{{< popularPage icon="/images/docs/v3.3/brand-icons/gke.jpg" title="Deploy KubeSphere on GKE" description="Provision KubeSphere on existing Kubernetes clusters on GKE." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/" >}}
{{< popularPage icon="/images/docs/v3.x/brand-icons/gke.jpg" title="Deploy KubeSphere on GKE" description="Provision KubeSphere on existing Kubernetes clusters on GKE." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/" >}}

{{< popularPage icon="/images/docs/v3.3/bitmap.jpg" title="Deploy KubeSphere on AWS EKS" description="Provision KubeSphere on existing Kubernetes clusters on EKS." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/" >}}
{{< popularPage icon="/images/docs/v3.x/bitmap.jpg" title="Deploy KubeSphere on AWS EKS" description="Provision KubeSphere on existing Kubernetes clusters on EKS." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/" >}}

{{< popularPage icon="/images/docs/v3.3/brand-icons/aks.jpg" title="Deploy KubeSphere on AKS" description="Provision KubeSphere on existing Kubernetes clusters on AKS." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/" >}}
{{< popularPage icon="/images/docs/v3.x/brand-icons/aks.jpg" title="Deploy KubeSphere on AKS" description="Provision KubeSphere on existing Kubernetes clusters on AKS." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/" >}}

{{< popularPage icon="/images/docs/v3.3/brand-icons/huawei.svg" title="Deploy KubeSphere on CCE" description="Provision KubeSphere on existing Kubernetes clusters on Huawei CCE." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce/" >}}
{{< popularPage icon="/images/docs/v3.x/brand-icons/huawei.svg" title="Deploy KubeSphere on CCE" description="Provision KubeSphere on existing Kubernetes clusters on Huawei CCE." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce/" >}}

{{< popularPage icon="/images/docs/v3.3/brand-icons/oracle.jpg" title="Deploy KubeSphere on Oracle OKE" description="Provision KubeSphere on existing Kubernetes clusters on OKE." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/" >}}
{{< popularPage icon="/images/docs/v3.x/brand-icons/oracle.jpg" title="Deploy KubeSphere on Oracle OKE" description="Provision KubeSphere on existing Kubernetes clusters on OKE." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/" >}}

{{< popularPage icon="/images/docs/v3.3/brand-icons/digital-ocean.jpg" title="Deploy KubeSphere on DO" description="Provision KubeSphere on existing Kubernetes clusters on DigitalOcean." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/" >}}
{{< popularPage icon="/images/docs/v3.x/brand-icons/digital-ocean.jpg" title="Deploy KubeSphere on DO" description="Provision KubeSphere on existing Kubernetes clusters on DigitalOcean." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/" >}}
@@ -16,11 +16,11 @@ Azure can help you implement infrastructure as code by providing resource deploy

You don't have to install the Azure CLI on your machine, as Azure provides a web-based terminal. Click the Cloud Shell button on the menu bar in the upper-right corner of the Azure portal.



Select **Bash** Shell.



### Create a Resource Group
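In Cloud Shell, creating the resource group is a one-liner. A sketch using the group name and region that appear later in this guide; adjust both to your environment:

```bash
# KubeSphereRG and westus are the values used elsewhere in this guide
az group create --name KubeSphereRG --location westus
```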
@@ -62,15 +62,15 @@ aks-nodepool1-23754246-vmss000000 Ready agent 38m v1.16.13

After you execute all the commands above, you can see two resource groups created in the Azure portal.



Azure Kubernetes Service itself will be placed in `KubeSphereRG`.



All the other resources, such as VMs, the load balancer, and the virtual network, will be placed in `MC_KubeSphereRG_KuberSphereCluster_westus`.



## Deploy KubeSphere on AKS
@@ -6,7 +6,7 @@ description: 'Learn how to deploy KubeSphere on DigitalOcean.'
weight: 4230
---



This guide walks you through the steps of deploying KubeSphere on [DigitalOcean Kubernetes](https://www.digitalocean.com/products/kubernetes/).
@@ -14,7 +14,7 @@ This guide walks you through the steps of deploying KubeSphere on [DigitalOcean

A Kubernetes cluster in DO is a prerequisite for installing KubeSphere. Go to your [DO account](https://cloud.digitalocean.com/) and refer to the image below to create a cluster from the navigation menu.



You need to select:
@@ -24,7 +24,7 @@ You need to select:
4. Cluster capacity (for example, 2 standard nodes with 2 vCPUs and 4 GB of RAM each)
5. A name for the cluster (for example, *kubesphere-3*)



{{< notice note >}}
@@ -36,7 +36,7 @@ You need to select:

When the cluster is ready, you can download the config file for kubectl.



## Install KubeSphere on DOKS
@@ -82,23 +82,23 @@ Now that KubeSphere is installed, you can access the web console of KubeSphere b

- Go to the Kubernetes Dashboard provided by DigitalOcean.



- Select the **kubesphere-system** namespace.



- In **Services** under **Service**, edit the service **ks-console**.



- Change the type from `NodePort` to `LoadBalancer`. Save the file when you finish.



- Access KubeSphere's web console using the endpoint generated by DO.



{{< notice tip >}}
@@ -17,15 +17,15 @@ pip3 install awscli --upgrade --user
```

Check the installation with `aws --version`.



## Prepare an EKS Cluster

1. A standard Kubernetes cluster in AWS is a prerequisite for installing KubeSphere. Go to the navigation menu and refer to the image below to create a cluster.


2. On the **Configure cluster** page, fill in the following fields:



- Name: A unique name for your cluster.
@@ -40,7 +40,7 @@ Check the installation with `aws --version`.
- Tags (Optional): Add any tags to your cluster. For more information, see [Tagging your Amazon EKS resources](https://docs.aws.amazon.com/eks/latest/userguide/eks-using-tags.html).

3. Select **Next**. On the **Specify networking** page, select values for the following fields:


- VPC: The VPC that you created previously in [Create your Amazon EKS cluster VPC](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#vpc-create). You can find the name of your VPC in the drop-down list.
@@ -49,7 +49,7 @@ Check the installation with `aws --version`.
- Security groups: The SecurityGroups value from the AWS CloudFormation output that you generated with [Create your Amazon EKS cluster VPC](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#vpc-create). This security group has ControlPlaneSecurityGroup in the drop-down name.

- For **Cluster endpoint access**, choose one of the following options:


- Public: Enables only public access to your cluster's Kubernetes API server endpoint. Kubernetes API requests that originate from outside of your cluster's VPC use the public endpoint. By default, access is allowed from any source IP address. You can optionally restrict access to one or more CIDR ranges such as 192.168.0.0/16, for example, by selecting **Advanced settings** and then selecting **Add source**.
@@ -62,20 +62,20 @@ Check the installation with `aws --version`.
- Public and private: Enables public and private access.

4. Select **Next**. On the **Configure logging** page, you can optionally choose which log types you want to enable. By default, each log type is **Disabled**. For more information, see [Amazon EKS control plane logging](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html).


5. Select **Next**. On the **Review and create** page, review the information that you entered or selected on the previous pages. Select **Edit** if you need to make changes to any of your selections. Once you're satisfied with your settings, select **Create**. The **Status** field shows **CREATING** until the cluster provisioning process completes.



- For more information about the previous options, see [Modifying cluster endpoint access](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html#modify-endpoint-access).
When your cluster provisioning is complete (usually between 10 and 15 minutes), save the API server endpoint and Certificate authority values. These are used in your kubectl configuration.



6. Create a **Node Group** and define three nodes in this cluster.


7. Configure the node group.


{{< notice note >}}
@@ -166,10 +166,10 @@ Now that KubeSphere is installed, you can access the web console of KubeSphere b
```

- Edit the configuration of the service **ks-console** by executing `kubectl edit svc ks-console -n kubesphere-system` and change `type` from `NodePort` to `LoadBalancer`. Save the file when you finish (see the sketch after this list).


- Run `kubectl get svc -n kubesphere-system` and get your external IP.


- Access the web console of KubeSphere using the external IP generated by EKS.
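If you would rather not open an editor, the same change can be applied non-interactively. A sketch, assuming the service lives in `kubesphere-system`:

```bash
# Switch ks-console to a LoadBalancer service and read back the external IP
kubectl -n kubesphere-system patch svc ks-console -p '{"spec":{"type":"LoadBalancer"}}'
kubectl -n kubesphere-system get svc ks-console
```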
@@ -23,7 +23,7 @@ First, create a Kubernetes cluster based on the requirements below.
- Go to **Resource Management** > **Cluster Management** > **Basic Information** > **Network**, and bind `Public apiserver`.
- Select **kubectl** in the right column, go to **Download kubectl configuration file**, and click **Click here to download** to get a public key for kubectl.



After you get the configuration file for kubectl, use the kubectl command line to verify the connection to the cluster.
@@ -83,7 +83,7 @@ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3

Go to **Workload** > **Pod**, and check the running status of the Pods in the `kubesphere-system` namespace to follow the minimal deployment of KubeSphere. Check `ks-console-xxxx` in that namespace to confirm that the KubeSphere console is available.



### Expose KubeSphere Console
@@ -91,11 +91,11 @@ Check the running status of Pods in `kubesphere-system` namespace and make sure

Go to **Resource Management** > **Network** and choose the `ks-console` service. It is suggested that you choose `LoadBalancer` (a public IP is required). The configuration is shown below.



For other detailed configurations, the default settings are fine; you can also adjust them based on your needs.



After you set LoadBalancer for the KubeSphere console, you can visit it via the given address. Go to the KubeSphere login page and use the default account (username `admin` and password `P@88w0rd`) to log in.
@@ -6,7 +6,7 @@ linkTitle: "Overview"
weight: 4110
---



As part of KubeSphere's commitment to providing a plug-and-play architecture, it can be easily installed on existing Kubernetes clusters. More specifically, KubeSphere can be deployed on Kubernetes either hosted on clouds (for example, AWS EKS, QingCloud QKE, and Google GKE) or on-premises. This is because KubeSphere does not hack Kubernetes itself; it only interacts with the Kubernetes API to manage Kubernetes cluster resources. In other words, KubeSphere can be installed on any native Kubernetes cluster and Kubernetes distribution.
@@ -48,7 +48,7 @@ After you make sure your existing Kubernetes cluster meets all the requirements,

4. Make sure port 30880 is open in your security groups, and access the web console through the NodePort (`IP:30880`) with the default account and password (`admin/P@88w0rd`).



## Enable Pluggable Components (Optional)
@@ -30,7 +30,7 @@ You can use Harbor or any other private image registries. This tutorial uses Doc

2. Make sure you specify a domain name in the `Common Name` field when you generate your own certificate. For instance, the field is set to `dockerhub.kubekey.local` in this example.



### Start the Docker registry
@@ -6,7 +6,7 @@ layout: "second"
linkTitle: "Installing on Linux"
weight: 3000

icon: "/images/docs/v3.3/docs.svg"
icon: "/images/docs/v3.x/docs.svg"
---

This chapter demonstrates how to use KubeKey to provision a production-ready Kubernetes and KubeSphere cluster on Linux in different environments. You can also use KubeKey to easily scale your cluster out and in, and to set various storage classes based on your needs.
@@ -14,4 +14,4 @@ This chapter demonstrates how to use KubeKey to provision a production-ready Kub

Below you will find some of the most viewed and helpful pages in this chapter. It is highly recommended that you refer to them first.

{{< popularPage icon="/images/docs/v3.3/qingcloud-2.svg" title="Deploy KubeSphere on QingCloud" description="Provision an HA KubeSphere cluster on QingCloud." link="../installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms/" >}}
{{< popularPage icon="/images/docs/v3.x/qingcloud-2.svg" title="Deploy KubeSphere on QingCloud" description="Provision an HA KubeSphere cluster on QingCloud." link="../installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms/" >}}
@@ -8,7 +8,7 @@ weight: 3630

KubeSphere leverages [KubeEdge](https://kubeedge.io/en/) to extend native containerized application orchestration to hosts at the edge. With separate cloud and edge core modules, KubeEdge provides a complete edge computing solution, though installing it on your own can be complex and difficult.



{{< notice note >}}
@@ -129,7 +129,7 @@ Perform the following steps to configure [EdgeMesh](https://kubeedge.io/en/docs/

3. Click **Add**. In the dialog that appears, set a node name and enter an internal IP address of your edge node. Click **Validate** to continue.



{{< notice note >}}
@@ -140,7 +140,7 @@ Perform the following steps to configure [EdgeMesh](https://kubeedge.io/en/docs/

4. Copy the command automatically created under **Edge Node Configuration Command** and run it on your edge node.



{{< notice note >}}
@@ -166,7 +166,7 @@ To collect monitoring information on edge nodes, you need to enable `metrics_server`.

3. In the search bar on the right pane, enter `clusterconfiguration`, and click the result to go to its details page.

4. Click <img src="/images/docs/v3.3/common-icons/three-dots.png" width="15" alt="icon" /> on the right of `ks-installer`, and click **Edit YAML**.
4. Click <img src="/images/docs/v3.x/common-icons/three-dots.png" width="15" alt="icon" /> on the right of `ks-installer`, and click **Edit YAML**.

5. Search for **metrics_server**, and change the value of `enabled` from `false` to `true`.
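Equivalently, the flag can be flipped from the command line. A sketch, assuming the field path `spec.metrics_server.enabled` in the `ks-installer` ClusterConfiguration:

```bash
kubectl -n kubesphere-system patch cc ks-installer --type merge \
  -p '{"spec":{"metrics_server":{"enabled":true}}}'
```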
@@ -14,7 +14,7 @@ This tutorial demonstrates the general configurations of a high-availability cluster

Make sure you have prepared six Linux machines before you begin, with three of them serving as control plane nodes and the other three as worker nodes. The following image shows details of these machines, including their private IP addresses and roles. For more information about system and network requirements, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#step-1-prepare-linux-hosts).



## Configure a Load Balancer
|
|||
|
|
@ -14,7 +14,7 @@ This document describes how to use the built-in high availability mode when inst
|
|||
|
||||
The following figure shows the example architecture of the built-in high availability mode. For more information about system and network requirements, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#step-1-prepare-linux-hosts).
|
||||
|
||||

|
||||

|
||||
|
||||
{{< notice note >}}
|
||||
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ This tutorial demonstrates how to configure Keepalived and HAproxy for load bala
|
|||
|
||||
The example cluster has three master nodes, three worker nodes, two nodes for load balancing and one virtual IP address. The virtual IP address in this example may also be called "a floating IP address". That means in the event of node failures, the IP address can be passed between nodes allowing for failover, thus achieving high availability.
|
||||
|
||||

|
||||

|
||||
|
||||
Notice that in this example, Keepalived and HAproxy are not installed on any of the master nodes. Admittedly, you can do that and high availability can also be achieved. That said, configuring two specific nodes for load balancing (You can add more nodes of this kind as needed) is more secure. Only Keepalived and HAproxy will be installed on these two nodes, avoiding any potential conflicts with any Kubernetes components and services.
|
||||
|
||||
|
|
|
|||
|
|
@ -500,7 +500,7 @@ In KubeKey v2.1.0, we bring in concepts of manifest and artifact, which provides
|
|||
|
||||
Method 2: Log in to Harbor and create a project. Set the project to **Public**, so that any user can pull images from this project. For more information, please refer to [Create Projects]( https://goharbor.io/docs/1.10/working-with-projects/create-projects/).
|
||||
|
||||

|
||||

|
||||
|
||||
6. Run the following command again to modify the cluster configuration file:
|
||||
|
||||
|
|
@@ -581,7 +581,7 @@ In KubeKey v2.1.0, we bring in the concepts of manifest and artifact, which provide

9. Access KubeSphere's web console at `http://{IP}:30880` using the default account and password `admin/P@88w0rd`.



{{< notice note >}}
@@ -335,7 +335,7 @@ To access the console, you may need to configure port forwarding rules depending

{{</ notice >}}



## Enable kubectl Autocompletion
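A minimal sketch of what this section sets up, assuming Bash with the `bash-completion` package installed:

```bash
echo 'source <(kubectl completion bash)' >> ~/.bashrc
source ~/.bashrc
```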
@@ -21,7 +21,7 @@ This tutorial walks you through an example of how to create Keepalived and HAProxy

## Architecture



## Prepare Linux Hosts
@@ -45,41 +45,41 @@ You do not need to create a virtual machine for `vip` (i.e. Virtual IP) above, s

You can follow the New Virtual Machine wizard to create a virtual machine to place in the VMware Host Client inventory.



1. In the first step, **Select a creation type**, you can deploy a virtual machine from an OVF or OVA file, or register an existing virtual machine directly.



2. When you create a new virtual machine, provide a unique name to distinguish it from existing virtual machines on the host you are managing.



3. Select a compute resource and storage (datastore) for the configuration and disk files. You can select the datastore that has the most suitable properties, such as size, speed, and availability, for your virtual machine storage.





4. Select a guest operating system. The wizard will provide the appropriate defaults for the operating system installation.



5. Before you finish deploying a new virtual machine, you have the option to set **Virtual Hardware** and **VM Options**. You can refer to the images below for some of the fields.









6. On the **Ready to complete** page, review the configuration selections that you have made for the virtual machine. Click **Finish** in the bottom-right corner to continue.



## Install a Load Balancer using Keepalived and HAProxy
@@ -20,7 +20,7 @@ To make sure the platform can create cloud disks for your cluster, you need to p

1. Log in to the web console of [QingCloud](https://console.qingcloud.com/login) and select **Access Key** from the drop-down list in the top-right corner.



2. Click **Create** to generate keys. Download the key after it is created; it is stored in a CSV file.
@@ -47,7 +47,7 @@ The separate configuration file contains all parameters of QingCloud CSI which w

2. The `zone` field specifies where your cloud disks are created. On the QingCloud platform, you must select a zone before you create them.



Make sure the value you specify for `zone` matches the region ID below:
@@ -29,7 +29,7 @@ Besides these VMs, other resources like Load Balancer, Virtual Network and Netwo

Six machines running **Ubuntu 18.04** will be deployed in an Azure resource group. Three of them are grouped into an availability set, serving as both control plane and etcd nodes. The other three VMs will be defined in a VMSS (virtual machine scale set) where worker nodes will run.



These VMs will be attached to a load balancer. There are two predefined rules in the load balancer:
@@ -60,7 +60,7 @@ You don't have to create these resources one by one. According to the best pract

4. Copy your public SSH key into the **Admin Key** field. Alternatively, create a new one with `ssh-keygen`.



{{< notice note >}}
@@ -74,7 +74,7 @@ Password authentication is restricted in Linux configurations. Only SSH is accepted.

After the deployment succeeds, all the resources will be displayed in the resource group `KubeSphereVMRG`. Record the public IP address of the load balancer and the private IP addresses of the VMs; you will need them later.



## Deploy Kubernetes and KubeSphere
@@ -259,6 +259,6 @@ Azure Virtual Network doesn't support the IPIP mode used by [Calico](https://doc
As the Kubernetes cluster is set up on Azure instances directly, the load balancer is not integrated with [Kubernetes Services](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). However, you can still manually map the NodePort to the load balancer. Two steps are required.

1. Create a new load balancing rule in the load balancer.

2. Create an inbound security rule to allow Internet access in the network security group.

@@ -22,7 +22,7 @@ This tutorial walks you through an example of how to create two [QingCloud load

This example prepares six machines running **Ubuntu 16.04.6**. You will create two load balancers, and deploy three control plane nodes and etcd nodes on three of the machines. You can configure these control plane and etcd nodes in `config-sample.yaml` created by KubeKey (note that this is the default name, which you can change).



{{< notice note >}}
@@ -40,15 +40,15 @@ This step demonstrates how to create load balancers on the QingCloud platform.

1. Log in to the [QingCloud console](https://console.qingcloud.com/login). In the menu on the left, under **Network & CDN**, select **Load Balancers**. Click **Create** to create a load balancer.



2. In the pop-up window, set a name for the load balancer. Choose the VxNet where your machines are created from the **Network** drop-down list, `pn` in this example. Other fields can keep the default values as shown below. Click **Submit** to finish.



3. Click the load balancer. On its detail page, create a listener that listens on port `6443` with the **Listener Protocol** set to `TCP`.



- **Name**: Define a name for this listener
- **Listener Protocol**: Select the `TCP` protocol
@@ -65,7 +65,7 @@ This step demonstrates how to create load balancers on the QingCloud platform.

4. Click **Add Backend**, and choose the VxNet you just selected (in this example, `pn`). Click **Advanced Search**, choose the three control plane nodes, and set the port to `6443`, the default secure port of the API server.



Click **Submit** when you finish.
|
@ -77,7 +77,7 @@ This step demonstrates how to create load balancers on the QingCloud platform.
|
|||
|
||||
{{</ notice >}}
|
||||
|
||||

|
||||

|
||||
|
||||
Record the Intranet VIP shown under **Networks**. The IP address will be added later to the configuration file.
|
||||
|
||||
|
|
@@ -93,7 +93,7 @@ Two elastic IPs are needed for this tutorial, one for the VPC network and the ot

1. Similarly, create an external load balancer, but do not select a VxNet for the **Network** field. Bind the EIP that you created to this load balancer by clicking **Add IPv4**.



2. On the load balancer's detail page, create a listener that listens on port `30880` (the NodePort of the KubeSphere console) with **Listener Protocol** set to `HTTP`.
@@ -103,11 +103,11 @@ Two elastic IPs are needed for this tutorial, one for the VPC network and the ot

{{</ notice >}}



3. Click **Add Backend**. In **Advanced Search**, choose the six machines on which you are going to install KubeSphere within the VxNet `pn`, and set the port to `30880`.



Click **Submit** when you finish.
@@ -320,11 +320,11 @@ https://kubesphere.io 2020-08-13 10:50:24

Now that you have finished the installation, go back to the detail pages of both the internal and external load balancers to see their status.



Both listeners show that the status is **Active**, meaning the nodes are up and running.



In the web console of KubeSphere, you can also see that all the nodes are functioning well.
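As a final smoke test from outside the VPC, the console should answer on the EIP bound to the external load balancer; the address below is a placeholder:

```bash
curl -I http://<external-eip>:30880
```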
@@ -7,7 +7,7 @@ linkTitle: "Introduction"

weight: 1000

icon: "/images/docs/v3.3/docs.svg"
icon: "/images/docs/v3.x/docs.svg"

---
|
|||
|
|
@ -12,4 +12,4 @@ KubeSphere integrates **a wide breadth of major ecosystem tools related to Kuber
|
|||
|
||||
KubeSphere also features new capabilities that are not yet available in upstream Kubernetes, alleviating the pain points of Kubernetes including storage, network, security and usability. Not only does KubeSphere allow developers and DevOps teams use their favorite tools in a unified console, but, most importantly, these functionalities are loosely coupled with the platform since they are pluggable and optional.
|
||||
|
||||

|
||||

|
||||
|
|
@@ -8,6 +8,6 @@ weight: 1400

In June 2022, KubeSphere 3.3 was released with more exciting features. This release introduces GitOps-based continuous deployment and supports Git-based code repository management to further optimize the DevOps feature. Moreover, it also provides enhanced features for storage, multi-tenancy, multi-cluster management, observability, the app store, service mesh, and edge computing, and further refines the interactive design for a better user experience.

If you want to know details about the new features of KubeSphere 3.3, you can read the article [KubeSphere 3.3.0: Embrace GitOps](/../../../news/kubesphere-3.3.0-ga-announcement/).
If you want to know details about the new features of KubeSphere 3.3, you can read the article [KubeSphere 3.3.0: Embrace GitOps](../../../../news/kubesphere-3.3.0-ga-announcement/).

In addition to the above highlights, this release also includes other functionality upgrades and fixes known bugs. Some features were deprecated or removed in 3.3. For more detailed information, see the [Release Notes for 3.3.0](../../../v3.3/release/release-v330/), [Release Notes for 3.3.1](../../../v3.3/release/release-v331/), and [Release Notes for 3.3.2](../../../v3.3/release/release-v332/).
@@ -14,7 +14,7 @@ KubeSphere also represents a multi-tenant enterprise-grade [Kubernetes container

The KubeSphere team developed [KubeKey](https://github.com/kubesphere/kubekey), a brand-new open-source installer, to help enterprises quickly set up a Kubernetes cluster on public clouds or in data centers. Users have the option to install Kubernetes only or to install both KubeSphere and Kubernetes. KubeKey provides different installation options, such as all-in-one and multi-node installation. It is also an efficient tool to install cloud-native add-ons and to upgrade and scale your Kubernetes cluster.



## O&M Friendly
@@ -36,4 +36,4 @@ With the open-source model, the KubeSphere community advances development in an

KubeSphere is a member of CNCF and a [Kubernetes Conformance Certified platform](https://www.cncf.io/certification/software-conformance/#logos), further enriching the [CNCF Cloud Native Landscape](https://landscape.cncf.io/?landscape=observability-and-analysis&license=apache-license-2-0).


|
|
@ -7,7 +7,7 @@ linkTitle: "Multi-cluster Management"
|
|||
|
||||
weight: 5000
|
||||
|
||||
icon: "/images/docs/v3.3/docs.svg"
|
||||
icon: "/images/docs/v3.x/docs.svg"
|
||||
---
|
||||
|
||||
## Introduction
|
||||
|
|
|
|||
|
|
@@ -10,7 +10,7 @@ In multi-cluster environments, if the certificate of a member cluster is about to expire

1. Choose **Platform > Cluster Management**.

2. On the **Cluster Management** page, click <img src="/images/docs/v3.3/enable-pluggable-components/kubesphere-app-store/three-dots.png" height="20px" alt="icon"> on the right of the member cluster, and click **Update KubeConfig**.
2. On the **Cluster Management** page, click <img src="/images/docs/v3.x/enable-pluggable-components/kubesphere-app-store/three-dots.png" height="20px" alt="icon"> on the right of the member cluster, and click **Update KubeConfig**.

3. In the **Update KubeConfig** dialog box that is displayed, enter the new kubeconfig, and click **Update**.
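To see how long the current kubeconfig certificate is still valid before updating it, you can decode it locally. A sketch, assuming the kubeconfig embeds the client certificate data:

```bash
kubectl config view --raw -o jsonpath='{.users[0].user.client-certificate-data}' \
  | base64 -d | openssl x509 -noout -enddate
```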
@@ -33,7 +33,7 @@ This tutorial demonstrates how to import an Alibaba Cloud Kubernetes (ACK) cluster

3. Go to **CRDs**, enter `ClusterConfiguration` in the search bar, and then press **Enter** on your keyboard. Click **ClusterConfiguration** to go to its detail page.

4. Click <img src="/images/docs/v3.3/multicluster-management/import-cloud-hosted-k8s/import-ack/three-dots.png" height="20px" alt="icon"> on the right and then select **Edit YAML** to edit `ks-installer`.
4. Click <img src="/images/docs/v3.x/multicluster-management/import-cloud-hosted-k8s/import-ack/three-dots.png" height="20px" alt="icon"> on the right and then select **Edit YAML** to edit `ks-installer`.

5. In the YAML file of `ks-installer`, change the value of `jwtSecret` to the corresponding value shown above and set the value of `clusterRole` to `member`. Click **Update** to save your changes.
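The same two fields can be patched without opening the editor. A sketch with placeholder values, assuming the field paths used by ks-installer (`spec.authentication.jwtSecret` and `spec.multicluster.clusterRole`):

```bash
kubectl -n kubesphere-system patch cc ks-installer --type merge -p \
  '{"spec":{"authentication":{"jwtSecret":"<jwt-secret-from-host>"},"multicluster":{"clusterRole":"member"}}}'
```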
@@ -57,7 +57,7 @@ This tutorial demonstrates how to import an Alibaba Cloud Kubernetes (ACK) cluster

Log in to the web console of Alibaba Cloud. Go to **Clusters** under **Container Service - Kubernetes**, click your cluster to go to its detail page, and then select the **Connection Information** tab. You can see the kubeconfig file under the **Public Access** tab. Copy the contents of the kubeconfig file.



### Step 3: Import the ACK member cluster
@@ -37,7 +37,7 @@ You need to deploy KubeSphere on your EKS cluster first. For more information ab

3. Go to **CRDs**, enter `ClusterConfiguration` in the search bar, and then press **Enter** on your keyboard. Click **ClusterConfiguration** to go to its detail page.

4. Click <img src="/images/docs/v3.3/multicluster-management/import-cloud-hosted-k8s/import-eks/three-dots.png" height="20px" alt="icon"> on the right and then select **Edit YAML** to edit `ks-installer`.
4. Click <img src="/images/docs/v3.x/multicluster-management/import-cloud-hosted-k8s/import-eks/three-dots.png" height="20px" alt="icon"> on the right and then select **Edit YAML** to edit `ks-installer`.

5. In the YAML file of `ks-installer`, change the value of `jwtSecret` to the corresponding value shown above and set the value of `clusterRole` to `member`. Click **Update** to save your changes.
@@ -37,7 +37,7 @@ You need to deploy KubeSphere on your GKE cluster first. For more information ab

3. Go to **CRDs**, enter `ClusterConfiguration` in the search bar, and then press **Enter** on your keyboard. Click **ClusterConfiguration** to go to its detail page.

4. Click <img src="/images/docs/v3.3/multicluster-management/import-cloud-hosted-k8s/import-gke/three-dots.png" height="20px" alt="icon"> on the right and then select **Edit YAML** to edit `ks-installer`.
4. Click <img src="/images/docs/v3.x/multicluster-management/import-cloud-hosted-k8s/import-gke/three-dots.png" height="20px" alt="icon"> on the right and then select **Edit YAML** to edit `ks-installer`.

5. In the YAML file of `ks-installer`, change the value of `jwtSecret` to the corresponding value shown above and set the value of `clusterRole` to `member`.
@@ -16,7 +16,7 @@ There can only be one host cluster, while multiple member clusters can exist at the same time.

If you are using on-premises Kubernetes clusters built through kubeadm, install KubeSphere on your Kubernetes clusters by referring to [Air-gapped Installation on Kubernetes](../../../installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/), and then enable KubeSphere multi-cluster management through direct connection or agent connection.



## Vendor Agnostic
@@ -12,4 +12,4 @@ The most common use cases of multi-cluster management include service traffic lo

KubeSphere is developed to address multi-cluster and multi-cloud management challenges, including the scenarios mentioned above. It provides users with a unified control plane to distribute applications and their replicas to multiple clusters, from public clouds to on-premises environments. KubeSphere also boasts rich observability across multiple clusters, including centralized monitoring, logging, events, and auditing logs.


@@ -21,7 +21,7 @@ You can remove a cluster by using either of the following methods:

1. Click **Platform** in the upper-left corner and select **Cluster Management**.

2. In the **Member Clusters** area, click <img src="/images/docs/v3.3/common-icons/three-dots.png" width="15" alt="icon" /> on the right of the cluster that you want to remove from the control plane, and then click **Remove Cluster**.
2. In the **Member Clusters** area, click <img src="/images/docs/v3.x/common-icons/three-dots.png" width="15" alt="icon" /> on the right of the cluster that you want to remove from the control plane, and then click **Remove Cluster**.

3. In the **Remove Cluster** dialog box that is displayed, read the risk alert carefully. If you still want to proceed, enter the name of the member cluster, and click **OK**.
@@ -7,7 +7,7 @@ linkTitle: "Enable Pluggable Components"

weight: 6000

icon: "/images/docs/v3.3/docs.svg"
icon: "/images/docs/v3.x/docs.svg"
---

This chapter demonstrates the detailed steps for enabling different components in KubeSphere both before and after installation, so that you can take full advantage of the [container platform](https://kubesphere.io/) for your business.
@@ -72,7 +72,7 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu
A Custom Resource Definition (CRD) allows users to create a new type of resource without adding another API server. They can use these resources like any other native Kubernetes objects.
{{</ notice >}}

3. In **Custom Resources**, click <img src="/images/docs/v3.3/enable-pluggable-components/kubesphere-alerting/three-dots.png" height="20px"> on the right of `ks-installer` and select **Edit YAML**.
3. In **Custom Resources**, click <img src="/images/docs/v3.x/enable-pluggable-components/kubesphere-alerting/three-dots.png" height="20px"> on the right of `ks-installer` and select **Edit YAML**.

4. In this YAML file, navigate to `alerting` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration.
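For a scripted setup, the same edit can be expressed as a patch. A sketch, assuming the `spec.alerting.enabled` field path:

```bash
kubectl -n kubesphere-system patch cc ks-installer --type merge \
  -p '{"spec":{"alerting":{"enabled":true}}}'
```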
@@ -89,7 +89,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource

{{< notice note >}}

You can find the web kubectl tool by clicking <img src="/images/docs/v3.3/enable-pluggable-components/kubesphere-alerting/hammer.png" height="20px"> in the lower-right corner of the console.
You can find the web kubectl tool by clicking <img src="/images/docs/v3.x/enable-pluggable-components/kubesphere-alerting/hammer.png" height="20px"> in the lower-right corner of the console.
{{</ notice >}}

## Verify the Installation of the Component
@@ -80,7 +80,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource

{{</ notice >}}

3. In **Custom Resources**, click <img src="/images/docs/v3.3/enable-pluggable-components/kubesphere-app-store/three-dots.png" height="20px"> on the right of `ks-installer` and select **Edit YAML**.
3. In **Custom Resources**, click <img src="/images/docs/v3.x/enable-pluggable-components/kubesphere-app-store/three-dots.png" height="20px"> on the right of `ks-installer` and select **Edit YAML**.

4. In this YAML file, search for `openpitrix` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration.
@@ -98,7 +98,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource

{{< notice note >}}

You can find the web kubectl tool by clicking <img src="/images/docs/v3.3/enable-pluggable-components/kubesphere-app-store/hammer.png" height="20px"> in the lower-right corner of the console.
You can find the web kubectl tool by clicking <img src="/images/docs/v3.x/enable-pluggable-components/kubesphere-app-store/hammer.png" height="20px"> in the lower-right corner of the console.

{{</ notice >}}
@@ -106,7 +106,7 @@ By default, ks-installer will install Elasticsearch internally if Auditing is enabled
A Custom Resource Definition (CRD) allows users to create a new type of resource without adding another API server. They can use these resources like any other native Kubernetes objects.
{{</ notice >}}

3. In **Custom Resources**, click <img src="/images/docs/v3.3/enable-pluggable-components/kubesphere-auditing-logs/three-dots.png" height="20px"> on the right of `ks-installer` and select **Edit YAML**.
3. In **Custom Resources**, click <img src="/images/docs/v3.x/enable-pluggable-components/kubesphere-auditing-logs/three-dots.png" height="20px"> on the right of `ks-installer` and select **Edit YAML**.

4. In this YAML file, navigate to `auditing` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration.
@@ -139,7 +139,7 @@ By default, Elasticsearch will be installed internally if Auditing is enabled. F

{{< notice note >}}

You can find the web kubectl tool by clicking <img src="/images/docs/v3.3/enable-pluggable-components/kubesphere-auditing-logs/hammer.png" height="20px"> in the lower-right corner of the console.
You can find the web kubectl tool by clicking <img src="/images/docs/v3.x/enable-pluggable-components/kubesphere-auditing-logs/hammer.png" height="20px"> in the lower-right corner of the console.
{{</ notice >}}

## Verify the Installation of the Component
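One way to verify is to watch the auditing workloads come up; the namespace name below is an assumption based on the logging defaults:

```bash
kubectl get pods -n kubesphere-logging-system
```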
@@ -78,7 +78,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource

{{</ notice >}}

3. In **Custom Resources**, click <img src="/images/docs/v3.3/enable-pluggable-components/kubesphere-devops-system/three-dots.png" height="20px"> on the right of `ks-installer` and select **Edit YAML**.
3. In **Custom Resources**, click <img src="/images/docs/v3.x/enable-pluggable-components/kubesphere-devops-system/three-dots.png" height="20px"> on the right of `ks-installer` and select **Edit YAML**.

4. In this YAML file, search for `devops` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration.
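After saving the change, ks-installer reconciles the cluster; you can follow its progress in the installer logs. For example:

```bash
kubectl -n kubesphere-system logs -l app=ks-installer -f
```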
@@ -95,7 +95,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource

{{< notice note >}}

You can find the web kubectl tool by clicking <img src="/images/docs/v3.3/enable-pluggable-components/kubesphere-devops-system/hammer.png" height="20px"> in the lower-right corner of the console.
You can find the web kubectl tool by clicking <img src="/images/docs/v3.x/enable-pluggable-components/kubesphere-devops-system/hammer.png" height="20px"> in the lower-right corner of the console.

{{</ notice >}}
Some files were not shown because too many files have changed in this diff.