fix conflicts

Signed-off-by: FeynmanZhou <pengfeizhou@yunify.com>
This commit is contained in:
FeynmanZhou 2020-09-30 17:42:28 +08:00
commit aec86f416a
167 changed files with 2490 additions and 858 deletions

View File

@ -8,6 +8,10 @@ This style guide provides a set of editorial guidelines for those who are writin
- English is the preferred language to use when you write documentation. If you are not sure whether you are writing correctly, you can use grammar checkers (e.g. [grammarly](https://www.grammarly.com/)). Although they are not 100% accurate, they can help you get rid of most of the wording issues. That said, Chinese is also acceptable if you really don't know how to express your meaning in English.
- It is recommended that you use more images or diagrams to show UI functions and logical relations with tools such as [draw.io](https://draw.io).
## Preparation Notice
Before you start writing the specific steps for a feature, state clearly what should be ready in advance, such as necessary components, accounts or roles (do not tell readers to use `admin` for all the operations, which is unreasonable in reality for different tenants), or a specific environment. You can add this part at the beginning of a tutorial or put it in a separate part (e.g. **Prerequisites**).
## Paragraphs
- It is not recommended that you write a single sentence that spans more than two lines (enumerations excluded).

View File

@ -75,13 +75,27 @@ Now you can preview the website in your browser using `http://localhost:1313/`.
### Open a pull request
Open a [pull request (PR)](https://help.github.com/en/desktop/contributing-to-projects/creating-an-issue-or-pull-request#creating-a-new-pull-request) to add a localization to the repository.
Open a [pull request (PR)](https://help.github.com/en/desktop/contributing-to-projects/creating-an-issue-or-pull-request#creating-a-new-pull-request) to add a localization to the repository. Please use the DCO sign-off when you submit a PR. Refer to the command below (add `-s`):
```bash
git commit -s -m "xxx"
```
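If an earlier commit is missing the sign-off, you can usually amend it before pushing. A minimal sketch (note that `--force-with-lease` rewrites history on the remote branch, so only use it on your own fork):
```bash
# Append a Signed-off-by trailer to the latest commit without changing its message
git commit --amend -s --no-edit
# Update the branch on your fork after amending
git push --force-with-lease
```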
### Preview a pull request
Click **Details** as shown in the image below, which will direct you to the website homepage. Navigate to the part you want to preview.
![](https://ap3.qingstor.com/kubesphere-website/docs/preview-pr-github.png)
If the button above does not appear, go to the **Files changed** tab. Click the three dots of the Markdown file you want to preview as shown below. Please note that this method only gives you a preview on GitHub instead of on the website.
![view-file](https://ap3.qingstor.com/kubesphere-website/docs/view-file-github.png)
## Localizing
### Find your two-letter language code
First, find your localizations two-letter country code. For example, the two-letter code for Turkey is tr. then, open `config.toml`, change the menu which language you want to translate。
First, find your localization's two-letter language code. For example, the two-letter code for Turkish is `tr`. Then, open `config.toml` and change the menu of the language you want to translate.
```
[languages.tr]

assets/js/aside.js Normal file
View File

@ -0,0 +1,118 @@
var getElementTopToScreenTop = function(element) {
var elementOffsetTop = element.offset().top
var windowScrollTop = $(window).scrollTop()
return elementOffsetTop - windowScrollTop
}
var getElementBottomToScreenBottom = function(element) {
return $(window).height() + $(document).scrollTop() - element.offset().top - element.outerHeight()
}
var getElementBottomToTop = function(element) {
var elementHeight = element.outerHeight()
var elementOffsetTop = element.offset().top
return elementOffsetTop + elementHeight
}
// Pin the right-hand aside (table of contents) while scrolling; skipped on narrow screens
var bindAsideScroll = function() {
var screenWidth = $(window).width()
if (screenWidth <= 768) {
return false
}
var content = $('.middle-div')
var aside = $('.aside')
var contentToTop = getElementTopToScreenTop(content)
aside.css("top", contentToTop)
var asideInner = $('.aside .inner-div')
scrollEvent(content, aside, asideInner)
}
// Pin the left navigation tree while scrolling; skipped on narrow screens
var bindLeftTreeScroll = function() {
var screenWidth = $(window).width()
if (screenWidth <= 768) {
return false
}
var content = $('.middle-div')
var aside = $('.common-layout .left-tree')
var contentToTop = getElementTopToScreenTop(content)
aside.css("top", contentToTop)
var asideInner = $('.left-div .inner-tree')
scrollEvent(content, aside, asideInner)
}
// Keep the pinned element aligned with the content area as the window scrolls
var scrollEvent = function(content, aside, asideInner) {
$( window ).scroll(function() {
var headerHeight = $('header').outerHeight()
var contentToTop = getElementTopToScreenTop(content)
if (contentToTop < headerHeight + 10) {
aside.css("top", headerHeight + 10)
aside.css("bottom", 10)
var s1 = getElementBottomToTop(content)
var s2 = getElementBottomToTop(asideInner)
if (s2 > s1) {
var bottom = getElementBottomToScreenBottom(content)
aside.css("bottom", bottom)
}
} else {
aside.css("top", contentToTop)
aside.css("bottom", 10)
}
});
}
var bindClickLink = function() {
var aside = $('.aside')
aside.find('a').click(function(event) {
var id = $(this).attr('href')
setTimeout(function() {
scrollToElement(id)
})
})
}
// On page load, scroll to the element referenced by the URL hash
var initScrollByHash = function() {
var hash = decodeURI(window.location.hash)
var element = $(hash)
if (element.length > 0) {
setTimeout(function() {
scrollToElement(hash)
})
}
}
var scrollToElement = function(id) {
var element = $(id)
var headerHeight = $('header').outerHeight()
var toTop = element.offset().top
window.scrollTo(0, toTop - headerHeight)
}
// Highlight the aside link for the section currently near the top of the viewport
var bindScrollTableActive = function() {
var screenWidth = $(window).width()
if (screenWidth <= 768) {
return false
}
var aside = $('.aside')
var headerHeight = $('header').outerHeight()
$( window ).scroll(function() {
aside.find('a').each(function() {
var id = $(this).attr('href')
var h = $(id)
var elementToTop = getElementTopToScreenTop(h)
if (elementToTop < headerHeight + 10) {
aside.find('.active').removeClass('active')
$(this).addClass('active')
}
})
})
}
if ($('.aside').length > 0) {
bindAsideScroll()
bindScrollTableActive()
}
if ($('.common-layout .left-tree').length > 0) {
bindLeftTreeScroll()
}
bindClickLink()
initScrollByHash()

View File

@ -44,6 +44,7 @@ function addCopyButtons(clipboard) {
pre.appendChild(div)
});
}
$('pre').css('background', '#242e42')
if (navigator && navigator.clipboard) {
addCopyButtons(navigator.clipboard);
}

View File

@ -23,6 +23,10 @@ section {
}
}
::-webkit-scrollbar-track {
background: transparent;
}
.padding {
padding-top: 40px;
}
@ -265,7 +269,7 @@ section {
margin: 20px 0;
position: relative;
p {
& > p {
padding: 0 10px;
margin: 0!important;
height: 30px;
@ -273,43 +277,43 @@ section {
color: #ffffff;
}
div {
& > div {
padding: 10px;
}
}
.notices.note {
p {
& > p {
background: #6ab0de
}
div {
& > div {
background: #e7f2fa;
}
}
.notices.tip {
p {
& > p {
background: #78C578
}
div {
& > div {
background: #E6F9E6;
}
}
.notices.info {
p {
& > p {
background: #F0B37E
}
div {
& > div {
background: #FFF2DB;
}
}
.notices.warning {
p {
& > p {
background: #E06F6C
}
div {
& > div {
background: #FAE2E2;
}
}
@ -374,6 +378,7 @@ section {
.code-over-div {
position: relative;
overflow: auto;
padding: 1em;
}
.copy-code-button {
display: none;

View File

@ -78,7 +78,6 @@
}
.md-body h2 {
font-size: 24px;
font-weight: 500;
line-height: 64px;
color: #171c34;
@ -89,7 +88,6 @@
}
.md-body h3 {
font-size: 16px;
font-weight: 600;
line-height: 1.5;
color: #171c34;
@ -142,12 +140,22 @@
}
.aside {
position: fixed;
top: 150px;
left: 50%;
bottom: 10px;
transform: translateX(350px);
width: 230px;
max-height: 700px;
overflow: scroll;
@media only screen and (max-width: $mobile-max-width) {
display: none;
}
.inner-div {
max-height: 100%;
position: relative;
overflow-y: auto;
}
.title {
height: 32px;
font-size: 24px;
@ -178,26 +186,15 @@
color: #55bc8a;
}
}
.active {
color: #55bc8a;
}
}
li li {
padding-left: 20px;
}
}
}
.aside-fixed {
position: fixed;
top: 150px;
left: 50%;
transform: translateX(350px);
}
.aside-absolute {
position: absolute;
right: 0;
bottom: 0;
top: none;
left: none;
}
}
}

View File

@ -1,8 +1,14 @@
@import "variables";
@import "markdown";
body {
html, body {
background: #ffffff;
height: 100%;
min-height: 100%;
}
main {
min-height: calc(100% - 105px);
}
.section-1 {
@ -153,20 +159,33 @@ body {
}
.left-div {
position: absolute;
top: 0;
left: 0;
width: 274px;
background-color: #ffffff;
position: fixed;
top: 260px;
bottom: 20px;
left: 50%;
transform: translateX(-580px);
width: 180px;
z-index: 2;
transition: all 0.5s;
@media only screen and (max-width: 1160px) {
left: 0;
transform: translateX(0px);
}
@media only screen and (max-width: $mobile-max-width) {
left: -274px;
transition: all 0.5s;
left: -180px;
}
.inner-tree {
max-height: 100%;
position: relative;
overflow-y: auto;
background-color: #ffffff;
}
.nav-menu {
padding: 20px;
padding: 10px;
font-size: 14px;
color: #31383e;
background-color: #f6f8fa;
@ -229,7 +248,6 @@ body {
}
.download-div {
width: 274px;
height: 44px;
margin-top: 10px;
line-height: 44px;
@ -258,13 +276,15 @@ body {
.middle-div {
position: relative;
margin-left: 274px;
margin-right: 80px;
padding: 10px 20px;
margin-left: 180px;
margin-right: 180px;
padding: 10px;
border-right: 1px solid #dde1e6;
@media only screen and (max-width: $mobile-max-width) {
margin: 0;
padding: 0;
border-right: none;
}
.top-div {
@ -415,21 +435,18 @@ body {
.content-div {
.md-body {
font-size: 16px;
line-height: 1.71;
line-height: 2.29;
color: #36435c;
}
.md-body h2 {
font-size: 24px;
font-weight: 500;
line-height: 40px;
color: #171c34;
text-shadow: none;
text-align: left;
}
.md-body h3 {
font-size: 16px;
font-weight: 600;
line-height: 1.5;
color: #171c34;
@ -546,6 +563,7 @@ body {
.page-div {
position: relative;
margin-top: 20px;
height: 30px;
a {
position: absolute;
@ -579,65 +597,195 @@ body {
}
.aside {
position: relative;
width: 174px;
padding-left: 10px;
border-left: 1px solid #dde1e6;
max-height: 600px;
overflow: scroll;
position: fixed;
top: 260px;
bottom: 10px;
left: 50%;
transform: translateX(400px);
width: 180px;
@media only screen and (max-width: $mobile-max-width) {
display: none;
display: none;
}
.inner-div {
max-height: 100%;
position: relative;
overflow-y: auto;
padding-left: 10px;
}
.title {
font-size: 14px;
line-height: 1.71;
color: #3e464c;
font-size: 14px;
line-height: 1.71;
color: #3e464c;
}
.tabs {
#TableOfContents > ul > li > a {
font-weight: 500;
}
li {
margin: 8px 0;
font-size: 14px;
a {
display: block;
width: 100%;
font-weight: 400;
line-height: 24px;
color: #68747f;
&:hover {
color: #00a971;
}
#TableOfContents > ul > li > a {
font-weight: 500;
}
li {
margin: 8px 0;
font-size: 14px;
a {
display: block;
width: 100%;
font-weight: 400;
line-height: 24px;
color: #68747f;
&:hover {
color: #00a971;
}
}
.active {
color: #00a971;
.active {
color: #00a971;
}
}
li li {
padding-left: 20px;
}
}
li li {
padding-left: 20px;
}
}
}
.aside-fixed {
position: fixed;
top: 260px;
left: 50%;
transform: translateX(500px);
}
.aside-absolute {
position: absolute;
right: -104px;
bottom: 0;
top: none;
left: none;
}
}
footer {
padding: 20px 0;
.down-main {
text-align: center;
.img-div {
font-size: 0;
svg {
width: 32px;
height: 32px;
color: #b6c2cd;
cursor: pointer;
transition: all 0.2s ease-in-out;
margin-right: 8px;
}
a {
display: inline-block;
width: 34px;
height: 34px;
margin: 0 6px;
background-position: center;
background-repeat: no-repeat;
}
.wechat {
position: relative;
.hide-div {
display: none;
position: absolute;
top: -10px;
left: 50%;
padding: 10px;
transform: translate(-50%, -100%);
background: #ffffff;
p {
font-size: 12px;
color: #b6c2cd;
}
img {
margin: 0 10px;
}
&::after {
content: '';
position: absolute;
bottom: -18px;
left: 50%;
transform: translateX(-50%);
border: 10px solid transparent;
border-top-color: #ffffff;
}
}
&:hover {
svg {
color: green;
}
div {
display: block;
}
}
}
.facebook-a {
background-image: url("/images/footer/facebook.svg");
&:hover {
background-image: url("/images/footer/facebook-hover.svg");
}
}
.youtube-a {
background-image: url("/images/footer/youtube.svg");
&:hover {
background-image: url("/images/footer/youtube-hover.svg");
}
}
.slack-a {
background-image: url("/images/footer/slack.svg");
&:hover {
background-image: url("/images/footer/slack-hover.svg");
}
}
.twitter-a {
background-image: url("/images/footer/twitter.svg");
&:hover {
background-image: url("/images/footer/twitter-hover.svg");
}
}
.github-a {
background-image: url("/images/footer/github.svg");
&:hover {
background-image: url("/images/footer/github-hover.svg");
}
}
.medium-a {
background-image: url("/images/footer/medium.svg");
&:hover {
background-image: url("/images/footer/medium-hover.svg");
}
}
.linkedin-a {
background-image: url("/images/footer/linkedin.svg");
&:hover {
background-image: url("/images/footer/linkedin-hover.svg");
}
}
}
.p1 {
height: 23px;
margin-top: 6px;
font-size: 14px;
line-height: 23px;
font-weight: 500;
letter-spacing: 1.08px;
color: #242e42;
}
.case {
margin-top: 20px;
}
}
}

View File

@ -183,13 +183,16 @@
font-size: 11px;
line-height: 34px;
text-align: center;
color: #ffffff;
border-radius: 17px;
box-shadow: 0 10px 50px 0 rgba(34, 43, 62, 0.1), 0 8px 16px 0 rgba(33, 43, 61, 0.2);
background-image: linear-gradient(to bottom, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.1) 97%), linear-gradient(to bottom, #242e42, #242e42);
&:hover {
box-shadow: none;
}
a {
color: #ffffff;
}
}
}
}

View File

@ -164,7 +164,7 @@
}
.md-body h2 {
font-size: 32px;
font-size: 1.8em;
font-weight: normal;
line-height: 1.63;
text-align: center;
@ -252,6 +252,10 @@
border-collapse: collapse;
}
.md-body table a {
word-break: break-word;
}
.md-body table th {
font-weight: 600;
border-radius: 3px;

View File

@ -360,6 +360,7 @@
font-size: 14px;
line-height: 1.71;
color: #ffffff;
overflow: auto;
}
}
}

View File

@ -187,6 +187,10 @@
padding: 0;
border-radius: 0;
font-size: 0;
.video-div {
height: 100%;
}
video {
width: 100%;

View File

@ -2,9 +2,11 @@ baseURL = "https://kubesphere-v3.netlify.app"
[markup]
[markup.tableOfContents]
endLevel = 4
endLevel = 3
ordered = false
startLevel = 2
[markup.goldmark.renderer]
unsafe= true
[params]
@ -100,7 +102,7 @@ hasChildren = true
[[languages.en.menu.main]]
parent = "Documentation"
name = "v3.0.0"
name = "v3.0.0 <img src='/images/header/star.svg' alt='star'>"
URL = "docs/"
weight = 1
@ -226,7 +228,7 @@ hasChildren = true
name = "文档中心"
[[languages.zh.menu.main]]
parent = "文档中心"
name = "v3.0.0"
name = "v3.0.0 <img src='/images/header/star.svg' alt='star'>"
URL = "docs/"
weight = 1

View File

@ -0,0 +1,16 @@
---
title: "KubeSphere API"
description: "How to use KubeSphere API to build your own application"
layout: "single"
linkTitle: "API Documentation"
weight: 8100
icon: "/images/docs/docs.svg"
---
## [API Documentation](./kubesphere-api/)
The REST API is the fundamental fabric of KubeSphere. This page shows you how to access the KubeSphere API server.

View File

@ -0,0 +1,73 @@
---
title: "API Glossary"
keywords: 'kubernetes, docker, helm, jenkins, istio, prometheus'
description: 'KubeSphere API Glossary documentation'
weight: 240
---
## DevOps
|English/英文| Chinese/中文|
|---|---|
|DevOps|DevOps 工程|
|Workspace| 企业空间|
|Pipeline|流水线|
|Credential|凭证|
|Artifact |制品|
|Stage|流水线执行过程中的阶段|
|Step|阶段中的步骤|
|Branch|分支|
|SCM|源代码管理工具例如github、gitlab等|
|sonar|代码质量分析工具 sonarqube|
## Monitoring
|English/英文| Chinese/中文|
|---|---|
|Metric|指标|
|Usage|用量|
|Utilization|利用率|
|Throughput|吞吐量|
|Capacity|容量|
|Proposal|Etcd 提案|
## Logging
|English/英文| Chinese/中文|
|---|---|
|Fuzzy Matching |模糊匹配|
## Router
|English/英文| Chinese/中文|
|---|---|
|Gateway|网关|
|Route|应用路由|
## Service Mesh
|English/英文| Chinese/中文|
|---|---|
|ServiceMesh|服务网格|
|Tracing|追踪(分布式追踪)|
|Canary Release| 金丝雀发布|
|Traffic mirroring|流量镜像|
|BlueGreen Release|蓝绿发布|
## Notification
|English/英文| Chinese/中文|
|---|---|
|addresslist|通知地址列表|
## Multi Cluster
|English/英文| Chinese/中文|
|---|---|
|Host Cluster|主集群/管理集群|
|Member Cluster|成员集群|
|Direct Connection|直接连接|
|Agent Connection|代理连接|

View File

@ -0,0 +1,93 @@
---
title: "KubeSphere API"
keywords: 'Kubernetes, KubeSphere, API'
description: 'KubeSphere API documentation'
weight: 240
---
In KubeSphere v3.0, we moved the functionalities of _ks-apigateway_ and _ks-account_ into _ks-apiserver_ to make the architecture more compact and straightforward. To use the KubeSphere API, you need to expose _ks-apiserver_ to your client.
## Expose KubeSphere API service
If you are going to access KubeSphere inside the cluster, you can skip the following section and just use the KubeSphere API server endpoint **`http://ks-apiserver.kubesphere-system.svc`**.
But if not, you need to expose the KubeSphere API server endpoint to the outside of the cluster first.
There are many ways to expose a Kubernetes service. For simplicity, we use _NodePort_ in this example. Change the type of the `ks-apiserver` service to NodePort with the following command, and then you are done.
```bash
root@master:~# kubectl -n kubesphere-system patch service ks-apiserver -p '{"spec":{"type":"NodePort"}}'
root@master:~# kubectl -n kubesphere-system get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
etcd ClusterIP 10.233.34.220 <none> 2379/TCP 44d
ks-apiserver NodePort 10.233.15.31 <none> 80:31407/TCP 49d
ks-console NodePort 10.233.3.45 <none> 80:30880/TCP 49d
```
Now, you can access `ks-apiserver` outside the cluster through a URL like `http://[node ip]:31407`, where `[node ip]` is the IP address of any node in your cluster.
## Generate a token
There is one more thing to do before calling the API: authorization. Any client that talks to the KubeSphere API server needs to identify itself first; only after successful authorization will the server respond to the call.
Let's say a user `jeff` with the password `P#$$w0rd` wants to generate a token. He or she can issue a request like the following:
```bash
root@master:~# curl -X POST -H 'Content-Type: application/x-www-form-urlencoded' \
'http://[node ip]:31407/oauth/token' \
--data-urlencode 'grant_type=password' \
--data-urlencode 'username=admin' \
--data-urlencode 'password=P#$$w0rd'
```
If the identity is correct, the server will respond with something like the following. `access_token` is the token we need to access the KubeSphere API server.
```json
{
"access_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImFkbWluIiwidWlkIjoiYTlhNjJmOTEtYWQ2Yi00MjRlLWIxNWEtZTFkOTcyNmUzNDFhIiwidG9rZW5fdHlwZSI6ImFjY2Vzc190b2tlbiIsImV4cCI6MTYwMDg1MjM5OCwiaWF0IjoxNjAwODQ1MTk4LCJpc3MiOiJrdWJlc3BoZXJlIiwibmJmIjoxNjAwODQ1MTk4fQ.Hcyf-CPMeq8XyQQLz5PO-oE1Rp1QVkOeV_5J2oX1hvU",
"token_type": "Bearer",
"refresh_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImFkbWluIiwidWlkIjoiYTlhNjJmOTEtYWQ2Yi00MjRlLWIxNWEtZTFkOTcyNmUzNDFhIiwidG9rZW5fdHlwZSI6InJlZnJlc2hfdG9rZW4iLCJleHAiOjE2MDA4NTk1OTgsImlhdCI6MTYwMDg0NTE5OCwiaXNzIjoia3ViZXNwaGVyZSIsIm5iZiI6MTYwMDg0NTE5OH0.PerssCLVXJD7BuCF3Ow8QUNYLQxjwqC8m9iOkRRD6Tc",
"expires_in": 7200
}
```
> **Note**: Please substitute `[node ip]:31407` with the real IP address.
## Make the call
Now you have everything you need to access the API server. Make the call using the access token you just acquired:
```bash
root@master1:~# curl -X GET -H "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImFkbWluIiwidWlkIjoiYTlhNjJmOTEtYWQ2Yi00MjRlLWIxNWEtZTFkOTcyNmUzNDFhIiwidG9rZW5fdHlwZSI6ImFjY2Vzc190b2tlbiIsImV4cCI6MTYwMDg1MjM5OCwiaWF0IjoxNjAwODQ1MTk4LCJpc3MiOiJrdWJlc3BoZXJlIiwibmJmIjoxNjAwODQ1MTk4fQ.Hcyf-CPMeq8XyQQLz5PO-oE1Rp1QVkOeV_5J2oX1hvU" \
-H 'Content-Type: application/json' \
'http://10.233.15.31/kapis/resources.kubesphere.io/v1alpha3/nodes'
{
"items": [
{
"metadata": {
"name": "node3",
"selfLink": "/api/v1/nodes/node3",
"uid": "dd8c01f3-76e8-4695-9e54-45be90d9ec53",
"resourceVersion": "84170589",
"creationTimestamp": "2020-06-18T07:36:41Z",
"labels": {
"a": "a",
"beta.kubernetes.io/arch": "amd64",
"beta.kubernetes.io/os": "linux",
"gitpod.io/theia.v0.4.0": "available",
"gitpod.io/ws-sync": "available",
"kubernetes.io/arch": "amd64",
"kubernetes.io/hostname": "node3",
"kubernetes.io/os": "linux",
"kubernetes.io/role": "new",
"node-role.kubernetes.io/worker": "",
"topology.disk.csi.qingcloud.com/instance-type": "Standard",
"topology.disk.csi.qingcloud.com/zone": "ap2a"
},
"annotations": {
"csi.volume.kubernetes.io/nodeid": "{\"disk.csi.qingcloud.com\":\"i-icjxhi1e\"}",
"kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock",
"node.alpha.kubernetes.io/ttl": "0",
....
```
## API Reference
The KubeSphere API swagger JSON files can be found in the repo https://github.com/kubesphere/kubesphere/blob/master/api/
- KubeSphere-specific API [swagger json](https://github.com/kubesphere/kubesphere/blob/master/api/ks-openapi-spec/swagger.json). It contains all the APIs that apply only to KubeSphere.
- KubeSphere-specific CRD [swagger json](https://github.com/kubesphere/kubesphere/blob/master/api/openapi-spec/swagger.json). It contains all the generated CRD API documentation, which follows the same format as Kubernetes API objects.
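If you want to browse these definitions interactively, one option is to load a downloaded copy into a local Swagger UI container. A sketch, assuming Docker is available and the file has been saved as `swagger.json` in the current directory:
```bash
# Serve the downloaded KubeSphere swagger definition at http://localhost:8080
docker run --rm -p 8080:8080 \
  -e SWAGGER_JSON=/spec/swagger.json \
  -v $(pwd)/swagger.json:/spec/swagger.json \
  swaggerapi/swagger-ui
```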

View File

@ -0,0 +1,42 @@
---
title: "Application Resources Monitoring"
keywords: "Kubernetes, docker, kubesphere, Prometheus"
description: "Kubernetes and KubeSphere node management"
linkTitle: "Application Resources Monitoring"
weight: 400
---
In addition to monitoring data at the physical resource level, cluster administrators need to know how many application resources are in use on the platform, such as the number of projects and DevOps projects, as well as the number of specific types of workloads and services. Application resource monitoring summarizes the usage and trends of these application-level resources.
## Prerequisites
You need an account granted a role that includes the authorization of **Clusters Management**. For example, you can log in to the console as `admin` directly, or create a new role with this authorization and assign it to an account.
## Resource Usage
Click **Platform** in the top left corner and select **Clusters Management**.
![Platform](/images/docs/cluster-administration/cluster-status-monitoring/platform.png)
If you have enabled the multi-cluster feature with member clusters imported, you can select a specific cluster to view its application resources. If you have not enabled the feature, refer to the next step directly.
![Clusters Management](/images/docs/cluster-administration/cluster-status-monitoring/clusters-management.png)
Choose **Monitoring & Alerting -> Application Resources** to enter the overview page of application resource monitoring, including the summary of the usage of all resources in the cluster, as shown in the following figure.
![Resource Usage](/images/docs/cluster-administration/application-resources-monitoring/application-resources-monitoring.png)
Cluster resource usage and application resource usage retain the monitoring data of the last 7 days and support custom time range queries.
![Time Range](/images/docs/cluster-administration/application-resources-monitoring/time-range.png)
Click a specific resource to view its usage and trends over time. For example, click **Cluster Resources Usage** to enter its details page. The details page allows you to view monitoring data by project, and you can customize the time range as well.
![Cluster Resources Usage](/images/docs/cluster-administration/application-resources-monitoring/cluster-resources-monitoring.png)
## Usage Ranking
The usage ranking ranks projects by resource usage, so that platform administrators can understand the resource consumption of each project in the current cluster, including CPU usage, memory usage, Pod count, and inbound and outbound network traffic. The list can be sorted in ascending or descending order by any of these indicators.
![Usage Ranking](/images/docs/cluster-administration/application-resources-monitoring/usage-ranking.png)

View File

@ -0,0 +1,170 @@
---
title: "Cluster Status Monitoring"
keywords: "Kubernetes, docker, kubesphere, Prometheus"
description: "Kubernetes and KubeSphere node management"
linkTitle: "Cluster Status Monitoring"
weight: 300
---
KubeSphere provides monitoring of related indicators such as CPU, memory, network, and disk of the cluster, and supports reviewing historical monitoring and node usage rankings in **Cluster Status Monitoring**.
## Prerequisites
You need an account granted a role that includes the authorization of **Clusters Management**. For example, you can log in to the console as `admin` directly, or create a new role with this authorization and assign it to an account.
## Cluster Status Monitoring
Click **Platform** in the top left corner and select **Clusters Management**.
![Platform](/images/docs/cluster-administration/cluster-status-monitoring/platform.png)
If you have enabled the multi-cluster feature with member clusters imported, you can select a specific cluster to view its application resources. If you have not enabled the feature, refer to the next step directly.
![Clusters Management](/images/docs/cluster-administration/cluster-status-monitoring/clusters-management.png)
Click **Monitoring & Alerting -> Cluster Status** to enter the overview page of cluster status monitoring, including **Cluster Node Status, Components Status, Cluster Resources Usage, ETCD Monitoring, Service Component Monitoring**, as shown in the following figure.
![Cluster Status Monitoring](/images/docs/cluster-administration/cluster-status-monitoring/cluster-status-monitoring.png)
### Cluster Node Status
Cluster node status displays the current online status of all nodes, and supports drilling down to the host management page to view the real-time resource usage of all hosts. Click **Node Online Status** to enter the cluster nodes page.
![Cluster Nodes](/images/docs/cluster-administration/cluster-status-monitoring/cluster-nodes.png)
Click the node name to enter the running status page of the node. It displays the CPU, memory, Pod, and local storage information of the node, as well as its health status.
![Running Status](/images/docs/cluster-administration/cluster-status-monitoring/running-status.png)
Click the **Monitoring** tab to enter the monitoring page of the current node, including **CPU Utilization, CPU Load Average, Memory Utilization, Disk Utilization, inode Utilization, IOPS, Disk Throughput, and Network Bandwidth**, as shown in the following figure.
![Monitoring](/images/docs/cluster-administration/cluster-status-monitoring/monitoring.png)
### Components Status
KubeSphere provides health status monitoring for the various service components in the cluster. If key service components become abnormal, the system may become unavailable. Viewing the health status and running time of the cluster's service components helps users monitor the status of the cluster and locate problems in time.
Click **Components Status** to enter the detail page of service components.
![Service Components Status](/images/docs/cluster-administration/cluster-status-monitoring/service-components-status.png)
### Cluster Resources Usage
Cluster resources usage displays the indicators **Utilization of CPU, Memory, Disk, and Pod Quantity Trend** for all nodes in the cluster. Click the pie chart on the left to switch indicators; the chart shows how the selected indicator has changed in the last 15 minutes.
![Cluster Resources Usage](/images/docs/cluster-administration/cluster-status-monitoring/cluster-resources-usage.png)
## Physical Resources Monitoring
Physical resource monitoring data helps users observe resources and establish normal baselines for cluster performance. KubeSphere supports viewing cluster monitoring data within 7 days, including CPU utilization, memory utilization, average CPU load (1 minute / 5 minutes / 15 minutes), inode utilization, disk throughput (read/write), IOPS (read/write), network bandwidth (out/in), and Pod status. KubeSphere supports a custom time range and time interval for viewing historical monitoring data. The following briefly introduces the meaning of each monitoring indicator.
![Physical Resources Monitoring](/images/docs/cluster-administration/cluster-status-monitoring/physical-resources-monitoring.png)
### Monitoring indicators
#### CPU Utilization
CPU utilization is the statistics of CPU usage over a period. This indicator shows how heavily the CPU was occupied during that period. In monitoring, if the CPU usage of the system soars during a certain period, you should first locate which process is occupying the most CPU. For example, for Java applications, there may be memory leaks or infinite loops in the code.
![CPU Utilization](/images/docs/cluster-administration/cluster-status-monitoring/cpu-utilization.png)
#### Memory Utilization
Memory is one of the important components of a computer and the bridge to the CPU, so memory performance has a great impact on the whole system. Data loading, thread concurrency, I/O buffering, and so on all rely on memory when a program is running. The amount of available memory determines whether a program can run normally and how well it performs, and the memory utilization rate reflects the memory usage and performance of the cluster.
![Memory Utilization](/images/docs/cluster-administration/cluster-status-monitoring/memory-utilization.png)
#### CPU Load Average
CPU load average is the average number of processes in a runnable or uninterruptible state per unit of time, that is, the average number of active processes. Note that there is no direct relationship between the CPU load average and the CPU usage rate. So what is a reasonable load average? Ideally, the load average should be equal to the number of CPUs, so when judging the load average, first determine how many CPUs the system has. Only when the load average is higher than the number of CPUs is the system overloaded.
The question then is how to interpret the 1-minute / 5-minute / 15-minute load averages shown in the figure below.
Under normal circumstances, you should look at all three values. By analyzing the trend of the system load, you can get a more comprehensive understanding of the current load status:
- If the 1-minute / 5-minute / 15-minute curves are similar within a certain period, the CPU load of the cluster is relatively stable.
- If, at a certain period or time point, the 1-minute value is much greater than the 15-minute value, the load has been increasing in the last minute and you need to keep observing. Once the 1-minute value exceeds the number of CPUs, the system may be overloaded and you need to further analyze the source of the problem.
- Conversely, if the 1-minute value is much less than the 15-minute value, the load has decreased in the last minute after a high load during the previous 15 minutes.
![CPU Load Average](/images/docs/cluster-administration/cluster-status-monitoring/cpu-load-average.png)
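As a quick cross-check on a node itself, you can compare the load averages with the number of CPUs. A minimal sketch, run directly on a cluster node:
```bash
# Number of CPUs on this node
nproc
# 1-, 5- and 15-minute load averages are the last three fields of the output
uptime
```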
#### Disk Usage
KubeSphere workloads such as `StatefulSets` and `DaemonSets` rely on persistent volumes, and some KubeSphere components and services also require persistent volumes; such back-end storage relies on disks, for example block storage or network shared storage. Real-time monitoring of disk usage is an important part of maintaining high data reliability, because in daily Linux system administration, insufficient disk space can lead to data loss or even system crashes. Therefore, paying attention to disk usage and ensuring that file systems do not fill up or get abused is an important cluster management task. By monitoring historical disk usage data, you can anticipate disk consumption in advance. If you find that the disk usage is too high, you can free disk space by cleaning up unnecessary images or containers.
![Disk Usage](/images/docs/cluster-administration/cluster-status-monitoring/disk-usage.png)
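On a node, a quick way to check filesystem usage and reclaim space from unused images is shown below. A sketch: the prune command assumes a Docker-based container runtime and permanently removes dangling images, so use it with care:
```bash
# Show filesystem usage per mount point
df -h
# Remove dangling images to free disk space (Docker runtime assumed)
docker image prune -f
```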
#### inode Utilization
Each file must have an inode, which stores the file's meta-information, such as its creator and creation date. Inodes also consume hard disk space, and many small cache files can easily exhaust inode resources. Moreover, inodes may run out while the disk is not yet full, at which point new files can no longer be created. Monitoring inode usage helps detect such situations in advance, lets users know the cluster's inode usage, prevents the cluster from becoming unable to work due to inode exhaustion, and prompts users to clean up temporary files in time.
![inode Utilization](/images/docs/cluster-administration/cluster-status-monitoring/inode-utilization.png)
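To check inode consumption directly on a node, a minimal sketch:
```bash
# Show inode usage per filesystem
df -i
```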
#### Disk Throughput
Monitoring disk throughput and IOPS is an indispensable part of disk monitoring; it helps cluster administrators adjust data layout and other management activities to optimize the overall performance of the cluster. Disk throughput refers to the speed at which the disk transfers data, in MB/s, where the transferred data is the sum of read and write data. This indicator is an important reference when transferring large blocks of non-contiguous data.
![Disk Throughput](/images/docs/cluster-administration/cluster-status-monitoring/disk-throughput.png)
#### IOPS
A disk I/O is a continuous read or write on a disk. The IOPS of a disk is the sum of continuous reads and writes per second. This indicator is an important reference when transferring small, non-contiguous data.
![IOPS](/images/docs/cluster-administration/cluster-status-monitoring/iops.png)
#### Network Bandwidth
The network bandwidth is the ability of the network card to receive or send data per second, in Mbps (megabits per second).
![Network Bandwidth](/images/docs/cluster-administration/cluster-status-monitoring/netework-bandwidth.png)
#### Pod Status
Pod status displays the total number of pods in different states, including Running, Completed, and Warning. Pods tagged Completed usually belong to a Job or a CronJob. Pods marked Warning are in an abnormal state and require special attention.
![Pod Status](/images/docs/cluster-administration/cluster-status-monitoring/pod-status.png)
## ETCD Monitoring
ETCD monitoring helps you make better use of ETCD, especially when locating performance problems. The ETCD service natively exposes metrics interfaces, and the KubeSphere monitoring system visualizes this native data.
|Monitoring indicators|Description|
|---|---|
|ETCD Nodes | - `Is there a Leader`: Indicates whether the member has a leader. A member without a leader is completely unavailable, and if no member in the cluster has a leader, the entire cluster is completely unavailable. <br> - `Leader change times`: The number of leader changes seen by cluster members since startup. Frequent leader changes significantly affect ETCD performance and indicate that the leader is unstable, possibly due to network connection issues or excessive load on the ETCD cluster.|
|DB Size | The size of the underlying ETCD database (in MiB); the value displayed is the average database size across ETCD members. |
|Client Traffic|Including the total traffic sent to the grpc client and the total traffic received from the grpc client, for more information about indicators, see [etcd Network](https://github.com/etcd-io/etcd/blob/v3.2.17/Documentation/metrics.md#network) |
|gRPC Stream Messages|The gRPC streaming message receiving rate and sending rate on the server-side, which can reflect whether there are large-scale data read and write operations in the cluster, for more information about indicators, see [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus#counters)|
|WAL Fsync|The latency of WAL fsync calls. wal_fsync is called when ETCD persists its log entries to disk before applying them. For more information about this indicator, see [etcd Disk](https://etcd.io/docs/v3.3.12/metrics/#grpc-requests) |
|DB Fsync|The latency distribution of backend commit calls. backend_commit is called when ETCD commits its most recent incremental snapshot to disk. Note that high disk operation latency (long WAL sync time or backend commit time) usually indicates disk problems, which may cause high request latency or make the cluster unstable. For more information about this indicator, see [etcd Disk](https://etcd.io/docs/v3.3.12/metrics/#grpc-requests) |
|Raft Proposals| - `Proposal Commit Rate`: Record the rate of consensus proposals committed. If the cluster is healthy, this indicator should increase over time. Several healthy members of the ETCD cluster may have different general proposals at the same time. The continuous large lag between a single member and its leader indicates that the member is slow or unhealthy. <br> - `Proposal Apply Rate`: Record the total rate of consensus proposals applied. The ETCD server applies each committed proposal asynchronously. The difference between the proposal commit rate and the proposal apply rate should usually be small (only a few thousand even under high load). If the difference between them continues to rise, it indicates that the ETCD server is overloaded. This can happen when using large-scale queries such as heavy range queries or large txn operations. <br> - `Proposal Failure Rate`: The total rate of failed proposals, usually related to two issues, temporary failures related to leader election or longer downtime due to loss of arbitration in the cluster. <br> - `Proposal Pending Total`: The current number of pending proposals. An increase in pending proposals indicates high client load or members unable to submit proposals. <br> Currently, the data displayed on the interface is the average size of the ETCD member indicators. For more information about indicators, see [etcd Server](https://etcd.io/docs/v3.3.12/metrics/#server).
![ETCD Monitoring](/images/docs/cluster-administration/cluster-status-monitoring/etcd-monitoring.png)
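If you want to inspect the raw etcd metrics behind these panels, you can query the etcd `/metrics` endpoint directly. A sketch: the certificate paths below are typical for kubeadm-based clusters and may differ in your environment:
```bash
# Query leader-change metrics from a local etcd member (run on a control-plane node)
curl -s --cacert /etc/kubernetes/pki/etcd/ca.crt \
  --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt \
  --key /etc/kubernetes/pki/etcd/healthcheck-client.key \
  https://127.0.0.1:2379/metrics | grep leader_changes_seen_total
```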
## APIServer Monitoring
[API Server](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) is the hub for the interaction of all components in a Kubernetes cluster. The following table lists the main indicators monitored for the APIServer.
|Monitoring indicators|Description|
|---|---|
|Request Latency|The latency of resource request responses in milliseconds, broken down by HTTP request method|
|Request Per Second|The number of requests accepted by kube-apiserver per second|
![APIServer Monitoring](/images/docs/cluster-administration/cluster-status-monitoring/apiserver-monitoring.png)
## Scheduler Monitoring
[Scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) watches the Kubernetes API for newly created pods and determines which nodes these pods should run on. It makes this decision based on available data, including collected resource availability and the resource requirements of the Pod. Monitoring scheduling latency ensures that you can see any delays the scheduler is facing.
|Monitoring indicators|Description|
|---|---|
|Attempt Frequency|Including the number of scheduling successes, errors, and failures|
|Attempt Rate|Including scheduling rate of success, error, and failure|
|Scheduling latency|End-to-end scheduling delay, which is the sum of scheduling algorithm delay and binding delay|
![Scheduler Monitoring](/images/docs/cluster-administration/cluster-status-monitoring/scheduler-monitoring.png)
## Node Usage Ranking
You can sort nodes in ascending or descending order by indicators such as CPU, load average, memory, local storage, inode utilization, and Pod utilization. This enables administrators to quickly find potential problems or identify nodes with insufficient resources.
![Node Usage Ranking](/images/docs/cluster-administration/cluster-status-monitoring/node-usage-ranking.png)
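If you prefer the command line, `kubectl top` provides a similar (less detailed) view, assuming the metrics API is served by metrics-server or the KubeSphere monitoring stack. A sketch:
```bash
# Show current CPU and memory usage per node
kubectl top nodes
```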

View File

@ -1,5 +1,5 @@
---
title: "Nodes"
title: "Nodes Management"
keywords: "kubernetes, StorageClass, kubesphere, PVC"
description: "Kubernetes Nodes Management"
@ -7,4 +7,32 @@ linkTitle: "Nodes"
weight: 200
---
TBD
Kubernetes runs your workload by placing containers into Pods to run on Nodes. A node may be a virtual or physical machine, depending on the cluster. Each node contains the services necessary to run Pods, managed by the control plane.
## Nodes Status
Cluster nodes are only accessible to cluster administrators. Administrators can find the cluster nodes page under _Cluster Administration_ -> _Nodes_ -> _Cluster Nodes_. Some node metrics are very important to clusters, and it is administrators' responsibility to watch over these numbers to make sure nodes are available.
![Node Status](/images/docs/cluster-administration/node_status.png)
- **Status**: The node's current status, indicating whether the node is available.
- **CPU**: The node's current CPU usage; these values are real-time numbers.
- **Memory**: The node's current memory usage; like the _CPU_ stats, these are real-time numbers.
- **Allocated CPU**: Calculated by summing up the CPU requests of all pods on this node. It indicates how much CPU is reserved for workloads on this node, even if the workloads are using less CPU. This metric is vital to the Kubernetes scheduler: in most cases kube-scheduler favors nodes with lower _Allocated CPU_ when scheduling a pod. For more details, refer to [Managing Resources for Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
- **Allocated Memory**: Calculated by summing up the memory requests of all pods on this node, in the same way as _Allocated CPU_.
> **Note:** _CPU_ and _Allocated CPU_ differ most of the time, and so does memory; this is normal. As a cluster administrator, you should focus on both kinds of metrics instead of just one. It is always a good practice to set resource requests and limits for each pod to match its real usage. Over-allocating leads to low cluster utilization, while under-allocating puts high pressure on the cluster and can even make it unhealthy.
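For example, requests and limits can also be set from the command line. A sketch, where the namespace and deployment name are placeholders:
```bash
# Set CPU/memory requests and limits on an example deployment
kubectl -n demo set resources deployment/sample-app \
  --requests=cpu=100m,memory=128Mi \
  --limits=cpu=500m,memory=256Mi
```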
## Nodes Management
![Node Detail](/images/docs/cluster-administration/node_detail.png)
- **Cordon/Uncordon**: Marking a node as unschedulable is very useful during a node reboot or other maintenance. The Kubernetes scheduler will not schedule new pods to a node marked unschedulable, but this does not affect existing workloads already on the node. In KubeSphere, you mark a node as unschedulable by clicking the _Cordon_ button on the node detail page; the node becomes schedulable again if you click the button again.
- **Labels**: Node labels are very useful when you want to assign pods to specific nodes. Label the nodes first, for example, label GPU nodes with `node-role.kubernetes.io/gpu-node`, and then create workloads with the same label to assign pods to GPU nodes explicitly (see the command sketch after this list).
![Label Node](/images/docs/cluster-administration/label_node.png)
![Assign pods to nodes](/images/docs/cluster-administration/assign_pods_to_nodes.png)
- **Taints**: Taints allow a node to repel a set of pods; see [taints and tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). You can add or remove node taints on the node detail page, but be careful: taints can cause unexpected behavior and may lead to services becoming unavailable.
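The same labeling and tainting can also be done with kubectl. A sketch, where the node name, label, and taint key are examples only:
```bash
# Label a GPU node so workloads can target it
kubectl label node node1 node-role.kubernetes.io/gpu-node=
# Taint the node so that only pods with a matching toleration are scheduled onto it
kubectl taint node node1 gpu=true:NoSchedule
# Remove the taint again (note the trailing dash)
kubectl taint node node1 gpu=true:NoSchedule-
```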

View File

@ -1,10 +1,83 @@
---
title: "Role and Member Management"
keywords: 'kubernetes, kubesphere, air gapped, installation'
keywords: 'Kubernetes, KubeSphere, DevOps, role, member'
description: 'Role and Member Management'
weight: 2240
---
TBD
This guide demonstrates how to manage roles and members in your DevOps project. For more information about KubeSphere roles, see Overview of Role Management.
Within the scope of a DevOps project, you can grant a role permissions for the following resources:
- Pipelines
- Credentials
- DevOps Settings
- Access Control
## Prerequisites
At least one DevOps project has been created, such as `demo-devops`. Besides, you need an account of the `admin` role (e.g. `devops-admin`) at the DevOps project level. See [Create Workspace, Project, Account and Role](../../../quick-start/create-workspace-and-project/) if it is not ready yet.
## Built-in Roles
In **Project Roles**, there are three available built-in roles as shown below. Built-in roles are created automatically by KubeSphere when a DevOps project is created and they cannot be edited or deleted.
| Built-in Roles | Description |
| ------------------ | ------------------------------------------------------------ |
| viewer | The viewer who can view all resources in the DevOps project. |
| operator | The normal member in a DevOps project who can create pipelines and credentials in the DevOps project. |
| admin | The administrator in the DevOps project who can perform any action on any resource. It gives full control over all resources in the DevOps project. |
## Create a DevOps Project Role
1. Log in to the console as `devops-admin` and select a DevOps project (e.g. `demo-devops`) under the **DevOps Projects** list.
{{< notice note >}}
The account `devops-admin` is used as an example. As long as the account you are using is granted a role including the authorization of **Project Members View**, **Project Roles Management** and **Project Roles View** in **Access Control** at DevOps project level, it can create a DevOps project role.
{{</ notice >}}
2. Go to **Project Roles** in **Project Management**, click **Create** and set a **Role Identifier**. In this example, a role named `pipeline-creator` will be created. Click **Edit Authorization** to continue.
![Create a devops project role](/images/docs/devops-admin/devops_role_step1.png)
3. In **Pipelines Management**, select the authorization that you want the user granted this role to have. For example, **Pipelines Management** and **Pipelines View** are selected for this role. Click **OK** to finish.
![Edit Authorization](/images/docs/devops-admin/devops_role_step2.png)
{{< notice note >}}
**Depend on** means the major authorization (the one listed after **Depend on**) needs to be selected first so that the affiliated authorization can be assigned.
{{</ notice >}}
4. Newly-created roles will be listed in **Project Roles**. You can click the three dots on the right to edit it.
![Edit Roles](/images/docs/devops-admin/devops_role_list.png)
{{< notice note >}}
The role of `pipeline-creator` is only granted **Pipelines Management** and **Pipelines View**, which may not satisfy your needs. This example is only for demonstration purposes. You can create customized roles based on your needs.
{{</ notice >}}
## Invite a New Member
1. In **Project Management**, select **Project Members** and click **Invite Member**.
2. Invite a user to the DevOps project. Grant the role of `pipeline-creator` to the user.
![invite member](/images/docs/devops-admin/devops_invite_member.png)
{{< notice note >}}
The user must be invited to the DevOps project's workspace first.
{{</ notice >}}
3. After you add a user to the DevOps project, click **OK**. In **Project Members**, you can see the newly invited member listed.
4. You can also change the role of an existing member by editing it, or remove the member from the DevOps project.
![edit member role](/images/docs/devops-admin/devops_user_edit.png)

View File

@ -11,6 +11,22 @@ icon: "/images/docs/docs.svg"
This chapter demonstrates how to deploy KubeSphere on existing Kubernetes clusters hosted on cloud or on-premises. As a highly flexible solution to container orchestration, KubeSphere allows users to deploy it and use its services across all Kubernetes engines.
## Most Popular Pages
Below you will find some of the most viewed and helpful pages in this chapter. It is highly recommended that you refer to them first.
{{< popularPage icon="/images/docs/brand-icons/gke.jpg" title="Deploy KubeSphere on GKE" description="Provision KubeSphere on existing Kubernetes clusters on GKE." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/" >}}
{{< popularPage icon="/images/docs/bitmap.jpg" title="Deploy KubeSphere on AWS EKS" description="Provision KubeSphere on existing Kubernetes clusters on EKS." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/" >}}
{{< popularPage icon="/images/docs/brand-icons/aks.jpg" title="Deploy KubeSphere on AKS" description="Provision KubeSphere on existing Kubernetes clusters on AKS." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/" >}}
{{< popularPage icon="/images/docs/brand-icons/huawei.svg" title="Deploy KubeSphere on CCE" description="Provision KubeSphere on existing Kubernetes clusters on Huawei CCE." link="../installing-on-kubernetes/hosted-kubernetes/install-ks-on-huawei-cce/" >}}
{{< popularPage icon="/images/docs/brand-icons/oracle.jpg" title="Deploy KubeSphere on Oracle OKE" description="Provision KubeSphere on existing Kubernetes clusters on OKE." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/" >}}
{{< popularPage icon="/images/docs/brand-icons/digital-ocean.jpg" title="Deploy KubeSphere on DO" description="Provision KubeSphere on existing Kubernetes clusters on DigitalOcean." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/" >}}
## Introduction
### [Overview](../installing-on-kubernetes/introduction/overview/)
@ -58,9 +74,3 @@ Explore the best practice of installing KubeSphere in an air-gapped environment.
### [Uninstalling KubeSphere from Kubernetes](../installing-on-kubernetes/uninstalling/uninstalling-kubesphere-from-k8s/)
Remove KubeSphere from Kubernetes clusters.
## Most Popular Pages
Below you will find some of the most viewed and helpful pages in this chapter. It is highly recommended that you refer to them first.
{{< popularPage icon="/images/docs/bitmap.jpg" title="Deploy KubeSphere on AWS EKS" description="Provision KubeSphere on existing Kubernetes clusters on EKS." link="../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/" >}}

View File

@ -1,20 +1,22 @@
---
title: "Deploy KubeSphere on Huawei CCE"
keywords: "kubesphere, kubernetes, docker, huawei, cce"
description: "It is to introduce how to install KubeSphere 3.0 on Huaiwei CCE."
keywords: "KubeSphere, Kubernetes, installation, huawei, cce"
description: "How to install KubeSphere on Huawei CCE."
weight: 2275
---
This instruction is about how to install KubeSphere 3.0.0 on [Huaiwei CCE](https://support.huaweicloud.com/en-us/qs-cce/cce_qs_0001.html).
This guide walks you through the steps of deploying KubeSphere on [Huawei CCE](https://support.huaweicloud.com/en-us/qs-cce/cce_qs_0001.html).
## Preparation for Huawei CCE
### Create Kubernetes Cluster
### Create Kubernetes cluster
First, create a Kubernetes Cluster according to the resources. Meet the requirements below (ignore this part if your environment is as required).
First, create a Kubernetes cluster based on the requirements below.
- KubeSphere 3.0.0 supports Kubernetes `1.15.x`, `1.16.x`, `1.17.x`, and `1.18.x` by default. Select a version and create the cluster, e.g. `v1.15.11`, `v1.17.9`.
- Ensure the cloud computing network for your Kubernetes cluster works, or use an elastic IP when “Ato Create”or “Select Existing”; or confiure the network after the cluster is created. Refer to Configure [NAT Gateway](https://support.huaweicloud.com/en-us/productdesc-natgateway/en-us_topic_0086739762.html).
- Select `s3.xlarge.2`  `4-core8GB` for nodes and add more if necessary (3 and more nodes are required for production environment).
- KubeSphere 3.0.0 supports Kubernetes `1.15.x`, `1.16.x`, `1.17.x`, and `1.18.x` by default. Select a version and create the cluster, e.g. `v1.15.11` or `v1.17.9`.
- Ensure the cloud computing network for your Kubernetes cluster works, or use an elastic IP when you use “Auto Create” or “Select Existing”. You can also configure the network after the cluster is created. Refer to Configure [NAT Gateway](https://support.huaweicloud.com/en-us/productdesc-natgateway/en-us_topic_0086739762.html).
- Select `s3.xlarge.2` (4-core, 8 GB) for nodes and add more if necessary (3 or more nodes are required for a production environment).
### Create a public key for kubectl
@ -29,16 +31,19 @@ After you get the configuration file for kubectl, use kubectl command lines to v
$ kubectl version
Client Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.8", GitCommit:"9f2892aab98fe339f3bd70e3c470144299398ace", GitTreeState:"clean", BuildDate:"2020-08-15T10:08:56Z", GoVersion:"go1.14.7", Compiler:"gc", Platform:"darwin/amd64"}
Server Version: version.Info{Major:"1", Minor:"17+", GitVersion:"v1.17.9-r0-CCE20.7.1.B003-17.36.3", GitCommit:"136c81cf3bd314fcbc5154e07cbeece860777e93", GitTreeState:"clean", BuildDate:"2020-08-08T06:01:28Z", GoVersion:"go1.13.9", Compiler:"gc", Platform:"linux/amd64"}
```
## KubeSphere Deployment
### Create a custom StorageClass
> Huawei CCE built-in Everest CSI provides StorageClass `csi-disk` which uses SATA (normal I/O) by default, but the actual disk that is for Kubernetes clusters is either SAS (high I/O) or SSD (extremely high I/O). So it is suggested that create an extra StorageClass and set it as default for later. Refer to the official document - [Use kubectl to create a cloud storage](https://support.huaweicloud.com/en-us/usermanual-cce/cce_01_0044.html).
{{< notice note >}}
Below is an example to create a SAS(high I/O) for its corresponding StorageClass.
Huawei CCE built-in Everest CSI provides StorageClass `csi-disk` which uses SATA (normal I/O) by default, but the actual disk that is used for Kubernetes clusters is either SAS (high I/O) or SSD (extremely high I/O). Therefore, it is suggested that you create an extra StorageClass and set it as default. Refer to the official document - [Use kubectl to create a cloud storage](https://support.huaweicloud.com/en-us/usermanual-cce/cce_01_0044.html).
{{</ notice >}}
Below is an example to create a SAS (high I/O) for its corresponding StorageClass.
```yaml
# csi-disk-sas.yaml
@ -64,19 +69,21 @@ volumeBindingMode: Immediate
```
For how to set up or cancel a default StorageClass, refer to Kubernetes official document - [Change Default StorageClass](https://kubernetes.io/docs/tasks/administer-cluster/change-default-storage-class/)
For how to set up or cancel a default StorageClass, refer to Kubernetes official document - [Change Default StorageClass](https://kubernetes.io/docs/tasks/administer-cluster/change-default-storage-class/).
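For example, marking the new class as default and removing the flag from the built-in class can be done with `kubectl patch`. A sketch, assuming the class created above is named `csi-disk-sas`:
```bash
# Mark the SAS-backed class as the default StorageClass
kubectl patch storageclass csi-disk-sas -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
# Remove the default flag from the built-in class
kubectl patch storageclass csi-disk -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'
```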
### Use ks-installer to minimize the deployment
Use [ks-installer](https://github.com/kubesphere/ks-installer) to deploy KubeSphere on an existing Kubernetes cluster. It is suggested that you install it in minimal size.
Use [ks-installer](https://github.com/kubesphere/ks-installer) to deploy KubeSphere on an existing Kubernetes cluster. Execute the following commands directly for a minimal installation:
```bash
$ kubectl apply -f https://raw.githubusercontent.com/kubesphere/ks-installer/v3.0.0/deploy/kubesphere-installer.yaml
$ kubectl apply -f https://raw.githubusercontent.com/kubesphere/ks-installer/v3.0.0/deploy/cluster-configuration.yaml
```
Go to `Workload` > `Pod`, and check the running status of the pod in `kubesphere-system` of its namespace to understand the minimal deployment of KubeSphere. `ks-console-xxxx` of the namespace to understand the app availability of KubeSphere console.
```bash
$ kubectl apply -f https://raw.githubusercontent.com/kubesphere/ks-installer/v3.0.0/deploy/cluster-configuration.yaml
```
Go to `Workload` > `Pod`, and check the running status of the pods in the `kubesphere-system` namespace to verify the minimal deployment of KubeSphere. Check the `ks-console-xxxx` pod in that namespace to verify the availability of the KubeSphere console.
![Deploy KubeSphere in Minimal](/images/docs/huawei-cce/en/deploy-ks-minimal.png)
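If you prefer checking from the command line instead of the console, a minimal sketch using standard kubectl:
```bash
# Watch the KubeSphere system pods come up
kubectl get pods -n kubesphere-system -w
```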
@ -88,7 +95,7 @@ Go to `Resource Management` > `Network` and choose the service in `ks-console`.
![Expose KubeSphere Console](/images/docs/huawei-cce/en/expose-ks-console.png)
Default settings are OK for other detailed configurations. You can also set it as you need.
Default settings are OK for other detailed configurations. You can also set it based on your needs.
![Edit KubeSphere Console SVC](/images/docs/huawei-cce/en/edit-ks-console-svc.png)
@ -96,16 +103,16 @@ After you set LoadBalancer for KubeSphere console, you can visit it via the give
![Log in KubeSphere Console](/images/docs/huawei-cce/en/login-ks-console.png)
### Start add-ons via KubeSphere
## Enable Pluggable Components (Optional)
When KubeSphere can be visited via the Internet, all the actions can be done on the console. Refer to the document - `Start add-ons in KubeSphere 3.0`.
The example above demonstrates the process of a default minimal installation. To enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/) for more details.
{{< notice note >}}
Before you use Istio-based features of KubeSphere, you have to delete `applications.app.k8s.io` built in Huawei CCE due to the CRD conflict. You can run the command `kubectl delete crd applications.app.k8s.io` directly to delete it.
{{</ notice >}}
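For convenience, the command mentioned in the note above can be run as is:

```bash
kubectl delete crd applications.app.k8s.io
```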
After your component is installed, go to the **Cluster Management** page, and you will see the interface below. You can check the status of your component in **Components**.
![Full View of KubeSphere Console](/images/docs/huawei-cce/en/view-ks-console-full.png)

View File

@ -13,15 +13,15 @@ As an open-source project on [GitHub](https://github.com/kubesphere), KubeSphere
Users are provided with multiple installation options. Please note not all options are mutually exclusive. For instance, you can deploy KubeSphere with minimal packages on multiple nodes in an air-gapped environment.
- [All-in-One](../../../quick-start/all-in-one-on-linux/): Install KubeSphere on a single node. It is only for users to quickly get familiar with KubeSphere.
- [Multi-Node](../multioverview/): Install KubeSphere on multiple nodes. It is for testing or development.
- [Air-gapped Installation on Linux](../air-gapped-installation): All images of KubeSphere have been encapsulated into a package. It is convenient for air-gapped installation on Linux machines.
- High Availability Installation: Install KubeSphere with high availability on multiple nodes, which is used for the production environment.
- Minimal Packages: Only install the minimum required system components of KubeSphere. Here is the minimum resource requirement:
- 2vCPUs
- 4GB RAM
- 40GB Storage
- [Full Packages](../../../pluggable-components/): Install all available system components of KubeSphere such as DevOps, service mesh, and alerting.
For the installation on Kubernetes, see Overview of Installing on Kubernetes.
@ -31,7 +31,7 @@ For the installation on Kubernetes, see Overview of Installing on Kubernetes.
- For all-in-one installation, the only one node is both the master and the worker.
- For multi-node installation, you need to specify the node roles in the configuration file before installation.
- Your Linux host must have OpenSSH Server installed.
- Please check [Port Requirements](../port-firewall) before installation.
## KubeKey
@ -51,7 +51,7 @@ If you have existing Kubernetes clusters, please refer to [Installing on Kuberne
## Quick Installation for Development and Testing
KubeSphere has decoupled some components since v2.1.0. KubeKey only installs necessary components by default, which features fast installation and minimal resource consumption. If you want to enable enhanced pluggable functionalities, see [Enable Pluggable Components](../../../pluggable-components/) for details.
The quick installation of KubeSphere is only for development or testing since it uses local volume for storage by default. If you want a production installation, see HA Cluster Configuration.
@ -60,7 +60,7 @@ The quick installation of KubeSphere is only for development or testing since it
{{< notice note >}}
For air-gapped installation, please refer to [this tutorial](../air-gapped-installation/).
{{</ notice >}}
@ -69,7 +69,7 @@ For air-gapped installation, please refer to [Install KubeSphere on Air Gapped L
KubeKey allows users to install a highly available cluster for production. Users need to configure load balancers and persistent storage services in advance.
- [Persistent Storage Configuration](../storage-configuration): By default, KubeKey uses [Local Volume](https://kubernetes.io/docs/concepts/storage/volumes/#local) based on [openEBS](https://openebs.io/) to provide storage services with dynamic provisioning in Kubernetes clusters. It is convenient for the quick installation of a testing environment. In a production environment, it must have a storage server set up. Please refer to [Persistent Storage Configuration](../storage-configuration) for details.
- Load Balancer Configuration for HA installation: Before you get started with multi-node installation in a production environment, you need to configure load balancers. Cloud load balancers, Nginx and `HAproxy + Keepalived` all work for the installation.
For more information, see HA Cluster Configuration. You can also see the specific steps of HA installations across major cloud providers in Installing on Public Cloud.
@ -93,7 +93,7 @@ The following links explain how to configure different types of persistent stora
### Add New Nodes
With KubeKey, you can scale the number of nodes to meet higher resource needs after the installation, especially in a production environment. For more information, see [Add New Nodes](../../../installing-on-linux/cluster-operation/add-new-nodes/).
### Remove Nodes
@ -105,8 +105,8 @@ KubeKey allows you to set a new storage class after the installation. You can se
For more information, see Add New Storage Classes.
## Uninstalling
Uninstalling KubeSphere means it will be removed from the machines, which is irreversible. Please be cautious with the operation.
For more information, see [Uninstalling](../../../installing-on-linux/uninstalling/uninstalling-kubesphere-and-kubernetes/).

View File

@ -1,20 +1,52 @@
---
title: "Persistent Storage Configuration"
keywords: 'Kubernetes, docker, KubeSphere, storage, volume, PVC, KubeKey, add-on'
description: 'Persistent Storage Configuration'
linkTitle: "Persistent Storage Configuration"
weight: 2140
---
## Overview
Persistent volumes are a **Must** for installing KubeSphere. [KubeKey](https://github.com/kubesphere/kubekey) lets KubeSphere be installed on different storage systems by the [add-on mechanism](https://github.com/kubesphere/kubekey/blob/v1.0.0/docs/addons.md). General steps of installing KubeSphere by KubeKey on Linux are:
1. Install Kubernetes.
2. Install the **add-on** plugin for KubeSphere.
3. Install KubeSphere by [ks-installer](https://github.com/kubesphere/ks-installer).
In KubeKey configurations, `spec.persistence.storageClass` of `ClusterConfiguration` needs to be set for ks-installer to create a PersistentVolumeClaim (PVC) for KubeSphere. If it is empty, the **default StorageClass** (the one whose annotation `storageclass.kubernetes.io/is-default-class` is set to `true`) will be used.
```yaml
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
spec:
persistence:
storageClass: ""
...
```
Therefore, an available StorageClass **must** be installed in Step 2 above. It includes:
- StorageClass itself
- Storage Plugin for the StorageClass if necessary
This tutorial introduces **KubeKey add-on configurations** for some mainly used storage plugins. If `spec.persistence.storageClass` is empty, the default StorageClass will be installed. Refer to the following sections if you want to configure other storage systems.
## QingCloud CSI
If you plan to install KubeSphere on [QingCloud](https://www.qingcloud.com/), [QingCloud CSI](https://github.com/yunify/qingcloud-csi) can be chosen as the underlying storage plugin. The following is an example of KubeKey add-on configurations for QingCloud CSI installed by **Helm Charts including a StorageClass**.
### Chart Config
```yaml
config:
qy_access_key_id: "MBKTPXWCIRIEDQYQKXYL" # <--ToBeReplaced-->
qy_secret_access_key: "cqEnHYZhdVCVif9qCUge3LNUXG1Cb9VzKY2RnBdX" # <--ToBeReplaced-->
zone: "pek3a" # <--ToBeReplaced-->
sc:
isDefaultClass: true
```
If you want to configure more values, see [chart configuration for QingCloud CSI](https://github.com/kubesphere/helm-charts/tree/master/src/test/csi-qingcloud#configuration).
### Add-on Config
Save the above chart config locally (e.g. `/root/csi-qingcloud.yaml`). The add-on config for QingCloud CSI can be as follows:
```yaml
addons:
- name: csi-qingcloud
namespace: kube-system
@ -22,16 +54,24 @@ addons:
chart:
name: csi-qingcloud
repo: https://charts.kubesphere.io/test
values: /root/csi-qingcloud.yaml
```
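For context, the `addons` list above goes into the KubeKey cluster configuration file (kind `Cluster`). The following is a minimal sketch, assuming a configuration file named `config-sample.yaml`; the host and role definitions are omitted here:

```yaml
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
  name: sample
spec:
  hosts: []        # node definitions omitted in this sketch
  roleGroups: {}   # etcd/master/worker groups omitted in this sketch
  addons:
  - name: csi-qingcloud
    namespace: kube-system
    sources:
      chart:
        name: csi-qingcloud
        repo: https://charts.kubesphere.io/test
        values: /root/csi-qingcloud.yaml
```

The cluster can then be created with a command like `./kk create cluster -f config-sample.yaml`.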
## NFS Client
With an NFS server, you can choose [NFS-client Provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) as the storage plugin. NFS-client Provisioner creates the PersistentVolume dynamically. The following is an example of KubeKey add-on configurations for NFS-client Provisioner installed by **Helm Charts including a StorageClass**.
### Chart Config
```yaml
nfs:
server: "192.168.0.27" # <--ToBeReplaced->
path: "/mnt/csi/" # <--ToBeReplaced->
storageClass:
defaultClass: false
```
If you want to configure more values, see [chart configuration for nfs-client](https://github.com/kubesphere/helm-charts/tree/master/src/main/nfs-client-provisioner#configuration).
### Add-on Config
Save the above chart config locally (e.g. `/root/nfs-client.yaml`). The add-on config for NFS-client Provisioner can be as follows:
```yaml
addons:
- name: nfs-client
@ -40,43 +80,39 @@ addons:
chart:
name: nfs-client-provisioner
repo: https://charts.kubesphere.io/main
values: /root/nfs-client.yaml
```
## Ceph
With a Ceph server, you can choose [Ceph RBD](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd) or [Ceph CSI](https://github.com/ceph/ceph-csi) as the underlying storage plugin. Ceph RBD is an in-tree storage plugin on Kubernetes, and Ceph CSI is a Container Storage Interface (CSI) driver for RBD, CephFS.
### Which Plugin to Select for Ceph
Ceph CSI RBD is the preferred choice if you work with **14.0.0 (Nautilus)+** Ceph cluster. Here are some reasons:
- The in-tree plugin will be deprecated in the future.
- Ceph RBD only works on Kubernetes with **hyperkube** images, and **hyperkube** images were
[deprecated since Kubernetes 1.17](https://github.com/kubernetes/kubernetes/pull/85094).
- Ceph CSI has more features such as cloning, expanding and snapshots.
### Ceph CSI RBD
Ceph-CSI needs to be installed on v1.14.0+ Kubernetes and works with a 14.0.0 (Nautilus)+ Ceph cluster.
For details about compatibility, see [Ceph CSI Support Matrix](https://github.com/ceph/ceph-csi#support-matrix).
The following is an example of KubeKey add-on configurations for Ceph CSI RBD installed by **Helm Charts**.
As the StorageClass is not included in the chart, a StorageClass needs to be configured in the add-on config.
#### Chart Config
```yaml
csiConfig:
- clusterID: "cluster1"
monitors:
- "192.168.0.8:6789" # <--TobeReplaced-->
- "192.168.0.9:6789" # <--TobeReplaced-->
- "192.168.0.10:6789" # <--TobeReplaced-->
```
If you want to configure more values, see [chart configuration for ceph-csi-rbd](https://github.com/ceph/ceph-csi/tree/master/charts/ceph-csi-rbd).
#### StorageClass (including secret)
```yaml
apiVersion: v1
kind: Secret
@ -85,7 +121,7 @@ metadata:
namespace: kube-system
stringData:
userID: admin
userKey: "AQDoECFfYD3DGBAAm6CPhFS8TQ0Hn0aslTlovw==" # <--ToBeReplaced-->
encryptionPassphrase: test_passphrase
---
apiVersion: storage.k8s.io/v1
@ -98,7 +134,7 @@ metadata:
provisioner: rbd.csi.ceph.com
parameters:
clusterID: "cluster1"
pool: "rbd" # <--ToBeReplaced-->
imageFeatures: layering
csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
csi.storage.k8s.io/provisioner-secret-namespace: kube-system
@ -112,8 +148,9 @@ allowVolumeExpansion: true
mountOptions:
- discard
```
#### Add-on Config
Save the above chart config and StorageClass locally (e.g. `/root/ceph-csi-rbd.yaml` and `/root/ceph-csi-rbd-sc.yaml`). The add-on configuration can be set like:
```yaml
addons:
- name: ceph-csi-rbd
@ -122,18 +159,46 @@ addons:
chart:
name: ceph-csi-rbd
repo: https://ceph.github.io/csi-charts
values: /root/ceph-csi-rbd.yaml
- name: ceph-csi-rbd-sc
sources:
yaml:
path:
- /root/ceph-csi-rbd-sc.yaml
```
### Ceph RBD
KubeKey will never use **hyperkube** images. Hence, in-tree Ceph RBD may not work on Kubernetes installed by KubeKey. However, if your Ceph cluster is older than 14.0.0 (which means Ceph CSI cannot be used), [rbd provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/rbd) can be used as a substitute for Ceph RBD. Its format is the same as [in-tree Ceph RBD](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd).
The following is an example of KubeKey add-on configurations for rbd provisioner installed by **Helm Charts including a StorageClass**.
#### Chart Config
```yaml
ceph:
mon: "192.168.0.12:6789" # <--ToBeReplaced-->
adminKey: "QVFBS1JkdGRvV0lySUJBQW5LaVpSKzBRY2tjWmd6UzRJdndmQ2c9PQ==" # <--ToBeReplaced-->
userKey: "QVFBS1JkdGRvV0lySUJBQW5LaVpSKzBRY2tjWmd6UzRJdndmQ2c9PQ==" # <--ToBeReplaced-->
sc:
isDefault: false
```
If you want to configure more values, see [chart configuration for rbd-provisioner](https://github.com/kubesphere/helm-charts/tree/master/src/test/rbd-provisioner#configuration).
#### Add-on Config
Save the above chart config locally (e.g. `/root/rbd-provisioner.yaml`). The add-on config for rbd provisioner can be as follows:
```yaml
- name: rbd-provisioner
namespace: kube-system
sources:
chart:
name: rbd-provisioner
repo: https://charts.kubesphere.io/test
values: /root/rbd-provisioner.yaml
```
## Glusterfs
[Glusterfs](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs) is an in-tree storage plugin in Kubernetes. Hence, **only StorageClass** needs to be installed.
The following is an example of KubeKey add-on configurations for glusterfs.
### StorageClass (including secret)
```yaml
apiVersion: v1
kind: Secret
@ -142,7 +207,7 @@ metadata:
namespace: kube-system
type: kubernetes.io/glusterfs
data:
key: "MTIzNDU2" # <--ToBeReplaced-->
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
@ -152,25 +217,24 @@ metadata:
storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce","ReadOnlyMany","ReadWriteMany"]'
name: glusterfs
parameters:
clusterid: "21240a91145aee4d801661689383dcd1" # <--ToBeReplaced-->
gidMax: "50000"
gidMin: "40000"
restauthenabled: "true"
resturl: "http://192.168.0.14:8080" # <--ToBeReplaced-->
restuser: admin
secretName: heketi-secret
secretNamespace: kube-system
volumetype: "replicate:2" # <--ToBeReplaced-->
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
```
For detailed information, see [Configuration](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs).
### Add-on Config
Save the above StorageClass yaml locally (e.g. **/root/glusterfs-sc.yaml**). The add-on configuration can be set like:
```yaml
addons:
- name: glusterfs
sources:
@ -180,8 +244,12 @@ Save the YAML file of StorageClass locally (e.g. **/root/glusterfs-sc.yaml**). T
```
## OpenEBS/LocalVolumes
[OpenEBS](https://github.com/openebs/openebs) Dynamic Local PV provisioner can create Kubernetes Local Persistent Volumes
using a unique HostPath (directory) on the node to persist data.
It is very convenient for users to get started with KubeSphere when they have no special storage system.
If **no default StorageClass** is configured with **KubeKey** add-on, OpenEBS/LocalVolumes will be installed.
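Before running ks-installer, you can verify which StorageClass will be treated as the default; the `(default)` marker in the output indicates it:

```bash
kubectl get sc
```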
## Multi-Storage
If you intend to install more than one storage plugin, please only set one of them as the default, or set `spec.persistence.storageClass` of `ClusterConfiguration` to the name of the StorageClass you want KubeSphere to use. Otherwise, [ks-installer](https://github.com/kubesphere/ks-installer) will be confused about which StorageClass to use.

View File

@ -25,21 +25,25 @@ This tutorial walks you through an example of how to create Keepalived and HAPro
## Prepare Linux Hosts
This tutorial creates 8 virtual machines of **CentOS Linux release 7.6.1810 (Core)** with the default minimal installation. Every machine has 2 Cores, 4 GB memory and 40 G disk space.
| Host IP | Host Name | Role |
| --- | --- | --- |
|10.10.71.214|master1|master, etcd|
|10.10.71.73|master2|master, etcd|
|10.10.71.62|master3|master, etcd|
|10.10.71.75|node1|worker|
|10.10.71.76|node2|worker|
|10.10.71.79|node3|worker|
|10.10.71.67|vip|vip(No need to create a VM)|
|10.10.71.77|lb-0|lb (Keepalived + HAProxy)|
|10.10.71.66|lb-1|lb (Keepalived + HAProxy)|
{{< notice warning >}}
The `vip` is a virtual IP address; there is no need to create a virtual machine for it, so only 8 virtual machines need to be created.
{{</ notice >}}
Create virtual machines in the VMware Host Client. You can follow the New Virtual Machine wizard to create a virtual machine to place in the VMware Host Client inventory.
![create](/images/docs/vsphere/kubesphereOnVsphere-en-0-1-create.png)

View File

@ -17,9 +17,7 @@ KubeSphere separates [frontend](https://github.com/kubesphere/console) from [bac
| Back-end component | Function description |
|---|---|
| ks-apiserver | The KubeSphere API server validates and configures data for the API objects which include Kubernetes objects. The API Server services REST operations and provides the frontend to the cluster's shared state through which all other components interact. |
| ks-console | KubeSphere console offers KubeSphere console service |
| ks-controller-manager | KubeSphere controller takes care of business logic, for example, when create a workspace, the controller will automatically create corresponding permissions and configurations for it. |
| metrics-server | Kubernetes monitoring component collects metrics from Kubelet on each node. |

View File

@ -18,7 +18,7 @@ KubeSphere delivers **consolidated views while integrating a wide breadth of eco
As a lightweight platform, KubeSphere has become more friendly to different cloud ecosystems as it does not change Kubernetes itself at all. In other words, KubeSphere can be deployed **on any existing version-compatible Kubernetes cluster on any infrastructure** including virtual machine, bare metal, on-premises, public cloud and hybrid cloud. KubeSphere users have the choice of installing KubeSphere on cloud and container platforms, such as Alibaba Cloud, AWS, QingCloud, Tencent Cloud, Huawei Cloud and Rancher, and even importing and managing their existing Kubernetes clusters created using major Kubernetes distributions. The seamless integration of KubeSphere into existing Kubernetes platforms means that the business of users will not be affected, without any modification to their current resources or assets. For more information, see [Installing on Linux](../../installing-on-linux/) and [Installing on Kubernetes](../../installing-on-kubernetes/).
KubeSphere hides the details of underlying infrastructure for users and helps enterprises modernize, migrate, deploy and manage existing and containerized apps seamlessly across a variety of infrastructure types. This is how KubeSphere empowers developers and Ops teams to focus on application development and accelerate DevOps automated workflows and delivery processes with enterprise-level observability and troubleshooting, unified monitoring and logging, centralized storage and networking management, easy-to-use CI/CD pipelines, and so on.
![KubeSphere Overview](https://pek3b.qingstor.com/kubesphere-docs/png/20200224091526.png)

View File

@ -135,7 +135,11 @@ kubectl -n kubesphere-system rollout restart deployment ks-apiserver
### Prepare a Member Cluster
In order to manage the member cluster within the **host cluster**, you need to make `jwtSecret` the same between them. Therefore, you need to get it first from the **host cluster** by the following command.
```bash
kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret

View File

@ -66,7 +66,11 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
### Prepare a Member Cluster
In order to manage the member cluster within the **host cluster**, you need to make `jwtSecret` the same between them. Therefore, you need to get it first from the **host cluster** by the following command.
```bash
kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret

View File

@ -1,10 +0,0 @@
---
title: "Project Members"
keywords: 'KubeSphere, kubernetes, docker, helm, jenkins, istio, prometheus'
description: 'Project Members'
linkTitle: "Project Members"
weight: 2130
---
TBD

View File

@ -1,10 +0,0 @@
---
title: "Project Roles"
keywords: 'KubeSphere, kubernetes, docker, helm, jenkins, istio, prometheus'
description: 'Volume Snapshots'
linkTitle: "Project Roles"
weight: 2130
---
TBD

View File

@ -0,0 +1,92 @@
---
title: "Role and Member Management"
keywords: 'KubeSphere, Kubernetes, role, member, management, project'
description: 'Role and Member Management in a Project'
linkTitle: "Role and Member Management"
weight: 2130
---
This guide demonstrates how to manage roles and members in your project. For more information about KubeSphere roles, see Overview of Role Management.
In project scope, you can grant the following resources' permissions to a role:
- Application Workloads
- Storage
- Configurations
- Monitoring & Alerting
- Project Settings
- Access Control
## Prerequisites
At least one project has been created, such as `demo-project`. Besides, you need an account of the `admin` role (e.g. `project-admin`) at the project level. See [Create Workspace, Project, Account and Role](../../quick-start/create-workspace-and-project/) if it is not ready yet.
## Built-in Roles
In **Project Roles**, there are three available built-in roles as shown below. Built-in roles are created automatically by KubeSphere when a project is created and they cannot be edited or deleted. You can only review permissions and authorized users.
| Built-in Roles | Description |
| ------------------ | ------------------------------------------------------------ |
| viewer | The viewer who can view all resources in the project. |
| operator | The maintainer of the project who can manage resources other than users and roles in the project. |
| admin | The administrator in the project who can perform any action on any resource. It gives full control over all resources in the project. |
1. In **Project Roles**, click `admin` and you can see the role detail as shown below.
![view role details](/images/docs/project-admin/project_role_detail.png)
2. You can switch to **Authorized Users** tab to see all the users that are granted an `admin` role.
## Create a Project Role
1. Log in to the console as `project-admin` and select a project (e.g. `demo-project`) under the **Projects** list.
{{< notice note >}}
The account `project-admin` is used as an example. As long as the account you are using is granted a role including the authorization of **Project Members View**, **Project Roles Management** and **Project Roles View** in **Access Control** at project level, it can create a project role.
{{</ notice >}}
2. Go to **Project Roles** in **Project Settings**, click **Create** and set a **Role Identifier**. In this example, a role named `project-monitor` will be created. Click **Edit Authorization** to continue.
![Create a project role](/images/docs/project-admin/project_role_create_step1.png)
3. Select the authorization that you want the user granted this role to have. For example, **Application Workloads View** in **Application Workloads**, and **Alerting Messages View** and **Alerting Policies View** in **Monitoring & Alerting** are selected for this role. Click **OK** to finish.
![Edit Authorization](/images/docs/project-admin/project_role_create_step2.png)
{{< notice note >}}
**Depend on** means the major authorization (the one listed after **Depend on**) needs to be selected first so that the affiliated authorization can be assigned.
{{</ notice >}}
4. Newly-created roles will be listed in **Project Roles**. You can click the three dots on the right to edit it.
![Edit Roles](/images/docs/project-admin/project_role_list.png)
{{< notice note >}}
The role of `project-monitor` is only granted limited permissions in **Monitoring & Alerting**, which may not satisfy your needs. This example is only for demonstration purposes. You can create customized roles based on your needs.
{{</ notice >}}
## Invite a New Member
1. In **Project Settings**, select **Project Members** and click **Invite Member**.
2. Invite a user to the project. Grant the role of `project-monitor` to the user.
![invite member](/images/docs/project-admin/project_invite_member_step2.png)
{{< notice note >}}
The user must be invited to the project's workspace first.
{{</ notice >}}
3. After you add a user to the project, click **OK**. In **Project Members**, you can see the newly invited member listed.
4. You can also change the role of an existing member by editing it, or remove the member from the project.
![edit member role](/images/docs/project-admin/project_user_edit.png)

View File

@ -1,10 +1,108 @@
---
title: "Jobs"
keywords: "kubesphere, kubernetes, docker, jobs"
description: "Create a Kubernetes Job"
weight: 2260
---
A Job creates one or more Pods and ensures that a specified number of them successfully terminate. As Pods successfully complete, the Job tracks the successful completions. When a specified number of successful completions is reached, the task (i.e. the Job) is complete. Deleting a Job will clean up the Pods it created.
A simple case is to create one Job object in order to reliably run one Pod to completion. The Job object will start a new Pod if the first Pod fails or is deleted (for example due to a node hardware failure or a node reboot).
You can also use a Job to run multiple Pods in parallel.
## Prerequisites
- You need to create a workspace, a project and a `project-regular` account. Please refer to [Getting Started with Multi-tenant Management](../../../quick-start/create-workspace-and-project) if they are not ready yet.
- You need to sign in with the `project-admin` account and invite `project-regular` to the corresponding project. Please refer to [Invite Member](../../../quick-start/create-workspace-and-project#task-3-create-a-project).
## Create a Job
### Step 1. Open Modal
1. Go to **Application Workloads** and click **Jobs**.
2. Click **Create** button to open the modal.
![](/images/docs/job-list.png)
### Step 2. Basic Info
Enter the basic information.
- **Name**: The name of the job, which is also the unique identifier.
- **Alias**: The alias name of the job, making resources easier to identify.
![](/images/docs/job-create-basic-info.png)
### Step 3. Job Settings
Enter the job parameters (optional).
![](/images/docs/job-create-job-settings.png)
#### Job Parameters
| Name | Definition | Description |
| ----------------------- | ---------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Backoff Limit | `spec.backoffLimit` | Specifies the number of retries before marking this job failed. Defaults to 6. |
| Completions | `spec.completions` | Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ |
| Parallelism | `spec.parallelism` | Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ |
| Active Deadline Seconds | `spec.activeDeadlineSeconds` | Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer |
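For reference, the following is a minimal sketch of a Job manifest that sets the parameters above; the name, image and command are placeholders rather than values used by KubeSphere:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: demo-job                 # placeholder name
spec:
  backoffLimit: 6                # Backoff Limit
  completions: 4                 # desired number of successfully finished Pods
  parallelism: 2                 # maximum number of Pods running at any given time
  activeDeadlineSeconds: 300     # terminate the Job if it runs longer than this
  template:
    spec:
      restartPolicy: Never       # a Job only allows Never or OnFailure
      containers:
      - name: demo
        image: busybox           # placeholder image
        command: ["sh", "-c", "echo Hello KubeSphere && sleep 10"]
```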
### Step 4. Container Image
Set up the **container images**.
![](/images/docs/job-container-settings.png)
- **Restart Policy** can only be set to `Never` or `OnFailure`. When the job is not completed:
  - If **Restart Policy** is set to `Never`, the Job creates a new Pod when the Pod fails, and the failed Pod does not disappear.
  - If **Restart Policy** is set to `OnFailure`, the Job internally restarts the container when the Pod fails, instead of creating a new Pod.
- To add a container for the job, please refer to [Pod Containers](../deployments) for details.
### Step 5. Mount Volumes
Refer to [Pod Volumes](../deployments) for details.
### Step 6. Advanced Settings
Refer to [Deployment Advanced Settings](../deployments) for details.
### Step 7. Check Result
If successful, a new item will be added to the Job list.
![](/images/docs/job-list-new.png)
## Check Job Details
You can check a job's details by clicking the job's name in the list.
### Job Operations
- **Edit Info**: Edit the basic information of the job except `Name`.
- **Rerun Job**: Rerun the job. The Pod will restart and a new execution record will be generated.
- **View YAML**: View the job's specification in YAML format.
- **Delete**: Delete the job, and return to the job list page.
![](/images/docs/job-actions.png)
### Execution Records
You can check the execution records of the job.
![](/images/docs/job-detail-records.jpg)
### Resource Status
Click the **Resource Status** tab to check the Pods of the job.
- The Pod list provides detailed information about each Pod (conditions, phase, node, Pod IP and monitoring).
- You can view the container information by clicking a Pod item.
- Click the container log icon to view the output logs of the container.
- You can view the Pod detail page by clicking the Pod name.
![](/images/docs/job-detail-pods.png)

View File

@ -178,7 +178,7 @@ A route refers to Ingress in Kubernetes, which is an API object that manages ext
{{< notice note >}}
If you want to expose services using the type `LoadBalancer`, you need to use the LoadBalancer plugin of cloud providers. If your Kubernetes cluster is running in a bare metal environment, it is recommended you use [Porter](https://github.com/kubesphere/porter) as the LoadBalancer plugin.
{{</ notice >}}
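As a rough illustration (not part of the original guide), a Service of the `LoadBalancer` type looks like the sketch below; it only receives an external IP when a LoadBalancer plugin, such as the one from your cloud provider or Porter on bare metal, is available:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: demo-service        # placeholder name
spec:
  type: LoadBalancer        # requires a cloud LoadBalancer plugin or Porter on bare metal
  selector:
    app: demo               # placeholder label selector
  ports:
  - port: 80
    targetPort: 8080
```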

View File

@ -0,0 +1,22 @@
---
title: "Toolbox"
description: "Help you to better understand KubeSphere toolbox"
layout: "single"
linkTitle: "Toolbox"
weight: 5500
icon: "/images/docs/docs.svg"
---
This chapter demonstrates how to use the KubeSphere toolbox to perform log queries and run commands with web kubectl.
## [Log Query](../toolbox/log-query/)
Understand how you can perform quick log queries to keep track of the latest logs of your cluster.
## [Web Kubectl](../toolbox/web-kubectl/)
The web kubectl tool is integrated into KubeSphere to provide consistent user experiences for Kubernetes users.

View File

@ -0,0 +1,85 @@
---
title: "Log Query"
keywords: 'KubeSphere, Kubernetes, log'
description: 'Query Kubernetes logs from toolbox'
linkTitle: "Log Query"
weight: 5510
---
The logs of applications and systems can help you better understand what is happening inside your cluster and workloads. The logs are particularly useful for debugging problems and monitoring cluster activities. KubeSphere provides a powerful and easy-to-use logging system which offers users the capabilities of log collection, query and management from the perspective of tenants. The tenant-based logging system is much more useful than Kibana since different tenants can only view their own logs, leading to better security. Moreover, KubeSphere logging system filters out some redundant information so that tenants can only focus on logs that are useful to them.
## Objective
In this tutorial, you will learn how to use the log query function, including the interface, search parameters and detail pages.
## Prerequisites
- You need to enable [KubeSphere Logging System](../../pluggable-components/logging/).
## Enter Log Query Interface
1. The log query function is available for all users. Log in to the console with any account, hover over the **Toolbox** in the lower right corner and select **Log Search**.
![log-query-guide](/images/docs/log-query/log-query-guide.png)
2. As shown in the pop-up window, you can see a time histogram of log numbers, a cluster selection drop-down list and a log search bar.
![log-query-interface](/images/docs/log-query/log-query-interface.png)
{{< notice note >}}
- KubeSphere supports log queries on each cluster separately. You can switch the target cluster using the drop-down list next to the log search bar.
- Supported fields in the log search bar:
- Keyword
- Project
- Workload
- Pod
- Container
- Time Range
- The keyword field supports the query of keyword combinations. For example, you can use "Error", "Fail", "Fatal", "Exception", and "Warning" together to query all the exception logs.
- The keyword field supports exact query and fuzzy query. The fuzzy query provides case-insensitive fuzzy matching and retrieval of full terms by the first half of a word or phrase based on the ElasticSearch segmentation rules. For example, you can retrieve the logs containing `node_cpu_total` by searching the keyword `node_cpu` instead of the keyword `cpu`.
{{</ notice >}}
![log-query-time-range](/images/docs/log-query/log-query-time-range.png)
3. You can customize the query time range by selecting **Time Range** in the log search bar. Alternatively, click on the bars in the time histogram, and KubeSphere will use the time range of that bar for log queries.
{{< notice note >}}
- KubeSphere stores logs for the last seven days by default.
- Each cluster has its own log retention period which can be set separately. You can modify it in `ClusterConfiguration`. Refer to [KubeSphere Logging System](../../pluggable-components/logging/) for more details.
{{</ notice >}}
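As an example of such a modification, the retention period is typically controlled by the Elasticsearch settings in `ClusterConfiguration`; the `logMaxAge` field below is an assumption to verify against your KubeSphere version:

```yaml
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
spec:
  common:
    es:
      logMaxAge: 7   # assumed field: number of days to keep logs (7 by default)
```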
## Use Search Parameters
1. You can provide as many fields as possible to narrow down your search results. Below is an example of a log query on the cluster `product` with the keyword `error` in the project `kubesphere-system` within `last 12 hours`.
![log-query-log-search](/images/docs/log-query/log-query-log-search.png)
2. It returns logs of 13 rows with the corresponding time, project, pod and container information all displayed.
3. Click any one of the results from the list. Drill into its detail page and inspect the log from this pod, including the complete context on the right. It is convenient for developers in terms of debugging and analyzing.
{{< notice note >}}
The log query interface supports dynamic refreshing with 5s, 10s or 15s, and allows users to export logs to a local file for further analysis (in the top-right corner).
{{</ notice >}}
![log-query-log-detail](/images/docs/log-query/log-query-log-detail.png)
4. As you can see from the left panel, you can switch between pods and inspect its containers within the same project from the drop-down list. In this case, you can detect if any abnormal pods affect other pods.
![log-query-inspect-other-pods](/images/docs/log-query/log-query-inspect-other-pods.png)
## Drill into Detail Page
1. If the log looks abnormal, you can drill into the pod detail page or container detail page to further inspect container logs, resource monitoring graphs and events.
![log-query-drill](/images/docs/log-query/log-query-drill.png)
2. Inspect the container detail page as follows. At the same time, it allows you to open the terminal to debug the container directly.
![log-query-drill-container](/images/docs/log-query/log-query-drill-container.png)

View File

@ -0,0 +1,52 @@
---
title: "Web Kubectl"
keywords: 'KubeSphere, Kubernetes, kubectl, cli'
description: 'Use kubectl from toolbox'
linkTitle: "Web Kubectl"
weight: 5515
---
The Kubernetes command-line tool, kubectl, allows you to run commands on Kubernetes clusters. You can use kubectl to deploy applications, inspect and manage cluster resources, and view logs.
KubeSphere provides web kubectl on the console for user convenience. By default, in the current version, only the account granted the `platform-admin` role (such as the default account `admin`) has the permission to use web kubectl for cluster resource operation and management.
## Objective
In this tutorial, you will learn how to use web kubectl to operate on and manage cluster resources.
## Use Web Kubectl
1. Log in to KubeSphere with an account granted the `platform-admin` role, hover over the **Toolbox** in the lower right corner and select **Kubectl**.
![web-kubectl-enter](/images/docs/web-kubectl/web-kubectl-enter.png)
2. You can see the kubectl interface as shown in the pop-up window. If you have enabled the multi-cluster feature, you need to select the target cluster first from the drop-down list in the upper right corner. This drop-down list is not visible if the multi-cluster feature is not enabled.
![web-kubectl-cluster-select](/images/docs/web-kubectl/web-kubectl-cluster-select.png)
3. Enter kubectl commands in the command-line tool to query and manage Kubernetes cluster resources. For example, execute the following command to query the status of all PVCs in the cluster.
```bash
kubectl get pvc --all-namespaces
```
![web-kubectl-example](/images/docs/web-kubectl/web-kubectl-example.png)
4. Use the following syntax to run kubectl commands from your terminal window:
```bash
kubectl [command] [TYPE] [NAME] [flags]
```
{{< notice note >}}
- Where `command`, `TYPE`, `NAME`, and `flags` are:
- `command`: Specifies the operation that you want to perform on one or more resources, such as `create`, `get`, `describe` and `delete`.
- `TYPE`: Specifies the [resource type](https://kubernetes.io/docs/reference/kubectl/overview/#resource-types). Resource types are case-insensitive and you can specify the singular, plural, or abbreviated forms.
- `NAME`: Specifies the name of the resource. Names are case-sensitive. If the name is omitted, details for all resources are displayed, such as `kubectl get pods`.
- `flags`: Specifies optional flags. For example, you can use the `-s` or `--server` flags to specify the address and port of the Kubernetes API server.
- If you need help, run `kubectl help` from the terminal window or refer to the [Kubernetes kubectl CLI documentation](https://kubernetes.io/docs/reference/kubectl/overview/).
{{</ notice >}}
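For instance, combining the parts above, a command that describes a specific Pod in a given namespace could look like the following (the Pod name is a placeholder):

```bash
# command=describe, TYPE=pod, NAME=ks-console-xxxx, flags=-n kubesphere-system
kubectl describe pod ks-console-xxxx -n kubesphere-system
```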

View File

@ -71,7 +71,7 @@ Upgrading steps are different for single-node clusters (all-in-one) and multi-no
The following command upgrades your single-node cluster to KubeSphere v3.0.0 and Kubernetes v1.17.9 (default):
```bash
./kk upgrade --with-kubernetes v1.17.9 --with-kubesphere v3.0.0
```
To upgrade Kubernetes to a specific version, please explicitly provide the version after the flag `--with-kubernetes`. Available versions are:
@ -112,7 +112,7 @@ Please refer to the Cluster section of [config-example.yaml](https://github.com/
The following command upgrades your cluster to KubeSphere v3.0.0 and Kubernetes v1.17.9 (default):
```bash
./kk upgrade --with-kubernetes v1.17.9 --with-kubesphere v3.0.0 -f config-sample.yaml
```
To upgrade Kubernetes to a specific version, please explicitly provide the version after the flag `--with-kubernetes`. Available versions are:

View File

@ -1,10 +0,0 @@
---
title: "Role and Member Management"
keywords: "kubernetes, workspace, kubesphere, multitenancy"
description: "Role and Member Management in a Workspace"
linkTitle: "Role and Member Management"
weight: 200
---
TBD

View File

@ -0,0 +1,93 @@
---
title: "Role and Member Management"
keywords: "Kubernetes, workspace, KubeSphere, multitenancy"
description: "Role and Member Management in a Workspace"
linkTitle: "Role and Member Management"
weight: 200
---
This guide demonstrates how to manage roles and members in your workspace. For more information about KubeSphere roles, see Overview of Role Management.
In workspace scope, you can grant the following resources' permissions to a role:
- Projects
- DevOps
- Access Control
- Apps Management
- Workspace Settings
## Prerequisites
At least one workspace has been created, such as `demo-workspace`. Besides, you need an account of the `workspace-admin` role (e.g. `ws-admin`) at the workspace level. See [Create Workspace, Project, Account and Role](../../quick-start/create-workspace-and-project/) if it is not ready yet.
{{< notice note >}}
The actual role name follows a naming convention: `workspace name-role name`. For example, for a workspace named `demo-workspace`, the actual role name of the role `workspace-admin` is `demo-workspace-admin`.
{{</ notice >}}
## Built-in Roles
In **Workspace Roles**, there are four available built-in roles as shown below. Built-in roles are created automatically by KubeSphere when a workspace is created and they cannot be edited or deleted. You can only review permissions and authorized users.
| Built-in Roles | Description |
| ------------------ | ------------------------------------------------------------ |
| workspace-viewer | The viewer in the workspace who can view all resources in the workspace. |
| workspace-self-provisioner | The regular user in the workspace who can create projects and DevOps projects. |
| workspace-regular | The regular user in the workspace who cannot create projects or DevOps projects. |
| workspace-admin | The administrator in the workspace who can perform any action on any resource. It gives full control over all resources in the workspace. |
1. In **Workspace Roles**, click `workspace-admin` and you can see the role details as shown below.
![invite member](/images/docs/ws-admin/workspace_role_detail.png)
2. You can switch to **Authorized Users** tab to see all the users that are granted a `workspace-admin` role.
## Create a Workspace Role
1. Log in to the console as `ws-admin` and go to **Workspace Roles** in **Workspace Settings**.
{{< notice note >}}
The account `ws-admin` is used as an example. As long as the account you are using is granted a role including the authorization of **Workspace Members View**, **Workspace Roles Management** and **Workspace Roles View** in **Access Control** at the workspace level, it can create a workspace role.
{{</ notice >}}
2. In **Workspace Roles**, click **Create** and set a **Role Identifier**. In this example, a role named `workspace-projects-admin` will be created. Click **Edit Authorization** to continue.
![Create a workspace role](/images/docs/ws-admin/workspace_role_create_step1.png)
3. In **Projects management**, select the authorization that you want the user granted this role to have. For example, **Projects Create**, **Projects Management**, and **Projects View** are selected for this role. Click **OK** to finish.
![Edit Authorization](/images/docs/ws-admin/workspace_role_create_step2.png)
{{< notice note >}}
**Depend on** means the major authorization (the one listed after **Depend on**) needs to be selected first so that the affiliated authorization can be assigned.
{{</ notice >}}
4. Newly-created roles will be listed in **Workspace Roles**. You can click the three dots on the right to edit it.
![Edit Roles](/images/docs/ws-admin/workspace_role_edit.png)
{{< notice note >}}
The role of `workspace-projects-admin` is only granted **Projects Create**, **Projects Management**, and **Projects View**, which may not satisfy your needs. This example is only for demonstration purposes. You can create customized roles based on your needs.
{{</ notice >}}
## Invite a New Member
1. In **Workspace Settings**, select **Workspace Members** and click **Invite Member**.
2. Invite a user to the workspace. Grant the role `workspace-projects-admin` to the user.
![invite member](/images/docs/ws-admin/workspace_invite_user.png)
3. After you add a user to the workspace, click **OK**. In **Workspace Members**, you can see the newly invited member listed.
4. You can also change the role of an existing member by editing it, or remove the member from the workspace.
![edit member role](/images/docs/ws-admin/workspace_user_edit.png)

View File

@ -0,0 +1,13 @@
---
title: "support"
css: "scss/contribution.scss"
section1:
title: 'Community is the Soul of KubeSphere'
content: 'Join the community to get help, get involved, or get updates and KubeSphere news!'
topImage: "/images/contribution/contribution-top.jpg"
sectionIframe:
formUrl: https://jinshuju.net/f/bDS8me/embedded.js?inner_redirect=false&banner=show&background=white&height=1838
---

View File

@ -2,7 +2,7 @@
title: "Documentation"
css: "scss/docs.scss"
LinkTitle: "Documentation"
LinkTitle: "文档"
section1:

View File

@ -1,71 +1,71 @@
---
title: "Deploy KubeSphere on Oracle OKE"
keywords: 'Kubernetes, KubeSphere, OKE, Installation, Oracle-cloud'
description: 'How to install KubeSphere on Oracle OKE'
title: "在 Oracle OKE 上部署 KubeSphere"
keywords: 'Kubernetes, KubeSphere, OKE, 安装, Oracle-cloud'
description: '如何在 Oracle OKE 上安装 KubeSphere'
weight: 2247
---
This guide walks you through the steps of deploying KubeSphere on [Oracle Kubernetes Engine](https://www.oracle.com/cloud/compute/container-engine-kubernetes.html).
## Create a Kubernetes Cluster
- A standard Kubernetes cluster in OKE is a prerequisite of installing KubeSphere. Go to the navigation menu and refer to the image below to create a cluster.
![oke-cluster](https://ap3.qingstor.com/kubesphere-website/docs/oke-cluster.jpg)
- In the pop-up window, select **Quick Create** and click **Launch Workflow**.
![oke-quickcreate](https://ap3.qingstor.com/kubesphere-website/docs/oke-quickcreate.jpg)
{{< notice note >}}
In this example, **Quick Create** is used for demonstration which will automatically create all the resources necessary for a cluster in Oracle Cloud. If you select **Custom Create**, you need to create all the resources (such as VCN and LB Subnets) yourself.
{{</ notice >}}
- Next, you need to set the cluster with basic information. Here is an example for your reference. When you finish, click **Next**.
![](https://ap3.qingstor.com/kubesphere-website/docs/cluster-setting.jpg)
{{< notice note >}}
- Supported Kubernetes versions for KubeSphere 3.0.0: 1.15.x, 1.16.x, 1.17.x, 1.18.x.
- It is recommended that you select **Public** for **Visibility Type**, which will assign a public IP address to every node. The IP address can be used later to access the web console of KubeSphere.
- In Oracle Cloud, a Shape is a template that determines the number of CPUs, amount of memory, and other resources that are allocated to an instance. `VM.Standard.E2.2 (2 CPUs and 16G Memory)` is used in this example. For more information, see [Standard Shapes](https://docs.cloud.oracle.com/en-us/iaas/Content/Compute/References/computeshapes.htm#vmshapes__vm-standard).
- 3 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment.
{{</ notice >}}
- Review cluster information and click **Create Cluster** if no adjustment is needed.
4. 检查集群信息,确认无需修改后点击**创建集群**。
![](https://ap3.qingstor.com/kubesphere-website/docs/create-cluster.jpg)
![完成创建集群](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/完成创建集群.jpg)
- After the cluster is created, click **Close**.
5. 集群创建后,点击**关闭**。
![cluster-ready](https://ap3.qingstor.com/kubesphere-website/docs/cluster-ready.jpg)
![集群创建完成](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/集群创建完成.jpg)
- Make sure the Cluster Status is **Active** and click **Access Cluster**.
6. 确保集群状态为**活动**后,点击**访问集群**。
![access-cluster](https://ap3.qingstor.com/kubesphere-website/docs/access-cluster.jpg)
![访问集群](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/访问集群.jpg)
- In the pop-up window, select **Cloud Shell Access** to access the cluster. Click **Launch Cloud Shell** and copy the code provided by Oracle Cloud.
7. 在弹出窗口中,选择 **Cloud Shell 访问权限**。点击**启动 Cloud Shell**,并将 Oracle Cloud 所提供的命令复制到 Cloud Shell。
![cloud-shell-access](https://ap3.qingstor.com/kubesphere-website/docs/cloudshell-access.png)
![启动Cloud-shell](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/启动Cloud-shell.jpg)
- In Cloud Shell, paste the command so that we can execute the installation command later.
8. 在 Cloud Shell 中,粘贴该命令以便之后可以执行 KubeSphere 安装命令。
![cloud-shell-oke](https://ap3.qingstor.com/kubesphere-website/docs/oke-cloud-shell.png)
![cloud-shell-oke](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/cloud-shell-oke.jpg)
{{< notice warning >}}
If you do not copy and execute the command above, you cannot proceed with the steps below.
如果您不在 Cloud Shell 中执行该命令,您无法继续进行以下操作。
{{</ notice >}}
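For reference, the command generated by Oracle Cloud typically follows the OCI CLI form sketched below. The cluster OCID and region are placeholders, so always copy the exact command shown in your own console instead of this illustrative one.

```bash
# Illustrative form of the kubeconfig command Oracle Cloud provides (copy yours from the console)
oci ce cluster create-kubeconfig \
  --cluster-id <your-cluster-OCID> \
  --file $HOME/.kube/config \
  --region <your-region> \
  --token-version 2.0.0
```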
## Install KubeSphere on OKE
## 在 OKE 上安装 KubeSphere
- Install KubeSphere using kubectl. The following command is only for the default minimal installation.
1. 使用 kubectl 安装 KubeSphere。直接输入以下命令会默认执行 KubeSphere 的最小化安装。
```bash
kubectl apply -f https://raw.githubusercontent.com/kubesphere/ks-installer/v3.0.0/deploy/kubesphere-installer.yaml
@ -75,13 +75,13 @@ kubectl apply -f https://raw.githubusercontent.com/kubesphere/ks-installer/v3.0.
kubectl apply -f https://raw.githubusercontent.com/kubesphere/ks-installer/v3.0.0/deploy/cluster-configuration.yaml
```
- Inspect the logs of installation:
2. 检查安装日志:
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
```
- When the installation finishes, you can see the following message:
3. 安装完成后会输出以下信息:
```bash
#####################################################
@ -104,37 +104,37 @@ NOTES
https://kubesphere.io 20xx-xx-xx xx:xx:xx
```
## Access KubeSphere Console
## 访问 KubeSphere 控制台
Now that KubeSphere is installed, you can access the web console of KubeSphere either through `NodePort` or `LoadBalancer`.
KubeSphere 安装完成后,您可以通过 `NodePort` 或 `LoadBalancer` 的模式访问 KubeSphere 的 Web 控制台。
- Check the service of KubeSphere console through the following command:
1. 通过以下命令查看 KubeSphere 控制台的服务状态。
```bash
kubectl get svc -n kubesphere-system
```
- The output may look as below. You can change the type to `LoadBalancer` so that the external IP address can be exposed.
2. 输出如下,您可以将类型修改为 `LoadBalancer`,从而暴露外部 IP 地址。
![console-nodeport](https://ap3.qingstor.com/kubesphere-website/docs/nodeport-console.jpg)
{{< notice tip >}}
It can be seen above that the service `ks-console` is being exposed through NodePort, which means you can access the console directly via `NodeIP:NodePort` (the public IP address of any node is applicable). You may need to open port `30880` in firewall rules.
在上图中,`ks-console` 服务通过 `NodePort` 的类型暴露,即您可以通过 `NodeIP:NodePort` 的方式直接访问 Web 控制台(任意节点的公共 IP 都可用)。您可能需要在防火墙中提前开启端口 30880。
{{</ notice >}}
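If the node's operating system also runs a local firewall, port `30880` may need to be opened there as well. A minimal sketch, assuming a firewalld-based distribution (security rules in the OCI console may also be required):

```bash
# Open the NodePort used by ks-console (assumes firewalld is in use on the node)
sudo firewall-cmd --permanent --add-port=30880/tcp
sudo firewall-cmd --reload
```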
- Execute the command to edit the service configuration.
3. 执行以下命令编辑服务配置。
```bash
kubectl edit svc ks-console -o yaml -n kubesphere-system
```
- Navigate to `type` and change `NodePort` to `LoadBalancer`. Save the configuration after you finish.
4. 将 `type` 字段所对应的值修改为 `LoadBalancer`,然后保存配置。
![](https://ap3.qingstor.com/kubesphere-website/docs/change-service-type.png)
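Alternatively, if you prefer a non-interactive change, the same result can be achieved with `kubectl patch` (a sketch equivalent to the edit above):

```bash
# Switch the ks-console service from NodePort to LoadBalancer without opening an editor
kubectl -n kubesphere-system patch svc ks-console -p '{"spec":{"type":"LoadBalancer"}}'
```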
- Execute the following command again and you can see the IP address displayed as below.
5. 再次执行以下命令,您可以看到 IP 地址现已暴露(如下图)。
```bash
kubectl get svc -n kubesphere-system
@ -142,11 +142,11 @@ kubectl get svc -n kubesphere-system
![console-service](https://ap3.qingstor.com/kubesphere-website/docs/console-service.png)
- Log in the console through the external IP address with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard shown below:
6. 访问此外部 IP 地址并通过默认的账号和密码 (`admin/P@88w0rd`) 登录 Web 控制台。在**集群管理**页面,您可以看到集群概览。
![kubesphere-oke-dashboard](https://ap3.qingstor.com/kubesphere-website/docs/kubesphere-oke-dashboard.png)
![概览页面](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/概览页面.jpg)
## Enable Pluggable Components (Optional)
## 启用可拔插组件(可选)
The example above demonstrates the process of a default minimal installation. To enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/) for more details.
以上示例演示默认最小化安装的流程。若要启用 KubeSphere 中的其他组件,请参见[启用可拔插组件](../../../pluggable-components/)查看详细步骤。
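To enable a component after installation, the cluster configuration is usually edited in place and the installer reconciles the change. A minimal sketch (see the linked tutorial for the authoritative steps):

```bash
# Edit the ClusterConfiguration and set the desired component's "enabled" field to true
kubectl -n kubesphere-system edit clusterconfiguration ks-installer
```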

View File

@ -13,15 +13,15 @@ As an open-source project on [GitHub](https://github.com/kubesphere), KubeSphere
Users are provided with multiple installation options. Please note that these options are not mutually exclusive. For instance, you can deploy KubeSphere with minimal packages on multiple nodes in an air-gapped environment.
- [All-in-One](../all-in-one): Install KubeSphere on a single node. It is only for users to quickly get familiar with KubeSphere.
- [Multi-Node](../multi-node): Install KubeSphere on multiple nodes. It is for testing or development.
- [Install KubeSphere on Air-gapped Linux](../install-ks-on-linux-airgapped): All images of KubeSphere have been encapsulated into a package. It is convenient for air-gapped installation on Linux machines.
- [High Availability Installation](../master-ha): Install high availability KubeSphere on multiple nodes which is used for the production environment.
- [All-in-One](../../../quick-start/all-in-one-on-linux/): Install KubeSphere on a single node. It is only for users to quickly get familiar with KubeSphere.
- [Multi-Node](../multioverview/): Install KubeSphere on multiple nodes. It is for testing or development.
- [Air-gapped Installation on Linux](../air-gapped-installation): All images of KubeSphere have been encapsulated into a package. It is convenient for air-gapped installation on Linux machines.
- High Availability Installation: Install a highly available KubeSphere cluster on multiple nodes, which is intended for production environments.
- Minimal Packages: Only install the minimum required system components of KubeSphere. Here are the minimum resource requirements:
- 2 vCPUs
- 4GB RAM
- 40GB Storage
- [Full Packages](../complete-installation): Install all available system components of KubeSphere such as DevOps, service mesh, and alerting.
- Full Packages: Install all available system components of KubeSphere such as DevOps, service mesh, and alerting.
For the installation on Kubernetes, see Overview of Installing on Kubernetes.
@ -31,7 +31,7 @@ For the installation on Kubernetes, see Overview of Installing on Kubernetes.
- For all-in-one installation, the only node serves as both the master and the worker.
- For multi-node installation, you need to specify the node roles in the configuration file before installation.
- Your Linux host must have OpenSSH Server installed.
- Please check the [ports requirements](../port-firewall) before installation.
- Please check [Port Requirements](../port-firewall) before installation.
## KubeKey
@ -45,13 +45,13 @@ Three scenarios to use KubeKey:
{{< notice note >}}
If you have existing Kubernetes clusters, please refer to [Installing on Kubernetes](https://kubesphere.io/docs/installing-on-kubernetes/).
If you have existing Kubernetes clusters, please refer to [Installing on Kubernetes](../../../installing-on-kubernetes/).
{{</ notice >}}
## Quick Installation for Development and Testing
KubeSphere has decoupled some components since v2.1.0. KubeKey only installs necessary components by default as this way features fast installation and minimal resource consumption. If you want to enable enhanced pluggable functionalities, see [Overview of Pluggable Components](../intro#pluggable-components-overview) for details.
KubeSphere has decoupled some components since v2.1.0. KubeKey only installs the necessary components by default, which allows for fast installation and minimal resource consumption. If you want to enable enhanced pluggable functionalities, see [Enable Pluggable Components](../../../pluggable-components/) for details.
The quick installation of KubeSphere is only for development or testing since it uses local volume for storage by default. If you want a production installation, see HA Cluster Configuration.
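For reference, a typical KubeKey quick-install flow looks roughly like the following (a sketch, assuming a v1.0.x KubeKey release; check the KubeKey releases page for the exact version and download command):

```bash
# Download KubeKey (version number is illustrative) and make it executable
curl -sfL https://get-kk.kubesphere.io | VERSION=v1.0.1 sh -
chmod +x kk

# Create a default cluster with KubeSphere 3.0.0 for development or testing
./kk create cluster --with-kubernetes v1.17.9 --with-kubesphere v3.0.0
```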
@ -60,7 +60,7 @@ The quick installation of KubeSphere is only for development or testing since it
{{< notice note >}}
For air-gapped installation, please refer to [Install KubeSphere on Air Gapped Linux Machines](../install-ks-on-linux-airgapped).
For air-gapped installation, please refer to [this tutorial](../air-gapped-installation/).
{{</ notice >}}
@ -69,7 +69,7 @@ For air-gapped installation, please refer to [Install KubeSphere on Air Gapped L
KubeKey allows users to install a highly available cluster for production. Users need to configure load balancers and persistent storage services in advance.
- [Persistent Storage Configuration](../storage-configuration): By default, KubeKey uses [Local Volume](https://kubernetes.io/docs/concepts/storage/volumes/#local) based on [openEBS](https://openebs.io/) to provide storage services with dynamic provisioning in Kubernetes clusters. It is convenient for the quick installation of a testing environment. In a production environment, a storage server must be set up in advance. Please refer to [Persistent Storage Configuration](../storage-configuration) for details.
- [Load Balancer Configuration for HA installation](../master-ha): Before you get started with multi-node installation in a production environment, you need to configure load balancers. Cloud load balancers, Nginx and `HAproxy + Keepalived` all work for the installation.
- Load Balancer Configuration for HA Installation: Before you get started with multi-node installation in a production environment, you need to configure load balancers. Cloud load balancers, Nginx, and `HAProxy + Keepalived` all work for the installation.
For more information, see HA Cluster Configuration. You can also see the specific step of HA installations across major cloud providers in Installing on Public Cloud.
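Once the load balancer and persistent storage are ready and `config-sample.yaml` has been filled in accordingly, the HA cluster is typically created from that file (a sketch, assuming KubeKey):

```bash
# Create the highly available cluster from the prepared configuration file
./kk create cluster -f config-sample.yaml
```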
@ -93,7 +93,7 @@ The following links explain how to configure different types of persistent stora
### Add New Nodes
With KubeKey, you can scale the number of nodes to meet higher resource needs after the installation, especially in a production environment. For more information, see [Add New Nodes](../add-nodes).
With KubeKey, you can scale the number of nodes to meet higher resource needs after the installation, especially in a production environment. For more information, see [Add New Nodes](../../../installing-on-linux/cluster-operation/add-new-nodes/).
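As a sketch, assuming the cluster was created with KubeKey and the new hosts have been appended to `config-sample.yaml`, scaling out usually comes down to one command:

```bash
# Add the newly declared hosts to the existing cluster
./kk add nodes -f config-sample.yaml
```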
### Remove Nodes
@ -105,8 +105,8 @@ KubeKey allows you to set a new storage class after the installation. You can se
For more information, see Add New Storage Classes.
## Uninstall
## Uninstalling
Uninstalling KubeSphere means it will be removed from the machines, which is irreversible. Please be cautious with the operation.
For more information, see [Uninstall](../uninstall).
For more information, see [Uninstalling](../../../installing-on-linux/uninstalling/uninstalling-kubesphere-and-kubernetes/).
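For a cluster created with KubeKey, the removal is typically driven by the same configuration file (a sketch; back up any data you need first, as the operation cannot be undone):

```bash
# Delete the cluster described by config-sample.yaml (irreversible)
./kk delete cluster -f config-sample.yaml
```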

View File

@ -22,20 +22,24 @@ description: 'How to install KubeSphere on VMware vSphere Linux machines'
## 创建主机
本示例创建 9 台 **CentOS Linux release 7.6.1810 (Core)** 的虚拟机,默认的最小化安装,每台配置为 2 Core 4 GB 40 G 即可。
本示例创建 8 台 **CentOS Linux release 7.6.1810 (Core)** 的虚拟机,默认的最小化安装,每台配置为 2 Core 4 GB 40 G 即可。
| 主机 IP | 主机名称 | 角色 |
| --- | --- | --- |
|10.10.71.214|master1|master1, etcd|
|10.10.71.73|master2|master2, etcd|
|10.10.71.62|master3|master3, etcd|
|10.10.71.75|node1|node|
|10.10.71.76|node2|node|
|10.10.71.79|node3|node|
|10.10.71.67|vip|vip|
|10.10.71.214|master1|master, etcd|
|10.10.71.73|master2|master, etcd|
|10.10.71.62|master3|master, etcd|
|10.10.71.75|node1|worker|
|10.10.71.76|node2|worker|
|10.10.71.79|node3|worker|
|10.10.71.67|vip|虚拟 IP(不是实际的主机)|
|10.10.71.77|lb-0|lbkeepalived + haproxy|
|10.10.71.66|lb-1|lbkeepalived + haproxy|
{{< notice warning >}}
vip 所在的是虚拟 IP,并不需要创建主机,所以只需要创建 8 台虚拟机。
{{</ notice >}}
选择可创建的资源池,点击右键-新建虚拟机(创建虚拟机的入口有好几个,自己选择)
![0-1-新创](/images/docs/vsphere/kubesphereOnVsphere-zh-0-1-1-create-type.png)

View File

@ -1,9 +1,9 @@
---
title: "Introduction"
description: "Help you to better understand KubeSphere with detailed graphics and contents"
title: "产品介绍"
description: "通过详细的图文介绍帮助您更好地了解 KubeSphere"
layout: "single"
linkTitle: "Introduction"
linkTitle: "产品介绍"
weight: 1000
@ -11,29 +11,28 @@ icon: "/images/docs/docs.svg"
---
This chapter gives you an overview of the basic concepts of KubeSphere, its features, advantages, use cases, and more.
本章概述了 KubeSphere 的基本概念、功能、优势以及应用场景等。
## [What is KubeSphere](https://kubesphere.io/docs/introduction/what-is-kubesphere/)
## [什么是 KubeSphere](../introduction/what-is-kubesphere/)
Develop a basic understanding of KubeSphere and highlighted features of its latest version.
使您对 KubeSphere 有基本的了解,并介绍最新版本的新特性。
## [Features](https://kubesphere.io/docs/introduction/features/)
## [平台功能](../introduction/features/)
Get started with KubeSphere by understanding what KubeSphere is capable of and how you can make full use of it.
了解 KubeSphere 的功能以及如何充分利用 KubeSphere。
## [Architecture](https://kubesphere.io/docs/introduction/architecture/)
## [架构说明](../introduction/architecture/)
Explore the structure of KubeSphere to get a clear view of the components both at front end and back end.
探索 KubeSphere 的结构,以清晰地了解前端和后端的组件。
## [Advantages](https://kubesphere.io/docs/introduction/advantages/)
## [为什么选择 KubeSphere](../introduction/advantages/)
Understand the reason why KubeSphere is beneficial to your work.
了解 KubeSphere 如何给您的工作带来帮助。
## [Use Cases](https://kubesphere.io/docs/introduction/scenarios/)
## [应用场景](../introduction/scenarios/)
See how KubeSphere can be used in different scenarios, such as multi-cluster deployment, DevOps and service mesh.
了解如何在不同的场景中使用 KubeSphere例如敏捷开发与自动化运维、DevOps 持续集成与交付等。
## [Glossary](https://kubesphere.io/docs/introduction/glossary/)
Learn terms and phrases that are used in KubeSphere.
## [名词解释](../introduction/glossary/)
了解 KubeSphere 中涉及到的基本概念。

View File

@ -1,60 +1,60 @@
---
title: "What is KubeSphere"
keywords: 'Kubernetes, KubeSphere, Introduction'
description: 'What is KubeSphere'
title: "什么是 KubeSphere"
keywords: 'Kubernetes, KubeSphere, 介绍'
description: '什么是 KubeSphere'
weight: 1100
---
## Overview
## 概述
[KubeSphere](https://kubesphere.io) is a **distributed operating system managing cloud-native applications** with [Kubernetes](https://kubernetes.io) as its kernel, providing a plug-and-play architecture for the seamless integration of third-party applications to boost its ecosystem.
[KubeSphere](https://kubesphere.io) 是在 [Kubernetes](https://kubernetes.io) 之上构建的面向云原生应用的**分布式操作系统**,支持多云与多集群管理,提供全栈的 IT 自动化运维的能力,简化企业的 DevOps 工作流。它的架构可以非常方便地使第三方应用与云原生生态组件进行即插即用 (plug-and-play) 的集成。
KubeSphere also represents a multi-tenant enterprise-grade container platform with full-stack automated IT operation and streamlined DevOps workflows. It provides developer-friendly wizard web UI, helping enterprises to build out a more robust and feature-rich platform. It boasts the most common functionalities needed for enterprise Kubernetes strategies, such as Kubernetes resource management, DevOps (CI/CD), application lifecycle management, monitoring, logging, service mesh, multi-tenancy, alerting and notification, auditing, storage and networking, autoscaling, access control, GPU support, multi-cluster deployment and management, network policy, registry management, and security management.
作为全栈化容器部署与多租户管理平台KubeSphere 提供了运维友好的向导式操作界面,帮助企业快速构建一个强大和功能丰富的容器云平台。它拥有 Kubernetes 企业级服务所需的最常见功能,例如 Kubernetes 资源管理、DevOps、多集群部署与管理、应用生命周期管理、微服务治理、日志查询与收集、服务与网络、多租户管理、监控告警、事件审计、存储、访问控制、GPU 支持、网络策略、镜像仓库管理以及安全管理等。
KubeSphere delivers **consolidated views while integrating a wide breadth of ecosystem tools** around Kubernetes, thus providing consistent user experiences to reduce complexity. At the same time, it also features new capabilities that are not yet available in upstream Kubernetes, alleviating the pain points of Kubernetes including storage, network, security and usability. Not only does KubeSphere allow developers and DevOps teams use their favorite tools in a unified console, but, most importantly, these functionalities are loosely coupled with the platform since they are pluggable and optional.
KubeSphere **围绕 Kubernetes 集成了各种生态系统的工具**,提供了一致的用户体验以降低复杂性。同时,它还具备 Kubernetes 尚未提供的新功能,旨在解决 Kubernetes 本身存在的存储、网络、安全和易用性等痛点。KubeSphere 不仅允许开发人员和 DevOps 团队在统一的控制台中使用他们喜欢的工具,而且最重要的是,这些功能与平台松散耦合,因为他们可以选择是否安装这些可拔插组件。
## Run KubeSphere Everywhere
## 支持在任意平台运行 KubeSphere
As a lightweight platform, KubeSphere has become more friendly to different cloud ecosystems as it does not change Kubernetes itself at all. In other words, KubeSphere can be deployed **on any existing version-compatible Kubernetes cluster on any infrastructure** including virtual machine, bare metal, on-premises, public cloud and hybrid cloud. KubeSphere users have the choice of installing KubeSphere on cloud and container platforms, such as Alibaba Cloud, AWS, QingCloud, Tencent Cloud, Huawei Cloud and Rancher, and even importing and managing their existing Kubernetes clusters created using major Kubernetes distributions. The seamless integration of KubeSphere into existing Kubernetes platforms means that the business of users will not be affected, without any modification to their current resources or assets. For more information, see [Installing on Linux](../../installing-on-linux/) and [Installing on Kubernetes](../../installing-on-kubernetes/).
作为一个轻量级平台KubeSphere 对不同云生态系统的支持变得更加友好,因为它没有对 Kubernetes 本身有任何的 Hack。换句话说KubeSphere 可以**部署并运行在任何基础架构以及所有兼容现有版本的 Kubernetes 集群上**包括虚拟机、裸机、本地环境、公有云和混合云等。KubeSphere 用户可以选择在云和容器平台例如阿里云、AWS、青云QingCloud、腾讯云、华为云和 Rancher 等)上安装 KubeSphere甚至可以导入和管理使用 Kubernetes 发行版创建的现有 Kubernetes 集群。KubeSphere 可以在不修改用户当前的资源或资产、不影响其业务的情况下与现有 Kubernetes 平台无缝集成。有关更多信息,请参见[在 Linux 上安装](../../installing-on-linux/)和[在 Kubernetes 上安装](../../installing-on-kubernetes/)。
KubeSphere screens users from the infrastructure underneath and helps enterprises modernize, migrate, deploy and manage existing and containerized apps seamlessly across a variety of infrastructure types. This is how KubeSphere empowers developers and Ops teams to focus on application development and accelerate DevOps automated workflows and delivery processes with enterprise-level observability and troubleshooting, unified monitoring and logging, centralized storage and networking management, easy-to-use CI/CD pipelines, and so on.
KubeSphere 为用户屏蔽了基础设施底层复杂的技术细节帮助企业在各类基础设施之上无缝地部署、更新、迁移和管理现有的容器化应用。通过这种方式KubeSphere 使开发人员能够专注于应用程序开发,使运维团队能够通过企业级可观察性功能和故障排除机制、统一监控和日志记录、集中式存储和网络管理,以及易用的 CI/CD 流水线来加快 DevOps 自动化工作流程和交付流程等。
![KubeSphere Overview](https://pek3b.qingstor.com/kubesphere-docs/png/20200224091526.png)
## What's New in 3.0
## 3.0 新增功能
- **Multi-cluster Management**. As we usher in an era of hybrid cloud, multi-cluster management has emerged as the call of our times. It represents one of the most necessary features on top of Kubernetes as it addresses the pressing need of our users. In the latest version 3.0, we have equipped KubeSphere with its unique multi-cluster feature that is able to provide a central control plane for clusters deployed in different clouds. Users can import and manage their existing Kubernetes clusters created on the platform of mainstream infrastructure providers (e.g. Amazon EKS and Google Kubernetes Engine). This will greatly reduce the learning cost for our users with operation and maintenance process streamlined as well. Solo and Federation are the two featured patterns for multi-cluster management, making KubeSphere stand out among its counterparts.
- **多集群管理**:随着我们迎来混合云时代,多集群管理已成为我们时代的主题。作为 Kubernetes 上最必要的功能之一,多集群管理可以满足用户的迫切需求。在最新版本 3.0 中,我们为 KubeSphere 配备了多集群功能,该功能可以为部署在不同云中的集群提供一个中央控制面板。用户可以导入和管理在主流基础设施提供商(例如 Amazon EKS 和 Google GKE 等)平台上创建的现有 Kubernetes 集群。通过简化操作和维护流程这将大大降低用户们的学习成本。Solo 和 Federation 是多集群管理的两个特有模式,使 KubeSphere 在同类产品中脱颖而出。
- **Improved Observability**. We have enhanced observability as it becomes more powerful to include custom monitoring, tenant event management, diversified notification methods (e.g. WeChat and Slack) and more features. Among others, users can now customize monitoring dashboards, with a variety of metrics and graphs to choose from for their own needs. It also deserves to mention that KubeSphere 3.0 is compatible with Prometheus, which is the de facto standard for Kubernetes monitoring in the cloud-native industry.
- **改善可观察性**KubeSphere 现支持自定义监控、租户事件管理,以及多样化的通知方法(例如,微信和 Slack可观察性功能大幅增强。另外用户现在可以自定义监控面板并根据自己的需求选择各种监控指标和显示图表。还值得一提的是KubeSphere 3.0 与 Prometheus 兼容,后者是云原生行业中 Kubernetes 监控的事实标准。
- **Enhanced Security**. Security has always remained one of our focuses in KubeSphere. In this regard, feature enhancements can be summarized as follows:
- **增强安全性**:安全始终是我们在 KubeSphere 中关注的重点之一。在这方面,功能增强可以概括如下:
- **Auditing**. Records will be kept to track who does what at what time. The support of auditing is extremely important especially for traditional industries such as finance and banking.
- **审计**:操作记录将被保存,包括操作的人员和时间。添加对审计的支持极为重要,特别是对于金融和银行业等传统行业而言。
- **Network Policy and Isolation**. Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods). By configuring network isolation to control traffic among Pods within the same cluster and traffic from outside, users can isolate applications with security enhanced. They can also decide whether services are accessible externally.
- **网络策略和隔离**:网络策略允许在同一群集内进行网络隔离,这意味着可以在某些实例 (Pod) 之间设置防火墙。通过配置网络策略控制同一集群内 Pod 之间的流量以及来自外部的流量,从而实现应用隔离并增强应用的安全性。用户还可以决定是否可以从外部访问某个服务。
- **Open Policy Agent**. KubeSphere provides flexible, fine-grained access control based on [Open Policy Agent](https://www.openpolicyagent.org/). Users can manage their security and authorization policies in a unified way with a general architecture.
- **Open Policy Agent**KubeSphere 基于 [Open Policy Agent](https://www.openpolicyagent.org/) 提供了细粒度的访问控制。用户可以使用通用体系架构以统一的方式管理其安全性和授权策略。
- **OAuth 2.0**. Users can now easily integrate third-party applications with OAuth 2.0 protocol.
- **OAuth 2.0**:用户现在可以轻松地通过 OAuth 2.0 协议集成第三方应用程序。
- **Web 控制台的多语言支持**KubeSphere 在设计之初便面向全球用户。由于来自全球社区成员们的贡献KubeSphere 3.0 的 Web 控制台现已支持四种官方语言:英文、简体中文、繁体中文和西班牙文。预计将来将支持更多语言。
- **Multilingual Support of Web Console**. KubeSphere is designed for users around the world at the very beginning. Thanks to our community members across the globe, KubeSphere 3.0 now supports four official languages for its web console: English, Simplified Chinese, Traditional Chinese, and Spanish. More languages are expected to be supported going forward.
除了上述主要新增功能之外KubeSphere 3.0 还具有其他功能升级。有关更多详细信息,请参见 3.0.0 的[发行说明](../../release/release-v300/)。
In addition to the above highlights, KubeSphere 3.0 also features other functionality upgrades. For more and detailed information, see [Release Notes for 3.0.0](../../release/release-v300/).
## 开源
## Open Source
借助开源的模式KubeSphere 社区驱动着开发工作以开放的方式进行。KubeSphere **100% 开源免费**,已大规模服务于社区用户,广泛地应用在以 Docker 和 Kubernetes 为中心的开发测试及生产环境中,大量服务平稳地运行在 KubeSphere 之上。您可在 [GitHub](https://github.com/kubesphere/) 上找到所有源代码、文档和讨论。
As we adopt the open source model, development is proceeding in an open way and driven by KubeSphere community. KubeSphere is **100% open source** and available on [GitHub](https://github.com/kubesphere/) where you can find all the source code, documents and discussions. It has been widely installed and used in development, testing and production environments, and a large number of services are running smoothly in KubeSphere.
## 产品规划
## Roadmap
### Express Edition -> KubeSphere 1.0.x -> KubeSphere 2.0.x -> KubeSphere 2.1.x -> KubeSphere 3.0.0
### 易捷版 -> KubeSphere 1.0.x -> KubeSphere 2.0.x -> KubeSphere 2.1.x -> KubeSphere 3.0.0
![Roadmap](https://pek3b.qingstor.com/kubesphere-docs/png/20190926000413.png)
## Landscape
KubeSphere is a member of CNCF and a [Kubernetes Conformance Certified platform](https://www.cncf.io/certification/software-conformance/#logos), further enriching the [CNCF CLOUD NATIVE Landscape](https://landscape.cncf.io/landscape=observability-and-analysis&license=apache-license-2-0).
KubeSphere 是 CNCF 基金会成员并且通过了 [Kubernetes 一致性认证](https://www.cncf.io/certification/software-conformance/#logos),进一步丰富了 [CNCF 云原生的生态](https://landscape.cncf.io/landscape=observability-and-analysis&license=apache-license-2-0)。
![CNCF Landscape](https://pek3b.qingstor.com/kubesphere-docs/png/20191011233719.png)
![CNCF Landscape](https://pek3b.qingstor.com/kubesphere-docs/png/20191011233719.png)

View File

@ -1,23 +1,22 @@
---
title: "Agent Connection"
keywords: 'kubernetes, kubesphere, multicluster, agent-connection'
keywords: 'Kubernetes, KubeSphere, multicluster, agent-connection'
description: 'Overview'
weight: 2343
weight: 3013
---
## Prerequisites
You have already installed at least two KubeSphere clusters, please refer to [Installing on Linux](../../../installing-on-linux) or [Installing on Kubernetes](../../../installing-on-kubernetes) if not yet.
You have already installed at least two KubeSphere clusters. Please refer to [Installing on Linux](../../../installing-on-linux) or [Installing on Kubernetes](../../../installing-on-kubernetes) if they are not ready yet.
{{< notice note >}}
Multi-cluster management requires Kubesphere to be installed on the target clusters. If you have an existing cluster, please install a minimal KubeSphere on it as an agent, see [Installing Minimal KubeSphere on Kubernetes](../../installing-on-kubernetes/minimal-kubesphere-on-k8s) for details.
Multi-cluster management requires KubeSphere to be installed on the target clusters. If you have an existing cluster, you can deploy KubeSphere on it with a minimal installation so that it can be imported. See [Minimal KubeSphere on Kubernetes](../../../quick-start/minimal-kubesphere-on-k8s/) for details.
{{</ notice >}}
## Agent Connection
The component [Tower](https://github.com/kubesphere/tower) of KubeSphere is used for agent connection. Tower is a tool for network connection between clusters through the agent. If the H Cluster cannot access the M Cluster directly, you can expose the proxy service address of the H cluster. This enables the M Cluster to connect to the H cluster through the agent. This method is applicable when the M Cluster is in a private environment (e.g. IDC) and the H Cluster is able to expose the proxy service. The agent connection is also applicable when your clusters are distributed in different cloud providers.
The component [Tower](https://github.com/kubesphere/tower) of KubeSphere is used for agent connection. Tower is a tool for network connection between clusters through the agent. If the H Cluster cannot access the M Cluster directly, you can expose the proxy service address of the H cluster. This enables the M Cluster to connect to the H cluster through the agent. This method is applicable when the M Cluster is in a private environment (e.g. IDC) and the H Cluster is able to expose the proxy service. The agent connection is also applicable when your clusters are distributed across different cloud providers.
### Prepare a Host Cluster
@ -25,11 +24,11 @@ The component [Tower](https://github.com/kubesphere/tower) of KubeSphere is used
{{< tab "KubeSphere has been installed" >}}
If you already have a standalone KubeSphere installed, you can change the `clusterRole` to a host cluster by editing the cluster configuration and **wait for a while**.
If you already have a standalone KubeSphere installed, you can set the value of `clusterRole` to `host` by editing the cluster configuration. You need to **wait for a while** so that the change can take effect.
- Option A - Use Web Console:
Use `cluster-admin` account to enter **Cluster Management → CRDs**, search for the keyword `ClusterConfiguration` and enter its detailed page, edit the YAML of `ks-installer`. This is similar to Enable Pluggable Components.
Use the `admin` account to log in to the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
- Option B - Use Kubectl:
@ -37,7 +36,7 @@ Use `cluster-admin` account to enter **Cluster Management → CRDs**, search for
kubectl edit cc ks-installer -n kubesphere-system
```
Scroll down and change the value of `clusterRole` to `host`, then click **Update** to make it effective:
Scroll down and set the value of `clusterRole` to `host`, then click **Update** (if you use the web console) to make it effective:
```yaml
multicluster:
@ -48,76 +47,82 @@ multicluster:
{{< tab "KubeSphere has not been installed" >}}
There is no big difference if you just start the installation. Please fill in the `jwtSecret` with the value shown as above in `config-sample.yaml` or `cluster-configuration.yaml`:
```yaml
authentication:
jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU
```
Then scroll down and change the `clusterRole` to `member`:
There is no big difference if you define a host cluster before installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set as follows:
```yaml
multicluster:
clusterRole: member
clusterRole: host
```
{{</ tab >}}
{{</ tabs >}}
Then you can use the **kubectl** to retrieve the installation logs to verify the status. Wait for a while, you will be able to see the successful logs return if the host cluster is ready.
You can use **kubectl** to retrieve the installation logs and verify the status by running the following command. Wait for a while, and you will be able to see a success message in the logs if the host cluster is ready.
```
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
```
#### Set Proxy Service Address
### Set Proxy Service Address
After the installation of the Host Cluster, a proxy service called tower will be created in `kubesphere-system`, whose type is **LoadBalancer**.
After the installation of the Host Cluster, a proxy service called tower will be created in `kubesphere-system`, whose type is `LoadBalancer`.
{{< tabs >}}
{{< tab "There is a LoadBalancer in your cluster" >}}
{{< tab "A LoadBalancer available in your cluster" >}}
If a LoadBalancer plugin is available for the cluster, you can see a corresponding address for `EXTERNAL-IP`, which will be acquired by KubeSphere automatically. That means we can skip the step to set the proxy.
If a LoadBalancer plugin is available for the cluster, you can see a corresponding address for `EXTERNAL-IP`, which will be acquired by KubeSphere automatically. That means you can skip the step to set the proxy. Execute the following command to check the service.
```bash
kubectl -n kubesphere-system get svc
```
The output may look as follows:
```shell
$ kubectl -n kubesphere-system get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
tower LoadBalancer 10.233.63.191 139.198.110.23 8080:30721/TCP 16h
```
> Generally, there is always a LoadBalancer solution in the public cloud, and the external IP should be allocated by Load Balancer automatically. If your clusters are running in an on-premises environment (Especially for the **bare metal environment**), we recommend you to use [Porter](https://github.com/porter/porter) as the LB solution.
Note: Generally, there is always a LoadBalancer solution in the public cloud, and the external IP can be allocated by the load balancer automatically. If your clusters are running in an on-premises environment, especially a **bare metal environment**, you can use [Porter](https://github.com/kubesphere/porter) as the LB solution.
{{</ tab >}}
{{< tab "There is not a LoadBalancer in your cluster" >}}
{{< tab "No LoadBalancer available in your cluster" >}}
1. If you cannot see a corresponding address displayed (the EXTERNAL-IP is pending), you need to manually set the proxy address. For example, you have an available public IP address `139.198.120.120`. And the port `8080` of this IP address has been forwarded to the port `30721` of the cluster.
1. If you cannot see a corresponding address displayed (the EXTERNAL-IP is pending), you need to manually set the proxy address. For example, you have an available public IP address `139.198.120.120`, and the port `8080` of this IP address has been forwarded to the port `30721` of the cluster. Execute the following command to check the service.
```shell
kubectl -n kubesphere-system get svc
```
```
```shell
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
tower LoadBalancer 10.233.63.191 <pending> 8080:30721/TCP 16h
tower LoadBalancer 10.233.63.191 <pending> 8080:30721/TCP 16h
```
2. Change the ConfigMap of the ks-installer and input the the address you set before. You can also edit the ConfigMap from **Configuration → ConfigMaps**, search for the keyword `kubesphere-config`, then edit its YAML and add the following configuration:
2. Add the value of `proxyPublishAddress` to the configuration file of ks-installer and input the public IP address and port number as follows.
- Option A - Use Web Console:
Use the `admin` account to log in to the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
- Option B - Use Kubectl:
```bash
kubectl -n kubesphere-system edit cm kubesphere-config
kubectl -n kubesphere-system edit clusterconfiguration ks-installer
```
```
Navigate to `multicluster` and add a new line for `proxyPublishAddress` to define the IP address used to access tower.
```yaml
multicluster:
clusterRole: host
proxyPublishAddress: http://139.198.120.120:8080 # Add this line to set the address to access tower
```
3. Save and update the ConfigMap, then restart the Deployment `ks-apiserver`.
3. Save the configuration and restart `ks-apiserver`.
```shell
kubectl -n kubesphere-system rollout restart deployment ks-apiserver
@ -130,12 +135,14 @@ kubectl -n kubesphere-system rollout restart deployment ks-apiserver
### Prepare a Member Cluster
In order to manage the member cluster within the host cluster, we need to make the jwtSecret same between them. So first you need to get it from the host by the following command.
In order to manage the member cluster within the **host cluster**, you need to make sure they share the same `jwtSecret`. Therefore, you need to retrieve it from the **host cluster** first by running the following command.
```bash
kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret
```
The output may look like this:
```yaml
jwtSecret: "gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU"
```
@ -144,11 +151,11 @@ jwtSecret: "gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU"
{{< tab "KubeSphere has been installed" >}}
If you already have a standalone KubeSphere installed, you can change the `clusterRole` to a host cluster by editing the cluster configuration and **wait for a while**.
If you already have a standalone KubeSphere installed, you can set the value of `clusterRole` to `member` by editing the cluster configuration. You need to **wait for a while** so that the change can take effect.
- Option A - Use Web Console:
Use `cluster-admin` account to enter **Cluster Management → CRDs**, search for the keyword `ClusterConfiguration` and enter its detailed page, edit the YAML of `ks-installer`. This is similar to Enable Pluggable Components.
Use the `admin` account to log in to the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
- Option B - Use Kubectl:
@ -156,14 +163,14 @@ Use `cluster-admin` account to enter **Cluster Management → CRDs**, search for
kubectl edit cc ks-installer -n kubesphere-system
```
Then input the corresponding jwtSecret shown above:
Input the corresponding `jwtSecret` shown above:
```yaml
authentication:
jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU
```
Then scroll down and change the value of `clusterRole` to `member`, then click **Update** to make it effective:
Scroll down and set the value of `clusterRole` to `member`, then click **Update** (if you use the web console) to make it effective:
```yaml
multicluster:
@ -174,14 +181,14 @@ multicluster:
{{< tab "KubeSphere has not been installed" >}}
There is no big difference if you just start the installation. Please fill in the `jwtSecret` with the value shown as above in `config-sample.yaml` or `cluster-configuration.yaml`:
There is no big difference if you define a member cluster before installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set as follows:
```yaml
authentication:
jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU
```
Then scroll down and change the `clusterRole` to `member`:
Scroll down and set the value of `clusterRole` to `member`:
```yaml
multicluster:
@ -195,15 +202,15 @@ multicluster:
### Import Cluster
1. Open the H Cluster Dashboard and click **Add Cluster**.
1. Open the H Cluster dashboard and click **Add Cluster**.
![Add Cluster](https://ap3.qingstor.com/kubesphere-website/docs/20200827231611.png)
2. Enter the basic information of the imported cluster and click **Next**.
2. Enter the basic information of the cluster to be imported and click **Next**.
![Import Cluster](https://ap3.qingstor.com/kubesphere-website/docs/20200827211842.png)
3. In **Connection Method**, select **Cluster connection agent** and Click **Import**.
3. In **Connection Method**, select **Cluster connection agent** and click **Import**.
![agent-en](/images/docs/agent-en.png)

View File

@ -1,18 +1,17 @@
---
title: "Direct Connection"
keywords: 'kubernetes, kubesphere, multicluster, hybrid-cloud'
keywords: 'Kubernetes, KubeSphere, multicluster, hybrid-cloud, direct-connection'
description: 'Overview'
weight: 2340
weight: 3011
---
## Prerequisites
You have already installed at least two KubeSphere clusters, please refer to [Installing on Linux](../../../installing-on-linux) or [Installing on Kubernetes](../../../installing-on-kubernetes) if not yet.
You have already installed at least two KubeSphere clusters. Please refer to [Installing on Linux](../../../installing-on-linux) or [Installing on Kubernetes](../../../installing-on-kubernetes) if they are not ready yet.
{{< notice note >}}
Multi-cluster management requires Kubesphere to be installed on the target clusters. If you have an existing cluster, please install a minimal KubeSphere on it as an agent, see [Installing Minimal KubeSphere on Kubernetes](../../installing-on-kubernetes/minimal-kubesphere-on-k8s) for details.
Multi-cluster management requires KubeSphere to be installed on the target clusters. If you have an existing cluster, you can deploy KubeSphere on it with a minimal installation so that it can be imported. See [Minimal KubeSphere on Kubernetes](../../../quick-start/minimal-kubesphere-on-k8s/) for details.
{{</ notice >}}
## Direct Connection
@ -25,11 +24,11 @@ If the kube-apiserver address of Member Cluster (hereafter referred to as **M**
{{< tab "KubeSphere has been installed" >}}
If you already have a standalone KubeSphere installed, you can change the `clusterRole` to a host cluster by editing the cluster configuration and **wait for a while**.
If you already have a standalone KubeSphere installed, you can set the value of `clusterRole` to `host` by editing the cluster configuration. You need to **wait for a while** so that the change can take effect.
- Option A - Use Web Console:
Use `cluster-admin` account to enter **Cluster Management → CRDs**, search for the keyword `ClusterConfiguration` and enter its detailed page, edit the YAML of `ks-installer`. This is similar to Enable Pluggable Components.
Use the `admin` account to log in to the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
- Option B - Use Kubectl:
@ -37,7 +36,7 @@ Use `cluster-admin` account to enter **Cluster Management → CRDs**, search for
kubectl edit cc ks-installer -n kubesphere-system
```
Scroll down and change the value of `clusterRole` to `host`, then click **Update** to make it effective:
Scroll down and set the value of `clusterRole` to `host`, then click **Update** (if you use the web console) to make it effective:
```yaml
multicluster:
@ -48,7 +47,7 @@ multicluster:
{{< tab "KubeSphere has not been installed" >}}
There is no big difference if you just start the installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set like following:
There is no big difference if you define a host cluster before installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set as follows:
```yaml
multicluster:
@ -59,20 +58,22 @@ multicluster:
{{</ tabs >}}
Then you can use the **kubectl** to retrieve the installation logs to verify the status. Wait for a while, you will be able to see the successful logs return if the host cluster is ready.
You can use **kubectl** to retrieve the installation logs and verify the status by running the following command. Wait for a while, and you will be able to see a success message in the logs if the host cluster is ready.
```
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
```
### Prepare a Member Cluster
In order to manage the member cluster within the host cluster, we need to make the jwtSecret same between them. So first you need to get it from the host by the following command.
In order to manage the member cluster within the **host cluster**, you need to make sure they share the same `jwtSecret`. Therefore, you need to retrieve it from the **host cluster** first by running the following command.
```bash
kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret
```
The output may look like this:
```yaml
jwtSecret: "gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU"
```
@ -81,11 +82,11 @@ jwtSecret: "gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU"
{{< tab "KubeSphere has been installed" >}}
If you already have a standalone KubeSphere installed, you can change the `clusterRole` to a host cluster by editing the cluster configuration and **wait for a while**.
If you already have a standalone KubeSphere installed, you can set the value of `clusterRole` to `member` by editing the cluster configuration. You need to **wait for a while** so that the change can take effect.
- Option A - Use Web Console:
Use `cluster-admin` account to enter **Cluster Management → CRDs**, search for the keyword `ClusterConfiguration` and enter its detailed page, edit the YAML of `ks-installer`. This is similar to Enable Pluggable Components.
Use the `admin` account to log in to the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
- Option B - Use Kubectl:
@ -93,14 +94,14 @@ Use `cluster-admin` account to enter **Cluster Management → CRDs**, search for
kubectl edit cc ks-installer -n kubesphere-system
```
Then input the corresponding jwtSecret shown above:
Input the corresponding `jwtSecret` shown above:
```yaml
authentication:
jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU
```
Then scroll down and change the value of `clusterRole` to `member`, then click **Update** to make it effective:
Scroll down and set the value of `clusterRole` to `member`, then click **Update** (if you use the web console) to make it effective:
```yaml
multicluster:
@ -111,16 +112,16 @@ multicluster:
{{< tab "KubeSphere has not been installed" >}}
There is no big difference if you just start the installation. Please fill in the `jwtSecret` with the value shown as above in `config-sample.yaml` or `cluster-configuration.yaml`:
There is no big difference if you define a member cluster before installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set as follows:
```yaml
authentication:
jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU
```
Then scroll down and change the `clusterRole` to `member`:
Scroll down and set the value of `clusterRole` to `member`:
```
```yaml
multicluster:
clusterRole: member
```
@ -129,25 +130,25 @@ multicluster:
{{</ tabs >}}
Then you can use the **kubectl** to retrieve the installation logs to verify the status. Wait for a while, you will be able to see the successful logs return if the host cluster is ready.
You can use **kubectl** to retrieve the installation logs and verify the status by running the following command. Wait for a while, and you will be able to see a success message in the logs if the member cluster is ready.
```
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
```
### Import Cluster
1. Open the H Cluster Dashboard and click **Add Cluster**.
1. Open the H Cluster dashboard and click **Add Cluster**.
![Add Cluster](https://ap3.qingstor.com/kubesphere-website/docs/20200827231611.png)
2. Enter the basic information of the cluster and click **Next**.
2. Enter the basic information of the cluster to be imported and click **Next**.
![Import Cluster](https://ap3.qingstor.com/kubesphere-website/docs/20200827211842.png)
3. In **Connection Method**, select **Direct Connection to Kubernetes cluster**.
4. [Retrieve the KubeConfig](../retrieve-kubeconfig), then copy the KubeConfig of the Member Cluster and paste it into the box.
4. [Retrieve the KubeConfig](../retrieve-kubeconfig), copy the KubeConfig of the Member Cluster and paste it into the box.
{{< notice tip >}}
Please make sure the `server` address in the KubeConfig is accessible from any node of the H Cluster. For the `KubeSphere API Server` address, you can fill in the KubeSphere API server address or leave it blank.

View File

@ -1,10 +1,9 @@
---
title: "Retrieve KubeConfig"
keywords: 'kubernetes, kubesphere, multicluster, hybrid-cloud'
keywords: 'Kubernetes, KubeSphere, multicluster, hybrid-cloud, kubeconfig'
description: 'Overview'
weight: 2345
weight: 3014
---
## Prerequisites
@ -13,13 +12,13 @@ You have a KubeSphere cluster.
## Explore KubeConfig File
Go to `$HOME/.kube`, and see what files are there. Typically, there is a file named config. Use the following command to retrieve the KubeConfig file:
Go to `$HOME/.kube` and check the files in the directory. Normally, there is a file named **config**. Use the following command to retrieve the KubeConfig file:
```bash
cat $HOME/.kube/config
```
```
```yaml
apiVersion: v1
clusters:
- cluster:

View File

@ -1,14 +1,13 @@
---
title: "Kubernetes Federation in KubeSphere"
keywords: 'kubernetes, kubesphere, multicluster, hybrid-cloud'
keywords: 'Kubernetes, KubeSphere, federation, multicluster, hybrid-cloud'
description: 'Overview'
weight: 2340
weight: 3007
---
The multi-cluster feature relates to the network connection among multiple clusters. Therefore, it is important to understand the topological relations of clusters as the workload can be reduced.
Before you use the multi-cluster feature, you need to create a Host Cluster (hereafter referred to as **H** Cluster), which is actually a KubeSphere cluster that has enabled the multi-cluster feature. All the clusters managed by the H Cluster are called Member Cluster (hereafter referred to as **M** Cluster). They are common KubeSphere clusters that do not have the multi-cluster feature enabled. There can only be one H Cluster while multiple M Clusters can exist at the same time. In a multi-cluster architecture, the network between the H Cluster and the M Cluster can be connected directly or through an agent. The network between M Clusters can be set in a completely isolated environment.
Before you use the multi-cluster feature, you need to create a Host Cluster (hereafter referred to as **H** Cluster), which is actually a KubeSphere cluster with the multi-cluster feature enabled. All the clusters managed by the H Cluster are called Member Cluster (hereafter referred to as **M** Cluster). They are common KubeSphere clusters that do not have the multi-cluster feature enabled. There can only be one H Cluster while multiple M Clusters can exist at the same time. In a multi-cluster architecture, the network between the H Cluster and the M Cluster can be connected directly or through an agent. The network between M Clusters can be set in a completely isolated environment.
![Kubernetes Federation in KubeSphere](https://ap3.qingstor.com/kubesphere-website/docs/20200907232319.png)

View File

@ -1,16 +1,15 @@
---
title: "Overview"
keywords: 'kubernetes, kubesphere, multicluster, hybrid-cloud'
keywords: 'Kubernetes, KubeSphere, multicluster, hybrid-cloud'
description: 'Overview'
weight: 2335
weight: 3006
---
Today, it's very common for organizations to run and manage multiple Kubernetes Clusters on different cloud providers or infrastructures. Each Kubernetes cluster is a relatively self-contained unit. And the upstream community is struggling to research and develop the multi-cluster management solution, such as [kubefed](https://github.com/kubernetes-sigs/kubefed).
Today, it's very common for organizations to run and manage multiple Kubernetes clusters across different cloud providers or infrastructures. As each Kubernetes cluster is a relatively self-contained unit, the upstream community has been striving to research and develop a multi-cluster management solution. Among others, Kubernetes Cluster Federation ([KubeFed](https://github.com/kubernetes-sigs/kubefed) for short) is one possible approach.
The most common use cases in multi-cluster management including **service traffic load balancing, development and production isolation, decoupling of data processing and data storage, cross-cloud backup and disaster recovery, flexible allocation of computing resources, low latency access with cross-region services, and no vendor lock-in,** etc.
The most common use cases of multi-cluster management include service traffic load balancing, development and production isolation, decoupling of data processing and data storage, cross-cloud backup and disaster recovery, flexible allocation of computing resources, low latency access with cross-region services, and vendor lock-in avoidance.
KubeSphere is developed to address the multi-cluster and multi-cloud management challenges and implement the proceeding user scenarios, providing users with a unified control plane to distribute applications and its replicas to multiple clusters from public cloud to on-premise environment. KubeSphere also provides rich observability cross multiple clusters including centralized monitoring, logging, events, and auditing logs.
KubeSphere is developed to address multi-cluster and multi-cloud management challenges and implement the preceding user scenarios, providing users with a unified control plane to distribute applications and their replicas to multiple clusters, from public cloud to on-premises environments. KubeSphere also provides rich observability across multiple clusters, including centralized monitoring, logging, events, and auditing logs.
![KubeSphere Multi-cluster Management](/images/docs/multi-cluster-overview.jpg)

View File

@ -80,7 +80,7 @@ kubectl apply -f cluster-configuration.yaml
## Enable Alerting and Notification after Installation
1. Log in the console as `admin`. Click **Platform** at the top left corner and select **Clusters Management**.
1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
![clusters-management](https://ap3.qingstor.com/kubesphere-website/docs/20200828111130.png)
@ -96,7 +96,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource
![edit-yaml](https://ap3.qingstor.com/kubesphere-website/docs/20200827182002.png)
4. In this yaml file, navigate to `alerting` and `notification` and change `false` to `true` for `enabled`. After you finish, click **Update** at the bottom right corner to save the configuration.
4. In this yaml file, navigate to `alerting` and `notification` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
```bash
alerting:
@ -113,7 +113,7 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon at the bottom right corner of the console.
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
{{</ notice >}}

View File

@ -23,7 +23,7 @@ For more information, see App Store.
When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](https://kubesphere.io/docs/installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
```bash
vi config-sample.yaml
@ -31,7 +31,7 @@ vi config-sample.yaml
{{< notice note >}}
If you adopt [All-in-one Installation](https://kubesphere.io/docs/quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable App Store in this mode (e.g. for testing purpose), refer to the following section to see how App Store can be installed after installation.
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable App Store in this mode (e.g. for testing purpose), refer to the following section to see how App Store can be installed after installation.
{{</ notice >}}
@ -74,7 +74,7 @@ kubectl apply -f cluster-configuration.yaml
## Enable App Store after Installation
1. Log in the console as `admin`. Click **Platform** at the top left corner and select **Clusters Management**.
1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
![clusters-management](https://ap3.qingstor.com/kubesphere-website/docs/20200828111130.png)
@ -90,7 +90,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource
![edit-yaml](https://ap3.qingstor.com/kubesphere-website/docs/20200827182002.png)
4. In this yaml file, navigate to `openpitrix` and change `false` to `true` for `enabled`. After you finish, click **Update** at the bottom right corner to save the configuration.
4. In this yaml file, navigate to `openpitrix` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
```bash
openpitrix:
@ -105,7 +105,7 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon at the bottom right corner of the console.
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
{{</ notice >}}
View File
@ -19,7 +19,7 @@ For more information, see Logging, Events, and Auditing.
When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](https://kubesphere.io/docs/installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
```bash
vi config-sample.yaml
@ -27,7 +27,7 @@ vi config-sample.yaml
{{< notice note >}}
If you adopt [All-in-one Installation](https://kubesphere.io/docs/quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Auditing in this mode (e.g. for testing purpose), refer to the following section to see how Auditing can be installed after installation.
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Auditing in this mode (e.g. for testing purpose), refer to the following section to see how Auditing can be installed after installation.
{{</ notice >}}
@ -106,7 +106,7 @@ kubectl apply -f cluster-configuration.yaml
## Enable Auditing Logs after Installation
1. Log in the console as `admin`. Click **Platform** at the top left corner and select **Clusters Management**.
1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
![clusters-management](https://ap3.qingstor.com/kubesphere-website/docs/20200828111130.png)
@ -122,7 +122,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource
![edit-yaml](https://ap3.qingstor.com/kubesphere-website/docs/20200827182002.png)
4. In this yaml file, navigate to `auditing` and change `false` to `true` for `enabled`. After you finish, click **Update** at the bottom right corner to save the configuration.
4. In this yaml file, navigate to `auditing` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
```bash
auditing:
@ -155,7 +155,7 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon at the bottom right corner of the console.
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
{{</ notice >}}
View File
@ -21,7 +21,7 @@ For more information, see DevOps Administration.
When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](https://kubesphere.io/docs/installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
```bash
vi config-sample.yaml
@ -29,7 +29,7 @@ vi config-sample.yaml
{{< notice note >}}
If you adopt [All-in-one Installation](https://kubesphere.io/docs/quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable DevOps in this mode (e.g. for testing purpose), refer to the following section to see how DevOps can be installed after installation.
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable DevOps in this mode (e.g. for testing purpose), refer to the following section to see how DevOps can be installed after installation.
{{</ notice >}}
@ -72,7 +72,7 @@ kubectl apply -f cluster-configuration.yaml
## Enable DevOps after Installation
1. Log in the console as `admin`. Click **Platform** at the top left corner and select **Clusters Management**.
1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
![clusters-management](https://ap3.qingstor.com/kubesphere-website/docs/20200828111130.png)
@ -88,7 +88,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource
![edit-yaml](https://ap3.qingstor.com/kubesphere-website/docs/20200827182002.png)
4. In this yaml file, navigate to `devops` and change `false` to `true` for `enabled`. After you finish, click **Update** at the bottom right corner to save the configuration.
4. In this yaml file, navigate to `devops` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
```bash
devops:
@ -103,7 +103,7 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon at the bottom right corner of the console.
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
{{</ notice >}}
View File
@ -19,7 +19,7 @@ For more information, see Logging, Events and Auditing.
When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](https://kubesphere.io/docs/installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
```bash
vi config-sample.yaml
@ -27,7 +27,7 @@ vi config-sample.yaml
{{< notice note >}}
If you adopt [All-in-one Installation](https://kubesphere.io/docs/quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Events in this mode (e.g. for testing purpose), refer to the following section to see how Events can be installed after installation.
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Events in this mode (e.g. for testing purpose), refer to the following section to see how Events can be installed after installation.
{{</ notice >}}
@ -66,7 +66,7 @@ es: # Storage backend for logging, tracing, events and auditing.
When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) for cluster setting. If you want to install Events, do not use `kubectl apply -f` directly for this file.
1. In the tutorial of [Installing KubeSphere on Kubernetes](https://kubesphere.io/docs/installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml). After that, to enable Events, create a local file cluster-configuration.yaml.
1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml). After that, to enable Events, create a local file cluster-configuration.yaml.
```bash
vi cluster-configuration.yaml
@ -106,7 +106,7 @@ kubectl apply -f cluster-configuration.yaml
## Enable Events after Installation
1. Log in the console as `admin`. Click **Platform** at the top left corner and select **Clusters Management**.
1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
![clusters-management](https://ap3.qingstor.com/kubesphere-website/docs/20200828111130.png)
@ -122,7 +122,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource
![edit-yaml](https://ap3.qingstor.com/kubesphere-website/docs/20200827182002.png)
4. In this yaml file, navigate to `events` and change `false` to `true` for `enabled`. After you finish, click **Update** at the bottom right corner to save the configuration.
4. In this yaml file, navigate to `events` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
```bash
events:
@ -155,7 +155,7 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon at the bottom right corner of the console.
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
{{</ notice >}}
View File
@ -19,7 +19,7 @@ For more information, see Logging, Events and Auditing.
When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](https://kubesphere.io/docs/installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
```bash
vi config-sample.yaml
@ -27,7 +27,7 @@ vi config-sample.yaml
{{< notice note >}}
If you adopt [All-in-one Installation](https://kubesphere.io/docs/quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Logging in this mode (e.g. for testing purpose), refer to the following section to see how Logging can be installed after installation.
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Logging in this mode (e.g. for testing purpose), refer to the following section to see how Logging can be installed after installation.
{{</ notice >}}
@ -105,7 +105,7 @@ kubectl apply -f cluster-configuration.yaml
## Enable Logging after Installation
1. Log in the console as `admin`. Click **Platform** at the top left corner and select **Clusters Management**.
1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
![clusters-management](https://ap3.qingstor.com/kubesphere-website/docs/20200828111130.png)
@ -121,7 +121,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource
![edit-yaml](https://ap3.qingstor.com/kubesphere-website/docs/20200827182002.png)
4. In this yaml file, navigate to `logging` and change `false` to `true` for `enabled`. After you finish, click **Update** at the bottom right corner to save the configuration.
4. In this yaml file, navigate to `logging` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
```bash
logging:
@ -154,7 +154,7 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon at the bottom right corner of the console.
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
{{</ notice >}}
View File
@ -77,7 +77,7 @@ kubectl apply -f cluster-configuration.yaml
## Enable Network Policy after Installation
1. Log in the console as `admin`. Click **Platform** at the top left corner and select **Clusters Management**.
1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
![clusters-management](https://ap3.qingstor.com/kubesphere-website/docs/20200828111130.png)
@ -93,7 +93,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource
![edit-yaml](https://ap3.qingstor.com/kubesphere-website/docs/20200827182002.png)
4. In this yaml file, navigate to `networkpolicy` and change `false` to `true` for `enabled`. After you finish, click **Update** at the bottom right corner to save the configuration.
4. In this yaml file, navigate to `networkpolicy` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
```bash
networkpolicy:
@ -108,7 +108,7 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon at the bottom right corner of the console.
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
{{</ notice >}}
View File
@ -19,7 +19,7 @@ For more information, see related sections in Project Administration and Usage.
When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](https://kubesphere.io/docs/installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
```bash
vi config-sample.yaml
@ -27,7 +27,7 @@ vi config-sample.yaml
{{< notice note >}}
If you adopt [All-in-one Installation](https://kubesphere.io/docs/quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Service Mesh in this mode (e.g. for testing purpose), refer to the following section to see how Service Mesh can be installed after installation.
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Service Mesh in this mode (e.g. for testing purpose), refer to the following section to see how Service Mesh can be installed after installation.
{{</ notice >}}
@ -70,7 +70,7 @@ kubectl apply -f cluster-configuration.yaml
## Enable Service Mesh after Installation
1. Log in the console as `admin`. Click **Platform** at the top left corner and select **Clusters Management**.
1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
![clusters-management](https://ap3.qingstor.com/kubesphere-website/docs/20200828111130.png)
@ -86,7 +86,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource
![edit-yaml](https://ap3.qingstor.com/kubesphere-website/docs/20200827182002.png)
4. In this yaml file, navigate to `servicemesh` and change `false` to `true` for `enabled`. After you finish, click **Update** at the bottom right corner to save the configuration.
4. In this yaml file, navigate to `servicemesh` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
```bash
servicemesh:
@ -101,7 +101,7 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon at the bottom right corner of the console.
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
{{</ notice >}}
View File
@ -13,27 +13,27 @@ icon: "/images/docs/docs.svg"
Quickstarts include six hands-on lab exercises that help you quickly get started with KubeSphere. It is highly recommended that you go through all of these parts to explore the basic features of KubeSphere.
## [All-in-one Installation on Linux](https://kubesphere.io/docs/quick-start/all-in-one-on-linux/)
## [All-in-one Installation on Linux](../quick-start/all-in-one-on-linux/)
Learn how to install KubeSphere on Linux with a minimal installation package. The tutorial serves as a basic kick-starter for you to understand the container platform, paving the way for learning the following guides.
## [Minimal KubeSphere on Kubernetes](https://kubesphere.io/docs/quick-start/minimal-kubesphere-on-k8s/)
## [Minimal KubeSphere on Kubernetes](../quick-start/minimal-kubesphere-on-k8s/)
Learn how to install KubeSphere on existing Kubernetes clusters with a minimal installation package. Your Kubernetes clusters can be hosted on cloud or on-premises.
## [Create Workspace, Project, Account and Role](https://kubesphere.io/docs/quick-start/create-workspace-and-project/)
## [Create Workspace, Project, Account and Role](../quick-start/create-workspace-and-project/)
Understand how you can take advantage of the multi-tenant system in KubeSphere for fine-grained access control at different levels.
## [Deploy Bookinfo](https://kubesphere.io/docs/quick-start/deploy-bookinfo-to-k8s/)
## [Deploy Bookinfo and Manage Traffic](../quick-start/deploy-bookinfo-to-k8s/)
Explore KubeSphere service mesh by deploying Bookinfo and using different traffic management strategies, such as canary release.
## [Compose and Deploy Wordpress](https://kubesphere.io/docs/quick-start/composing-an-app/)
## [Compose and Deploy WordPress](../quick-start/wordpress-deployment/)
Learn the entire process of deploying an example app in KubeSphere, including credential creation, volume creation, and component setting.
## [Enable Pluggable Components](https://kubesphere.io/docs/quick-start/enable-pluggable-components/)
## [Enable Pluggable Components](../quick-start/enable-pluggable-components/)
Install pluggable components on the platform so that you can explore KubeSphere in an all-around way. Pluggable components can be enabled both before and after the installation.
View File
@ -37,9 +37,31 @@ The system requirements above and the instructions below are for the default min
- The node can be accessed through `SSH`.
- `sudo`/`curl`/`openssl` should be used.
- `ebtables`/`socat`/`ipset`/`conntrack` should be installed in advance.
- `docker` can be installed either by yourself in advance or automatically by KubeKey.
{{< notice note >}}
`docker` must be installed in advance if you want to deploy KubeSphere in an offline environment.
{{</ notice >}}
### Dependency Requirements
KubeKey can install Kubernetes and KubeSphere together. The dependencies that need to be installed may differ depending on the Kubernetes version to be installed. Refer to the table below to see whether you need to install relevant dependencies on your node in advance.
| Dependency | Kubernetes Version ≥ 1.18 | Kubernetes Version < 1.18 |
| ----------- | ------------------------- | ------------------------- |
| `socat` | Required | Optional but recommended |
| `conntrack` | Required | Optional but recommended |
| `ebtables` | Optional but recommended | Optional but recommended |
| `ipset` | Optional but recommended | Optional but recommended |
{{< notice info >}}
Developed in Go, KubeKey is a brand-new installation tool that replaces the Ansible-based installer used previously. KubeKey gives users flexible installation choices, as they can install KubeSphere and Kubernetes either separately or at the same time, which is convenient and efficient.
{{</ notice >}}
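For example, on Debian or Ubuntu nodes, the dependencies listed in the table above can usually be installed in advance with a single command (use the equivalent `yum`/`dnf` command on RPM-based systems):

```bash
# Install the recommended dependencies in advance (Debian/Ubuntu example)
sudo apt-get update && sudo apt-get install -y socat conntrack ebtables ipset
```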
### Network and DNS Requirements
- Make sure the DNS address in `/etc/resolv.conf` is available. Otherwise, it may cause DNS issues in the cluster.
@ -80,18 +102,12 @@ wget https://github.com/kubesphere/kubekey/releases/download/v1.0.0/kubekey-v1.0
{{</ tabs >}}
Grant the execution right to `kk`:
Make `kk` executable:
```bash
chmod +x kk
```
{{< notice info >}}
Developed in Go language, KubeKey represents a brand-new installation tool as a replacement for the ansible-based installer used before. KubeKey provides users with flexible installation choices, as they can install KubeSphere and Kubernetes separately or install them at one time, which is convenient and efficient.
{{</ notice >}}
## Step 3: Get Started with Installation
In this QuickStart tutorial, you only need to execute one command for installation, the template of which is shown below:
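For reference, the command is typically of the following form; the flags and version numbers below are illustrative only, so replace them with the Kubernetes and KubeSphere versions you actually intend to install:

```bash
# Illustrative all-in-one installation command; version numbers are examples only
./kk create cluster --with-kubernetes v1.18.6 --with-kubesphere v3.0.0
```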
@ -121,12 +137,6 @@ After you execute the command, you will see a table as below for environment che
Make sure the above components marked with `y` are installed and input `yes` to continue.
{{< notice note >}}
If you download the binary file directly in Step 2, you do not need to install `docker` as KubeKey will install it automatically.
{{</ notice >}}
## Step 4: Verify the Installation
When you see the output as below, it means the installation finishes.
@ -171,7 +181,7 @@ You may need to bind EIP and configure port forwarding in your environment for e
After logging in to the console, you can check the status of different components in **Components**. You may need to wait for some components to be up and running if you want to use related services. You can also use `kubectl get pod --all-namespaces` to inspect the running status of KubeSphere workloads.
![components](https://ap3.qingstor.com/kubesphere-website/docs/components.png)
![components](/images/docs/quickstart/kubesphere-components.png)
## Enable Pluggable Components (Optional)
View File
@ -38,11 +38,11 @@ After KubeSphere is installed, you need to add different users with varied roles
{{< notice tip >}}
For account security, it is highly recommended that you change your password the first time you log in the console. To change your password, select **User Settings** in the drop-down menu at the top right corner. In **Password Setting**, set a new password.
For account security, it is highly recommended that you change your password the first time you log in to the console. To change your password, select **User Settings** in the drop-down menu in the top-right corner. In **Password Setting**, set a new password.
{{</ notice >}}
2. After you log in the console, click **Platform** at the top left corner and select **Access Control**.
2. After you log in to the console, click **Platform** in the top-left corner and select **Access Control**.
![access-control](https://ap3.qingstor.com/kubesphere-website/docs/access-control.png)
@ -71,7 +71,7 @@ Click **OK** after you finish. A newly-created account will display in the accou
{{< notice tip >}}
To log out, click your username at the top right corner and select **Log Out**.
To log out, click your username in the top-right corner and select **Log Out**.
{{</ notice >}}
@ -92,7 +92,7 @@ For detailed information about the four accounts you need to create, refer to th
In this task, you need to create a workspace using the account `ws-manager` created in the previous task. As the basic logical unit for the management of projects, DevOps projects and organization members, workspaces underpin the multi-tenant system of KubeSphere.
1. Log in KubeSphere as `ws-manager` which has the authorization to manage all workspaces on the platform. Click **Platform** at the top left corner. In **Workspaces**, you can see there is only one default workspace **system-workspace** listed, where system-related components and services run. You are not allowed to delete this workspace.
1. Log in to KubeSphere as `ws-manager`, which has the authorization to manage all workspaces on the platform. Click **Platform** in the top-left corner. In **Workspaces**, you can see there is only one default workspace **system-workspace** listed, where system-related components and services run. You are not allowed to delete this workspace.
![create-workspace](https://ap3.qingstor.com/kubesphere-website/docs/create-workspace.jpg)
@ -178,7 +178,7 @@ A route refers to Ingress in Kubernetes, which is an API object that manages ext
{{< notice note >}}
If you want to expose services using the type `LoadBalancer`, you need to use the [LoadBalancer plugin of cloud providers](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/). If your Kubernetes cluster is running in a bare metal environment, it is recommended you use [Porter](https://github.com/kubesphere/porter) as the LoadBalancer plugin.
If you want to expose services using the type `LoadBalancer`, you need to use the [LoadBalancer plugin of cloud providers](https://kubernetes.io/zh/docs/concepts/cluster-administration/cloud-providers/). If your Kubernetes cluster is running in a bare metal environment, it is recommended you use [Porter](https://github.com/kubesphere/porter) as the LoadBalancer plugin.
{{</ notice >}}
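For reference, exposing a service this way only requires setting the Service type to `LoadBalancer`; the sketch below uses placeholder names and ports, and it assumes a working LoadBalancer plugin such as the ones mentioned in the note above:

```yaml
# Minimal Service sketch of type LoadBalancer; name, selector and ports are placeholders
apiVersion: v1
kind: Service
metadata:
  name: demo-service
spec:
  type: LoadBalancer
  selector:
    app: demo
  ports:
    - port: 80
      targetPort: 8080
```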
View File
@ -1,8 +1,231 @@
---
title: "Deploy a Bookinfo App"
title: "Deploy Bookinfo and Manage Traffic"
keywords: 'kubesphere, kubernetes, docker, multi-tenant'
description: 'Deploy a Bookinfo App'
linkTitle: "Deploy a Bookinfo App"
linkTitle: "Deploy Bookinfo and Manage Traffic"
weight: 3040
---
[Istio](https://istio.io/), as an open-source service mesh solution, provides powerful features of traffic management for microservices. Here is the introduction of traffic management from the official website of [Istio](https://istio.io/latest/docs/concepts/traffic-management/):
*Istios traffic routing rules let you easily control the flow of traffic and API calls between services. Istio simplifies configuration of service-level properties like circuit breakers, timeouts, and retries, and makes it easy to set up important tasks like A/B testing, canary rollouts, and staged rollouts with percentage-based traffic splits. It also provides out-of-box failure recovery features that help make your application more robust against failures of dependent services or the network.*
KubeSphere provides three grayscale release strategies based on Istio: blue-green deployment, canary release, and traffic mirroring.
Among them, the canary release is an effective software development strategy in which a new version is deployed for testing while the base version is preserved in the production environment. The strategy routes part of the traffic to the new version under test, while the production release serves the rest.
## Objective
In this tutorial, you will learn how to deploy a sample application Bookinfo composed of four separate microservices and use the traffic management feature of KubeSphere to publish a new version.
## Prerequisites
- You need to enable [KubeSphere Service Mesh](../../pluggable-components/service-mesh/).
- You need to finish all tasks in [Create Workspace, Project, Account and Role](../create-workspace-and-project/).
- You need to enable **Application Governance**. To do so, follow the steps below:
Log in to the console as `project-admin` and go to your project. Navigate to **Advanced Settings** under **Project Settings**, click **Edit**, and select **Edit Gateway**. In the dialog that appears, turn on the toggle switch next to **Application Governance**.
![edit-gateway](https://ap3.qingstor.com/kubesphere-website/docs/20200908145220.png)
![switch-application-governance](https://ap3.qingstor.com/kubesphere-website/docs/20200908150358.png)
{{< notice note >}}
You need to enable **Application Governance** so that you can use the Tracing feature. Once it is enabled, if the route is inaccessible, check whether the annotation (e.g. `nginx.ingress.kubernetes.io/service-upstream: true`) has been added to your route (Ingress).
{{</ notice >}}
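If the route turns out to be inaccessible and the annotation is missing, you can add it to the route's metadata yourself. The sketch below is a hypothetical excerpt: only the annotation itself comes from the note above, and the route name is a placeholder.

```yaml
# Hypothetical excerpt of the route (Ingress) metadata; only the annotation is prescribed above
metadata:
  name: bookinfo-ingress          # placeholder name
  annotations:
    nginx.ingress.kubernetes.io/service-upstream: "true"
```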
## Estimated Time
About 20 minutes.
## What is Bookinfo Application
The Bookinfo application is composed of four separate microservices as shown below. There are three versions of the **reviews** microservice.
- The **productpage** microservice calls the **details** and **reviews** microservices to populate the page.
- The **details** microservice contains book information.
- The **reviews** microservice contains book reviews. It also calls the ratings microservice.
- The **ratings** microservice contains book ranking information that accompanies a book review.
The end-to-end architecture of the application is shown below. See [Bookinfo Application](https://istio.io/latest/docs/examples/bookinfo/) for more details.
![Bookinfo Application](https://pek3b.qingstor.com/kubesphere-docs/png/20190718152533.png#align=left&display=inline&height=1030&originHeight=1030&originWidth=1712&search=&status=done&width=1712)
## Hands-on Lab
### Task 1: Deploy Bookinfo
1. Log in to the console as `project-regular` and enter **demo-project**. Navigate to **Applications** under **Application Workloads**, and click **Deploy Sample Application** on the right.
![sample-bookinfo](https://ap3.qingstor.com/kubesphere-website/docs/20200908100219.png)
2. Click **Next** in the dialog that appears, where required fields are pre-populated and relevant components are already set. You do not need to change the settings; just click **Create** on the final page (**Internet Access**).
![create-bookinfo](https://ap3.qingstor.com/kubesphere-website/docs/20200908101041.png)
3. In **Workloads**, make sure the status of all four deployments displays `running`, which means the app has been created successfully.
![running](https://ap3.qingstor.com/kubesphere-website/docs/20200908101328.png)
{{< notice note >}}
It may take a few minutes before the deployments are up and running.
{{</ notice >}}
### Task 2: Access Bookinfo
1. In **Applications**, go to **Composing App** and click the app `bookinfo` to see its detailed information.
![click-bookinfo](https://ap3.qingstor.com/kubesphere-website/docs/20200908102119.png)
{{< notice note >}}
If you do not see the app in the list, refresh your page.
{{</ notice >}}
2. In the detail page, record the hostname and port number of the app which will be used to access Bookinfo.
![](https://ap3.qingstor.com/kubesphere-website/docs/20200908102821.png)
3. As the app will be accessed outside the cluster via NodePort, you need to open the port in the image above (in this case, the port number is 32277) in your security group for outbound traffic and set any port forwarding rules if necessary.
4. Edit your local hosts file (`/etc/hosts`) by adding an entry that maps the hostname to the public IP address. For example:
```bash
# {Public IP} {hostname}
139.198.19.38 productpage.demo-project.192.168.0.2.nip.io
```
{{< notice warning >}}
Do not copy the content above directly to your local host file. Please replace it with your own public IP address and hostname.
{{</ notice >}}
5. When you finish, click the button **Click to visit** to access the app.
![click-to-visit](https://ap3.qingstor.com/kubesphere-website/docs/20200908105527.png)
6. In the app detail page, click **Normal user** in the bottom-left corner.
![normal-user](https://ap3.qingstor.com/kubesphere-website/docs/20200908105756.png)
7. As shown in the image below, only **Reviewer1** and **Reviewer2** are displayed in the **Book Reviews** section, without any stars. This is how the current version of the app looks. In the tasks below, you will see a different UI after a canary release.
![](https://ap3.qingstor.com/kubesphere-website/docs/20200908110106.png)
### Task 3: Create Canary Release
1. Go back to the KubeSphere console and select **Grayscale Release**. Click **Create Canary Release Job** and you will be directed to the **Grayscale Release** section of the project. Select **Canary Release** and click **Create Job**.
![](https://ap3.qingstor.com/kubesphere-website/docs/20200908110903.png)
![create-job](https://ap3.qingstor.com/kubesphere-website/docs/20200908111003.png)
2. Add a name (e.g. `canary-release`) and click **Next**. Select **reviews** as the component to roll out a change and click **Next**.
![](https://ap3.qingstor.com/kubesphere-website/docs/20200908111359.png)
3. In the next dialog, enter `v2` as **Grayscale Release Version Number** and change the image to `kubesphere/examples-bookinfo-reviews-v2:1.13.0` (`v1` changed to `v2`). Click **Next** to continue.
![release-version](https://ap3.qingstor.com/kubesphere-website/docs/20200908111958.png)
4. The canary release supports two release strategies: **Forward by traffic ratio** and **Forward by request content**. In this tutorial, please select **Forward by traffic ratio** and set the same traffic ratio for v1 and v2 (50% each). You can click the icon in the middle and move leftwards or rightwards to change the traffic ratio. Click **Create** to finish the setting.
![](https://ap3.qingstor.com/kubesphere-website/docs/20200908113031.png)
5. The job created will display in **Job Status**.
![canary-release-test](https://ap3.qingstor.com/kubesphere-website/docs/20200908113728.png)
### Task 4: Verify Canary Release
Visit the Bookinfo website again and refresh your browser repeatedly. You will see the **Book Reviews** section switch between v1 and v2 at a ratio of roughly 50%.
![verify-canary-release](https://ap3.qingstor.com/kubesphere-website/docs/canary.gif)
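Behind the scenes, a weight-based canary like this corresponds roughly to an Istio VirtualService with weighted routes. The sketch below is illustrative only: the host and subset names are assumptions, and KubeSphere generates the actual resources for you. Taking over all traffic in Task 7 is equivalent to shifting the v2 weight to 100.

```yaml
# Illustrative VirtualService for a 50/50 split between two versions of reviews
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
    - reviews
  http:
    - route:
        - destination:
            host: reviews
            subset: v1
          weight: 50
        - destination:
            host: reviews
            subset: v2
          weight: 50
```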
### Task 5: View Network Topology
1. Execute the following command on the machine where KubeSphere runs to bring in real traffic by simulating access to Bookinfo every 0.5 seconds.
```bash
watch -n 0.5 "curl http://productpage.demo-project.192.168.0.2.nip.io:32277/productpage?u=normal"
```
{{< notice note >}}
Make sure you replace the project name, IP address and port number in the above command with your own.
{{</ notice >}}
2. In **Traffic Management**, you can see the communication, dependencies, health, and performance among different microservices.
![traffic-management](https://ap3.qingstor.com/kubesphere-website/docs/20200908133652.png)
3. Click a component (e.g. **reviews**) to see traffic monitoring information on the right, which displays real-time data for **Traffic**, **Success rate** and **Duration**.
![real-time-data](https://ap3.qingstor.com/kubesphere-website/docs/20200908134454.png)
### Task 6: View Tracing Details
KubeSphere provides the distributed tracing feature based on [Jaeger](https://www.jaegertracing.io/), which is used to monitor and troubleshoot microservices-based distributed applications.
1. On the **Tracing** tab, you can clearly see all the phases and internal calls of a request, as well as the time spent in each phase.
![tracing](https://ap3.qingstor.com/kubesphere-website/docs/20200908135108.png)
2. Click any item, and you can even drill down to see request details and where this request is being processed (which machine or container).
![tracing-kubesphere](https://ap3.qingstor.com/kubesphere-website/docs/20200908135252.png)
### Task 7: Take Over All Traffic
With the canary release, you can test the new version online by bringing in part of the actual traffic and collecting user feedback. If everything runs smoothly without any issues, you can route all the traffic to the new version.
1. In **Grayscale Release**, click the canary release job.
![open-canary-release](https://ap3.qingstor.com/kubesphere-website/docs/20200908140138.png)
2. In the dialog that appears, click the three dots of **reviews v2** and select **Take Over**. It means 100% of the traffic will be sent to the new version (v2).
![](https://ap3.qingstor.com/kubesphere-website/docs/20200908140314.png)
{{< notice note >}}
If anything goes wrong with the new version, you can roll back to the previous version v1 anytime.
{{</ notice >}}
3. Open the Bookinfo page again and refresh the browser several times. You can find that it only shows the result of **reviews v2** (i.e. ratings with black stars).
![](https://ap3.qingstor.com/kubesphere-website/docs/20200908140921.png)
### Task 8: Remove the Old Version
Now that the new version v2 has successfully taken over all the traffic, you can remove the old version and release the resources of v1 as needed.
{{< notice warning >}}
After you remove a certain version, related workloads and Istio-based configuration resources will also be deleted.
{{</ notice >}}
1. In **Grayscale Release**, click the canary release job.
![open-canary-release](https://ap3.qingstor.com/kubesphere-website/docs/20200908140138.png)
2. In the dialog that appears, click **Job offline** to remove the old version.
![job-offline](https://ap3.qingstor.com/kubesphere-website/docs/20200908142246.png)
The above tasks serve as an example of how to adopt a canary release to control traffic and publish a new version of your app. You can also try different strategies in **Grayscale Release** or see related sections in **Project Administration and Usage**.
## Reference
[Bookinfo Application](https://istio.io/latest/docs/examples/bookinfo/)
View File
@ -27,7 +27,7 @@ For more information about each component, see Overview of Enable Pluggable Comp
{{< notice note >}}
- If you use KubeKey to install KubeSphere on Linux, by default, the above components are not enabled except `metrics_server`. However, `metrics_server` remains disabled in the installer if you install KubeSphere on existing Kubernetes clusters. This is because the component may already be installed in your environment, especially for cloud-hosted Kubernetes clusters.
- `multicluster` is not covered in this tutorial. If you want to enable this feature, you need to set a corresponding value for `clusterRole`. For more information, see [Multi-cluster Management](https://kubesphere.io/docs/multicluster-management/).
- `multicluster` is not covered in this tutorial. If you want to enable this feature, you need to set a corresponding value for `clusterRole`. For more information, see [Multi-cluster Management](../../multicluster-management/).
- Make sure your machine meets the hardware requirements before the installation. Here is the recommendation if you want to enable all pluggable components: CPU ≥ 8 Cores, Memory ≥ 16 G, Disk Space ≥ 100 G.
{{</ notice >}}
@ -38,7 +38,7 @@ For more information about each component, see Overview of Enable Pluggable Comp
When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](https://kubesphere.io/docs/installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
```bash
vi config-sample.yaml
@ -46,7 +46,7 @@ vi config-sample.yaml
{{< notice note >}}
If you adopt [All-in-one Installation](https://kubesphere.io/docs/quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable pluggable components in this mode (e.g. for testing purpose), refer to the following section to see how pluggable components can be installed after installation.
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable pluggable components in this mode (e.g. for testing purpose), refer to the following section to see how pluggable components can be installed after installation.
{{</ notice >}}
@ -83,7 +83,7 @@ Whether you install KubeSphere on Linux or on Kubernetes, you can check the stat
The KubeSphere web console provides a convenient way for users to view and operate on different resources. To enable pluggable components after installation, you only need to make a few adjustments directly in the console. Those who are accustomed to the Kubernetes command-line tool, kubectl, will have no difficulty using KubeSphere, as the tool is integrated into the console.
1. Log in the console as `admin`. Click **Platform** at the top left corner and select **Clusters Management**.
1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
![clusters-management](https://ap3.qingstor.com/kubesphere-website/docs/20200828111130.png)
@ -113,7 +113,7 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon at the bottom right corner of the console.
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
{{</ notice >}}
View File
@ -7,7 +7,7 @@ linkTitle: "Minimal KubeSphere on Kubernetes"
weight: 3020
---
In addition to installing KubeSphere on a Linux machine, you can also deploy it on existing Kubernetes clusters directly. This QuickStart guide walks you through the general steps of completing a minimal KubeSphere installation on Kubernetes. For more information, see [Installing on Kubernetes](https://kubesphere.io/docs/installing-on-kubernetes/).
In addition to installing KubeSphere on a Linux machine, you can also deploy it on existing Kubernetes clusters directly. This QuickStart guide walks you through the general steps of completing a minimal KubeSphere installation on Kubernetes. For more information, see [Installing on Kubernetes](../../installing-on-kubernetes/).
{{< notice note >}}
@ -15,7 +15,7 @@ In addition to installing KubeSphere on a Linux machine, you can also deploy it
- Make sure your machine meets the minimal hardware requirement: CPU > 1 Core, Memory > 2 G;
- A default Storage Class in your Kubernetes cluster needs to be configured before the installation;
- The CSR signing feature is activated in kube-apiserver when it is started with the `--cluster-signing-cert-file` and `--cluster-signing-key-file` parameters. See [RKE installation issue](https://github.com/kubesphere/kubesphere/issues/1925#issuecomment-591698309).
- For more information about the prerequisites of installing KubeSphere on Kubernetes, see [Prerequisites](https://kubesphere.io/docs/installing-on-kubernetes/introduction/prerequisites/).
- For more information about the prerequisites of installing KubeSphere on Kubernetes, see [Prerequisites](../../installing-on-kubernetes/introduction/prerequisites/).
{{</ notice >}}
@ -54,7 +54,7 @@ kubectl get svc/ks-console -n kubesphere-system
- Make sure port 30880 is opened in security groups and access the web console through the NodePort (`IP:30880`) with the default account and password (`admin/P@88w0rd`).
- After logging in to the console, you can check the status of different components in **Components**. You may need to wait for some components to be up and running if you want to use related services.
![components](https://ap3.qingstor.com/kubesphere-website/docs/components.png)
![components](/images/docs/quickstart/kubesphere-components.png)
## Enable Pluggable Components (Optional)
View File
@ -37,7 +37,7 @@ The environment variable `WORDPRESS_DB_PASSWORD` is the password to connect to t
![create-secret](https://ap3.qingstor.com/kubesphere-website/docs/20200903154611.png)
2. Enter the basic information (e.g. name it `mysql-secret`) and click **Next**. In the next page, select **Default** for **Type** and click **Add Data** to add a key-value pair. Input the Key (`MYSQL_ROOT_PASSWORD`) and Value (`123456`) as below and click `√` at the bottom right corner to confirm. When you finish, click **Create** to continue.
2. Enter the basic information (e.g. name it `mysql-secret`) and click **Next**. On the next page, select **Default** for **Type** and click **Add Data** to add a key-value pair. Enter the Key (`MYSQL_ROOT_PASSWORD`) and Value (`123456`) as below and click `√` in the bottom-right corner to confirm. When you finish, click **Create** to continue.
![key-value](https://ap3.qingstor.com/kubesphere-website/docs/20200903155603.png)
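If you prefer the command line, an equivalent Secret could also be created with kubectl. In the hypothetical command below, the namespace is a placeholder for your project, while the key and value come from the step above:

```bash
# Hypothetical CLI equivalent of the console steps above; replace demo-project with your project
kubectl -n demo-project create secret generic mysql-secret \
  --from-literal=MYSQL_ROOT_PASSWORD=123456
```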
@ -85,7 +85,7 @@ Follow the same steps above to create a WordPress secret `wordpress-secret` with
![container-image](https://ap3.qingstor.com/kubesphere-website/docs/container-image.png)
7. Enter `mysql:5.6` in the search box, press **Enter** and click **Use Default Ports**. After that, do not click `√` at the bottom right corner as the setting is not finished yet.
7. Enter `mysql:5.6` in the search box, press **Enter** and click **Use Default Ports**. After that, do not click `√` in the bottom-right corner as the setting is not finished yet.
![](https://ap3.qingstor.com/kubesphere-website/docs/20200903174120.png)
View File
@ -40,7 +40,7 @@ wget https://github.com/kubesphere/kubekey/releases/download/v1.0.0/kubekey-v1.0
{{</ tabs >}}
Grant the execution right to `kk`:
Make `kk` executable:
```bash
chmod +x kk
View File
@ -0,0 +1,13 @@
---
title: "support"
css: "scss/contribution.scss"
section1:
title: 'Community is the Soul of KubeSphere'
content: 'Join the community to get help, get involved, or get updates and KubeSphere news!'
topImage: "/images/contribution/contribution-top.jpg"
sectionIframe:
formUrl: https://jinshuju.net/f/bDS8me/embedded.js?inner_redirect=false&banner=show&background=white&height=1838
---
View File
@ -0,0 +1 @@
<a href="{{ .Destination | safeURL }}"{{ with .Title}} title="{{ . }}"{{ end }}{{ if strings.HasPrefix .Destination "http" }} target="_blank" rel="noopener noreferrer"{{ end }}>{{ .Text }}</a>
View File
@ -1,22 +0,0 @@
{{ define "main" }}
<section class="td-search-result">
<div class="">
<h2 style="margin-top: 200px;" class="ml-4">{{ .Title }}</h2>
{{ with .Site.Params.gcs_engine_id }}
<script>
(function() {
var cx = '{{ . }}';
var gcse = document.createElement('script');
gcse.type = 'text/javascript';
gcse.async = true;
gcse.src = 'https://cse.google.com/cse.js?cx=' + cx;
var s = document.getElementsByTagName('script')[0];
s.parentNode.insertBefore(gcse, s);
})();
</script>
<gcse:searchresults-only></gcse:searchresults-only>
{{ end }}
</div>
</section>
{{ end }}
View File
@ -0,0 +1,15 @@
{{ define "main" }}
{{ with .Params.section1 }}
<section class="section-1 bg-cover" style='background-image: url("{{ .topImage }}");'>
<div class="title-div common-layout">
<h1 class="title-center-h1">{{ .title }}</h1>
<p class="common-p">{{ .content }}</p>
</div>
</section>
{{ end }}
{{ with .Params.sectionIframe }}
<script src="{{ .formUrl }}"></script>
{{ end }}
{{ end }}
View File
@ -28,7 +28,7 @@
{{ range .list }}
<div>
<img src="{{ .image }}" alt="">
<p>{{ .content }}</p>
<p><a href="{{ .link }}">{{ .content }}</a></p>
</div>
{{ end }}
</div>
View File
@ -1,264 +1,308 @@
{{ define "main" }}
<!DOCTYPE html>
<html lang="{{ with .Site.LanguageCode }}{{ . }}{{ else }}en-US{{ end }}">
<head>
{{- partial "head.html" . -}}
</head>
<section class='section-1 bg-cover'>
<div class="common-layout">
{{ partial "breadcrumb.html" . }}
<div class="title-div">
{{ $sections := .FirstSection.Sections.ByWeight }}
{{ $secondSection := .FirstSection }}
{{ $page := . }}
{{ range $sections }}
{{ if $page.IsDescendant . }}
{{ $secondSection = . }}
<body>
{{- partial "header.html" . -}}
<main class='main-section {{ if eq .Site.Language.Lang "zh"}} padding{{ end }}'>
<section class='section-1 bg-cover'>
<div class="common-layout">
{{ partial "breadcrumb.html" . }}
<div class="title-div">
{{ $sections := .FirstSection.Sections.ByWeight }}
{{ $secondSection := .FirstSection }}
{{ $page := . }}
{{ range $sections }}
{{ if $page.IsDescendant . }}
{{ $secondSection = . }}
{{ end }}
{{ end }}
{{ end }}
<h1>{{ $secondSection.LinkTitle }}</h1>
<div class="menu section-control">
<h1>{{ $secondSection.LinkTitle }}</h1>
<div class="menu section-control">
<div></div>
<div></div>
<div></div>
</div>
</div>
<div class="search-div">
{{ partial "searchInput" . }}
</div>
</div>
</section>
<section class="section-menu">
{{ with (.GetPage "/docs") }}
<div class="common-layout">
{{ partial "section" . }}
</div>
{{ end }}
</section>
<section class="section-2">
<div class="common-layout">
<div class="left-div left-tree">
<div class="inner-tree">
{{ partial "tree.html" . }}
<!-- <div class="download-div">
<span class="download-icon"></span>
<span>{{ i18n "Download this page (PDF)" }}</span>
</div> -->
</div>
</div>
<div class="second-section-menu">
<div></div>
<div></div>
<div></div>
</div>
</div>
<div class="search-div">
{{ partial "searchInput" . }}
</div>
</div>
</section>
<section class="section-menu">
{{ with (.GetPage "/docs") }}
<div class="common-layout">
{{ partial "section" . }}
</div>
{{ end }}
</section>
<section class="section-2">
<div class="common-layout">
<div class="left-div">
{{ partial "tree.html" . }}
<!-- <div class="download-div">
<span class="download-icon"></span>
<span>{{ i18n "Download this page (PDF)" }}</span>
</div> -->
</div>
<div class="second-section-menu">
<div></div>
<div></div>
<div></div>
</div>
<div class="middle-div">
<div class="top-div common-flex-layout">
<div class="contributor-div">
<span>{{ i18n "Last updated" }}:</span>
<span class="update-time"></span>
<a href="{{ .Site.Params.githubBlobUrl }}/{{ .Site.Language.Lang }}/{{ .File.Path }}" target="_blank" rel="noopener noreferrer">
<ul class="contributor-avatar"></ul>
</a>
<span class="more-contributor"></span>
</div>
<div class="mutual-div common-flex-layout">
<div class="edit-div">
<a href="{{ .Site.Params.githubEditUrl }}/{{ .Site.Language.Lang }}/{{ .File.Path }}" target="_blank" rel="noopener noreferrer">
<span class="icon-span"></span>
<span>{{ i18n "edit" }}</span>
<div class="middle-div">
<div class="top-div common-flex-layout">
<div class="contributor-div">
<span>{{ i18n "Last updated" }}:</span>
<span class="update-time"></span>
<a href="{{ .Site.Params.githubBlobUrl }}/{{ .Site.Language.Lang }}/{{ .File.Path }}" target="_blank" rel="noopener noreferrer">
<ul class="contributor-avatar"></ul>
</a>
<span class="more-contributor"></span>
</div>
<div class="feedback-div">
<a href="{{ .Site.Params.githubUrl }}/issues/new" target="_blank" rel="noopener noreferrer">
<span class="icon-span"></span>
<span>{{ i18n "feedback" }}</span>
</a>
</div>
<div class="share-div">
<div>
<span class="icon-span"></span>
<span>{{ i18n "share" }}</span>
<div class="mutual-div common-flex-layout">
<div class="edit-div">
<a href="{{ .Site.Params.githubEditUrl }}/{{ .Site.Language.Lang }}/{{ .File.Path }}" target="_blank" rel="noopener noreferrer">
<span class="icon-span"></span>
<span>{{ i18n "edit" }}</span>
</a>
</div>
<div class="feedback-div">
<a href="{{ .Site.Params.githubUrl }}/issues/new" target="_blank" rel="noopener noreferrer">
<span class="icon-span"></span>
<span>{{ i18n "feedback" }}</span>
</a>
</div>
<div class="share-div">
<div>
<span class="icon-span"></span>
<span>{{ i18n "share" }}</span>
</div>
<ul>
<li>
<img src="/images/docs/twitter.svg" alt="twitter">
<a href="http://twitter.com/share?url={{ .Permalink }}&text={{ .Title }}" target="_blank" rel="noopener noreferrer">twitter</a>
</li>
<li>
<img src="/images/docs/linkedIn.svg" alt="linkedIn">
<a href="http://www.linkedin.com/shareArticle?mini=true&url={{ .Permalink }}" target="_blank" rel="noopener noreferrer">linkedIn</a>
</li>
<li>
<img src="/images/docs/facebook.svg" alt="facebook">
<a href="http://www.facebook.com/sharer.php?u={{ .Permalink }}" target="_blank" rel="noopener noreferrer">facebook</a>
</li>
<li>
<img src="/images/docs/reddit.svg" alt="reddit">
<a href="http://reddit.com/submit?url={{ .Permalink }}&title={{ .Title }}" target="_blank" rel="noopener noreferrer">reddit</a>
</li>
<li>
<img src="/images/docs/email.svg" alt="email" >
<a href="mailto:kubesphere@gmail.com" target="_blank" rel="noopener noreferrer">email</a>
</li>
</ul>
</div>
<ul>
<li>
<img src="/images/docs/twitter.svg" alt="twitter">
<a data-type="twitter" href="http://twitter.com/share?url={{ .Permalink }}&text={{ .Title }}" target="_blank" rel="noopener noreferrer">twitter</a>
</li>
<li>
<img src="/images/docs/linkedIn.svg" alt="linkedIn">
<a data-type="linkedin" href="http://www.linkedin.com/shareArticle?mini=true&url={{ .Permalink }}" target="_blank" rel="noopener noreferrer">linkedIn</a>
</li>
<li>
<img src="/images/docs/facebook.svg" alt="facebook">
<a data-type="facebook" href="http://www.facebook.com/sharer.php?u={{ .Permalink }}" target="_blank" rel="noopener noreferrer">facebook</a>
</li>
<li>
<img src="/images/docs/email.svg" alt="email" >
<a href="mailto:kubesphere@gmail.com" target="_blank" rel="noopener noreferrer">email</a>
</li>
</ul>
</div>
</div>
</div>
<div class="content-div main-div">
<div class="md-body">
<h1>{{ .Title }}</h1>
{{ .Content }}
<div class="content-div main-div">
<div class="md-body">
<h1>{{ .Title }}</h1>
{{ .Content }}
</div>
</div>
{{ if .IsPage }}
<div class="page-div common-flex-layout">
{{ with .Next }}
<!-- {{ if .IsDescendant (.GetPage "/docs")}} -->
<a class="last" href="{{.RelPermalink}}">
<img src="/images/docs/last.svg" alt="{{ i18n "Last" }}">
{{ i18n "Last" }}
<span>: {{.LinkTitle}}</span>
</a>
<!-- {{ end }} -->
{{ end }}
{{ with .Prev }}
<!-- {{ if .IsDescendant (.GetPage "/docs")}} -->
<a class="next" href="{{.RelPermalink}}">
{{ i18n "Next" }}
<span>: {{.LinkTitle}}</span>
<img src="/images/docs/next.svg" alt="{{ i18n "Next" }}">
</a>
<!-- {{ end }} -->
{{ end }}
</div>
{{ end }}
<div></div>
</div>
{{ if .IsPage }}
<div class="page-div common-flex-layout">
{{ with .Next }}
<!-- {{ if .IsDescendant (.GetPage "/docs")}} -->
<a class="last" href="{{.RelPermalink}}">
<img src="/images/docs/last.svg" alt="{{ i18n "Last" }}">
{{ i18n "Last" }}
<span>: {{.LinkTitle}}</span>
</a>
<!-- {{ end }} -->
{{ end }}
{{ with .Prev }}
<!-- {{ if .IsDescendant (.GetPage "/docs")}} -->
<a class="next" href="{{.RelPermalink}}">
{{ i18n "Next" }}
<span>: {{.LinkTitle}}</span>
<img src="/images/docs/next.svg" alt="{{ i18n "Next" }}">
</a>
<!-- {{ end }} -->
{{ end }}
<div class="aside">
<div class="inner-div">
<div class='title'>
{{ i18n "Whats on this Page" }}
</div>
<div class='tabs'>
{{ .TableOfContents }}
</div>
</div>
</div>
{{ end }}
<div></div>
</div>
{{ if .IsPage }}
<div class="aside aside-fixed">
<div class='title'>
{{ i18n "Whats on this Page" }}
</div>
<div class='tabs'>
{{ .TableOfContents }}
</div>
</div>
{{ end }}
</div>
</section>
{{ if .IsPage }}
<script src='{{ "js/aside.js" | relURL }}'></script>
{{ end }}
<script src='{{ "js/markdown-tab.js" | relURL }}'></script>
{{ $aside := resources.Get "js/aside.js" }}
{{ $asideJS := $aside | resources.Fingerprint "sha512" }}
<script type="text/javascript" src="{{ $asideJS.RelPermalink }}" integrity="{{ $asideJS.Data.Integrity }}"></script>
{{ $tab := resources.Get "js/markdown-tab.js" }}
{{ $tabJS := $tab | resources.Fingerprint "sha512" }}
<script type="text/javascript" src="{{ $tabJS.RelPermalink }}" integrity="{{ $tabJS.Data.Integrity }}"></script>
<script>
var languageCode = '{{.Site.Language.Lang}}'
var filePath = '{{ .File.Path }}'
var githubUrl = '{{ .Site.Params.githubUrl }}'
// var filePath = 'README.md'
var getLatestTime = function(data) {
var commit = data[0].commit
return commit.author.date.replace("T", " ").replace("Z", "")
}
var filterData = function(data) {
var arr = []
var length = data.length
for (var i = 0; i < length; i++) {
var author = data[i].author
if (author) {
var avatar_url = author.avatar_url
if (avatar_url && !arr.includes(avatar_url)) {
arr.push(avatar_url)
}
}
}
return arr
}
var getFileContributors = function() {
var url = 'https://api.github.com/repos/kubesphere/website/commits?path=content/' + languageCode + '/' + filePath
$.getJSON(url, function(data) {
if (!data || data.length === 0) {
$('.contributor-div').hide()
return
}
var newData = filterData(data)
var time = getLatestTime(data)
renderLatestTime(time)
renderContributors(newData)
})
}
var renderContributors = function(data) {
var contributor = $('.contributor-avatar')
var length = data.length
var len = 5
$.each(data.slice(0, len), function(index, item) {
var img = $("<img />", {
"src": item
var getFileContributors = function() {
var url = 'https://api.github.com/repos/kubesphere/website/commits?path=content/' + languageCode + '/' + filePath
$.getJSON(url, function(data) {
if (!data || data.length === 0) {
$('.contributor-div').hide()
return
}
var newData = filterData(data)
var time = getLatestTime(data)
renderLatestTime(time)
renderContributors(newData)
})
$("<li />", {
"html": img
}).appendTo(contributor);
});
var t = length - len
if (t > 0) {
$('.more-contributor').html('+' + t)
}
}
var renderLatestTime = function(time) {
$('.update-time').html(time)
}
var bindClickMenu = function() {
var sectionMenu = $(".section-menu")
$('.section-control').on('click', function(e){
if (sectionMenu.is(":hidden")) {
sectionMenu.show();
} else {
sectionMenu.hide()
}
$(document).one("click", function(){
sectionMenu.hide()
});
e.stopPropagation()
})
sectionMenu.on("click", function(e){
e.stopPropagation();
})
}
var bindClickSecondMenu = function() {
var sectionMenu = $(".section-2 .left-div")
$('.second-section-menu').on('click', function(e){
sectionMenu.css("left", "0")
$(document).one("click", function(){
sectionMenu.css("left", "-274px")
});
e.stopPropagation()
})
sectionMenu.on("click", function(e){
e.stopPropagation();
})
}
var useViewer = function() {
var viewer = new Viewer(document.querySelector('.md-body'), {
url: 'src'
})
}
var __main = function() {
getFileContributors()
bindClickMenu()
bindClickSecondMenu()
useViewer()
}
__main()
</script>
</main>
<footer>
<div class="down-main">
<div class='img-div'>
<a class='wechat' href="javascript:void(0);">
{{ "/static/images/footer/wechat.svg" | readFile | safeHTML }}
<div class="hide-div">
<p>Follow the official account</p>
<img src="/images/footer/wechat_code.svg" alt="">
</div>
</a>
<a class='facebook-a' href="https://www.facebook.com/kubesphere" target="_blank"></a>
<a class='twitter-a' href="{{ .Site.Params.twitterLink }}" target="_blank" rel="noopener noreferrer"></a>
<a class='linkedin-a' href="{{ .Site.Params.linkedinLink }}" target="_blank" rel="noopener noreferrer"></a>
<a class='youtube-a' href="{{ .Site.Params.youtubeLink }}" target="_blank" rel="noopener noreferrer"></a>
<a class='slack-a' href="{{ .Site.Params.slackLink }}" target="_blank" rel="noopener noreferrer"></a>
<a class='github-a' href="{{ .Site.Params.githubLink }}" target="_blank" rel="noopener noreferrer"></a>
<a class='medium-a' href="{{ .Site.Params.mediumLink }}" target="_blank" rel="noopener noreferrer"></a>
</div>
<p class='p1'>{{ i18n "KubeSphere® 2020 All Rights Reserved." }}</p>
</div>
</footer>
</body>
{{ end }}
</html>

View File

@@ -2,9 +2,6 @@
{{ $data := index .Site.Data .Site.Language.Lang }}
<section class='section-1'>
{{ with .Params.section1 }}
<img class='img1' src="/images/home/bg.jpg" alt="{{ i18n "background image"}}">
<img class='img2' src="/images/home/12-3.png" alt="{{ i18n "background image"}}">
<img class='img3' src="/images/home/2.svg" alt="{{ i18n "background image"}}">
<div class="common-layout">
<div>
<p class='p1'>{{ .title | safeHTML }}</p>
@@ -21,6 +18,9 @@
<img class='img2' src="/images/home/left.svg" alt="{{ i18n "background image"}}">
<img class='img4' src="/images/home/46.svg" alt="{{ i18n "background image"}}">
</div>
<img class='img1' src="/images/home/bg.jpg" alt="{{ i18n "background image"}}">
<img class='img2' src="/images/home/12-3.png" alt="{{ i18n "background image"}}">
<img class='img3' src="/images/home/2.svg" alt="{{ i18n "background image"}}">
{{ end }}
</section>

View File

@@ -23,7 +23,7 @@
<span><a href='{{ $parent | relLangURL }}'>{{ i18n $text }}</a> > </span>
<span>{{ $context.Title }}</span>
</div>
<div class='main-div'>
<div class='main-div middle-div'>
<div class='author'>{{ $context.Params.author }}</div>
<div class='date'>{{ i18n "Published on" }}{{ string $context.Params.createTime }}</div>
<h1>{{ $context.Title }}</h1>
@@ -39,16 +39,23 @@
{{ partial "share.html" }}
</div>
</div>
<div class='aside aside-fixed'>
<div class='title'>{{ i18n "Table of Contents" }}</div>
<div class='tabs'>{{ $context.TableOfContents }}</div>
<div class='aside'>
<div class="inner-div">
<div class='title'>{{ i18n "Table of Contents" }}</div>
<div class='tabs'>{{ $context.TableOfContents }}</div>
</div>
</div>
</div>
</section>
{{ partial "footer.html" $context }}
<script src='{{ "js/aside.js" | relURL }}'></script>
<script src='{{ "js/markdown-tab.js" | relURL }}'></script>
{{ $aside := resources.Get "js/aside.js" }}
{{ $asideJS := $aside | resources.Fingerprint "sha512" }}
<script type="text/javascript" src="{{ $asideJS.RelPermalink }}" integrity="{{ $asideJS.Data.Integrity }}"></script>
{{ $tab := resources.Get "js/markdown-tab.js" }}
{{ $tabJS := $tab | resources.Fingerprint "sha512" }}
<script type="text/javascript" src="{{ $tabJS.RelPermalink }}" integrity="{{ $tabJS.Data.Integrity }}"></script>
<script>
var viewer = new Viewer(document.querySelector('.md-body'), {
url: 'src'

View File

@@ -2,8 +2,12 @@
{{ $firstSection := .firstSection }}
{{ $pages := (union $s.Pages $s.Sections).ByWeight }}
{{ $first := index $s.Pages 0}}
{{ if $first.IsPage }}
<a href="{{ $first.RelPermalink }}">{{ $firstSection.LinkTitle }}</a>
{{ if $first }}
{{ if $first.IsPage }}
<a href="{{ $first.RelPermalink }}">{{ $firstSection.LinkTitle }}</a>
{{ else }}
{{ partial "firstPageInSection" (dict "section" $first "firstSection" $firstSection) }}
{{ end }}
{{ else }}
{{ partial "firstPageInSection" (dict "section" $first "firstSection" $firstSection) }}
{{ end }}
<a href="{{ $s.RelPermalink }}">{{ $firstSection.LinkTitle }}</a>
{{ end }}

View File

@@ -33,9 +33,9 @@
{{ range .Children }}
<li {{ if eq $section .URL }} class="active"{{ end}}>
{{ if hasPrefix .URL "http" }}
<a href="{{ .URL }}" target="_blank" rel="noopener noreferrer">{{ .Name }}</a>
<a href="{{ .URL }}" target="_blank" rel="noopener noreferrer">{{ .Name | safeHTML }}</a>
{{ else }}
<a href="{{ .URL | relLangURL }}">{{ .Name }}</a>
<a href="{{ .URL | relLangURL }}">{{ .Name | safeHTML }}</a>
{{ end }}
</li>
{{ end }}
@@ -110,13 +110,13 @@
<ul class="dropdown-menu">
{{ range .Children }}
<li {{ if eq $section .URL }} class="active"{{ end}}>
<a href="{{ .URL | relLangURL }}">{{ .Name }}</a>
<a href="{{ .URL | relLangURL }}">{{ .Name | safeHTML }}</a>
</li>
{{ end }}
</ul>
</li>
{{ else }}
<li {{ if eq $section .URL }} class="active"{{ end}}><a data-docs="{{ .Name }}" href="{{ .URL | relLangURL }}">{{ .Name }}</a></li>
<li {{ if eq $section .URL }} class="active"{{ end}}><a data-docs="{{ .Name }}" href="{{ .URL | relLangURL }}">{{ .Name | safeHTML }}</a></li>
{{ end }}
{{ end }}

View File

@@ -1,17 +1,17 @@
<div class="share">
<a data-type="twitter" href="http://twitter.com/share?url={{ .Permalink }}&text={{ .Title }}" target="_blank" rel="noopener noreferrer">
<a href="http://twitter.com/share?url={{ .Permalink }}&text={{ .Title }}" target="_blank" rel="noopener noreferrer">
<img src="/images/share/Twitter.svg" alt="twitter icon">
</a>
<a data-type="reddit" href="http://reddit.com/submit?url={{ .Permalink }}&title={{ .Title }}" target="_blank" rel="noopener noreferrer">
<a href="http://reddit.com/submit?url={{ .Permalink }}&title={{ .Title }}" target="_blank" rel="noopener noreferrer">
<img src="/images/share/Reddit.svg" alt="reddit icon">
</a>
<a data-type="facebook" href="http://www.facebook.com/sharer.php?u={{ .Permalink }}" target="_blank" rel="noopener noreferrer">
<a href="http://www.facebook.com/sharer.php?u={{ .Permalink }}" target="_blank" rel="noopener noreferrer">
<img src="/images/share/Facebook.svg" alt="facebook icon">
</a>
<a data-type="linkedin" href="http://www.linkedin.com/shareArticle?mini=true&url={{ .Permalink }}" target="_blank" rel="noopener noreferrer">
<a href="http://www.linkedin.com/shareArticle?mini=true&url={{ .Permalink }}" target="_blank" rel="noopener noreferrer">
<img src="/images/share/Linkedin.svg" alt="linkedin icon">
</a>
<a data-type="hackernews" href="https://news.ycombinator.com/submitlink?u={{ .Permalink }}&t={{ .Title }}" target="_blank" rel="noopener noreferrer">
<a href="https://news.ycombinator.com/submitlink?u={{ .Permalink }}&t={{ .Title }}" target="_blank" rel="noopener noreferrer">
<img src="/images/share/HackerNews.svg" alt="hackernews icon">
</a>
</div>

View File

@@ -0,0 +1,32 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="40" height="40" viewBox="0 0 40 40">
<defs>
<path id="yakiddwsaa" d="M0 0H40V40H0z"/>
</defs>
<g fill="none" fill-rule="evenodd">
<g>
<g>
<g>
<g transform="translate(-860 -571) translate(828 539) translate(20 20) translate(12 12)">
<mask id="anjbwj3q2b" fill="#fff">
<use xlink:href="#yakiddwsaa"/>
</mask>
<g mask="url(#anjbwj3q2b)">
<g>
<g>
<path fill="#E85454" fill-rule="nonzero" d="M5.299 4.641c-.05-.062-.144-.067-.2-.01-.529.537-2.844 3.038-2.655 5.545.11 2.078 1.635 3.335 1.72 3.403.066.054 2.304 2.448 10.333 6.446.02.005.096.03.14-.024 0 0 .062-.05.03-.129-3.69-8.06-8.632-14.318-9.368-15.23zM13.667 22.158c-.026-.09-.135-.09-.135-.09v-.005l-10.229.358c-.102.003-.163.115-.11.203 1.145 1.927 2.995 3.38 4.917 2.91 1.374-.34 4.482-2.51 5.508-3.245h-.003c.078-.074.052-.131.052-.131zM13.786 21.066C9.537 18.2 1.603 13.914.442 13.29c-.074-.04-.168-.002-.191.08-.716 2.431.328 4.366.328 4.366 1.134 2.396 3.297 3.123 3.297 3.123 1.002.41 2.002.436 2.002.436.155.029 6.215.005 7.839-.003.07-.002.105-.07.105-.07.053-.083-.036-.156-.036-.156zM12.836.332c-2.772.72-4.291 3.531-3.41 6.541l.003.006c.925 4.099 5.467 10.83 6.444 12.243.068.07.124.04.124.04.104-.027.096-.129.096-.129l.002.001C17.49 5.107 14.947.66 14.565.08c-.028-.043-.078-.065-.128-.059-.504.064-1.6.312-1.6.312zM24.59 3.563l-.002-.006c-.026-.091-.735-2.559-3.455-3.22 0 0-.747-.19-1.562-.314-.337-.051-3.1 4.788-1.673 19.023.013.093.08.115.08.115.1.04.15-.057.15-.057l.001.001c1.005-1.45 5.523-8.15 6.442-12.229 0 0 .498-1.974.019-3.313zM20.452 22.072l-.018.004c-.078.02-.124.108-.089.181.007.013.016.026.028.036v.001c1 .718 4.034 2.84 5.496 3.25 0 0 2.613.89 4.93-2.915.054-.088-.007-.202-.11-.205l-10.237-.354v.002zM33.75 13.351c-.025-.082-.117-.122-.193-.081-1.158.625-9.085 4.923-13.334 7.787v.002c-.116.077-.044.23.052.23 1.645.004 7.873.013 8.028-.014 0 0 .803-.037 1.798-.416 0 0 2.215-.704 3.366-3.22 0 0 .98-1.96.283-4.288zM31.553 10.162c.164-2.592-2.128-5.014-2.654-5.533-.056-.056-.148-.051-.198.01-.731.906-5.662 7.148-9.35 15.192h.002c-.06.127.072.238.17.182 7.7-3.833 9.926-6.117 10.312-6.433.08-.066 1.631-1.366 1.718-3.418z" transform="translate(3 3)"/>
</g>
<g>
<path fill="#D84848" fill-rule="nonzero" d="M14.372 19.238C7.518 15.65 5.83 13.781 5.578 13.58c-.085-.068-1.61-1.325-1.72-3.403-.142-1.89 1.138-3.776 2.003-4.82-.263-.341-.456-.584-.562-.715-.05-.062-.144-.067-.2-.01-.529.537-2.844 3.038-2.655 5.545.111 2.078 1.635 3.335 1.72 3.403.067.054 2.308 2.45 10.333 6.446.02.005.096.03.14-.024 0 0 .063-.05.03-.129-.097-.212-.196-.423-.295-.634zM7.292 21.295s-1-.027-2.002-.436c0 0-2.163-.727-3.297-3.123 0 0-.899-1.668-.455-3.852-.52-.284-.903-.49-1.096-.594-.074-.04-.168-.002-.191.08-.716 2.431.328 4.366.328 4.366 1.134 2.396 3.297 3.123 3.297 3.123 1.002.41 2.002.436 2.002.436.067.013 1.239.015 2.679.013-.727-.001-1.222-.005-1.265-.013zM16.232 17.55c-1.71-2.617-4.664-7.454-5.39-10.67l-.001-.007c-.883-3.01.637-5.821 3.41-6.54 0 0 .173-.04.413-.091-.04-.072-.075-.126-.1-.163-.027-.043-.077-.065-.127-.059-.504.064-1.6.312-1.6.312-2.773.72-4.292 3.531-3.41 6.541l.002.006c.925 4.099 5.467 10.83 6.444 12.243.068.07.124.04.124.04.104-.027.096-.129.096-.129l.002.001c.051-.508.096-1.002.137-1.485zM20.754.247c-.295-.065-.728-.155-1.183-.224-.337-.051-3.1 4.788-1.673 19.023.013.093.08.115.08.115.1.04.15-.057.15-.057l.001.001c.213-.307.584-.851 1.045-1.555-.95-11.365.872-16.25 1.58-17.303zM21.847 22.119l-1.394-.049-.001.002-.018.004c-.078.02-.124.108-.089.181.007.013.016.026.028.036v.001c1 .718 4.034 2.84 5.496 3.25 0 0 .677.23 1.647-.045-1.57-.507-4.58-2.602-5.669-3.38zM33.97 14.468c-.036-.358-.105-.732-.22-1.117-.025-.082-.117-.122-.193-.081-1.158.625-9.085 4.923-13.334 7.787v.002c-.116.077-.044.23.052.23.433.002 1.184.003 2.058.004 3.483-2.275 8.656-5.183 11.636-6.825zM29.558 5.347c-.286-.34-.527-.589-.659-.718-.056-.056-.148-.051-.198.01-.731.906-5.662 7.148-9.35 15.192h.002c-.06.127.072.238.17.182.531-.265 1.036-.521 1.516-.771 3.117-6.62 7.002-11.93 8.519-13.895z" transform="translate(3 3)"/>
</g>
<g>
<path fill="#C2C2C4" fill-rule="nonzero" d="M3.954 2.243H1.31c-.037 0-.066-.029-.066-.065V.094c0-.036-.03-.066-.066-.066H.066C.029.028 0 .058 0 .094v5.341c0 .036.03.066.066.066h1.113c.037 0 .066-.03.066-.066V3.283c0-.037.03-.066.066-.066h2.642c.037 0 .066.03.066.066v2.152c0 .036.03.066.066.066h1.117c.036 0 .066-.03.066-.066V.094c0-.036-.03-.066-.066-.066H4.085c-.036 0-.066.03-.066.066v2.084c0 .036-.03.065-.065.065zM10.19 3.308c0 .966-.295 1.368-1.327 1.368-1.031 0-1.336-.402-1.336-1.368V.094c0-.036-.03-.065-.065-.065H6.356c-.036 0-.066.029-.066.065v3.242c0 .694.065 1.176.337 1.47.485.537 1.205.753 2.236.753 1.032 0 1.739-.216 2.227-.753.265-.288.347-.77.347-1.47V.094c0-.036-.029-.065-.065-.065h-1.116c-.036 0-.065.029-.065.065v3.214zM14.17.066l-2.447 5.342c-.02.043.011.093.06.093h1.172c.027 0 .05-.016.061-.041l.495-1.221c.01-.025.034-.041.06-.041h2.485c.026 0 .05.016.06.04l.512 1.223c.01.024.034.04.06.04h1.242c.048 0 .08-.05.06-.093L15.526.066c-.01-.023-.034-.038-.06-.038H14.23c-.025 0-.049.015-.06.038zm-.201 3.066l.77-1.866c.022-.054.098-.054.12 0l.798 1.865c.019.043-.013.092-.06.092h-1.568c-.046 0-.078-.048-.06-.091zM23.855 4.034L22.606.074c-.008-.027-.034-.046-.062-.046h-1.372c-.03 0-.055.02-.063.048l-1.083 3.949c-.017.063-.106.065-.126.003L18.612.073c-.01-.027-.034-.045-.063-.045h-1.196c-.045 0-.077.045-.061.088l1.926 5.341c.01.026.034.044.062.044h1.42c.03 0 .056-.02.064-.049l1.032-3.818c.017-.064.107-.065.126-.003l1.181 3.824c.009.027.034.046.063.046h1.432c.029 0 .054-.018.063-.045L26.446.115c.014-.043-.018-.087-.063-.087h-1.136c-.029 0-.054.02-.063.047l-1.203 3.958c-.02.062-.106.062-.126 0zM26.743 2.865c0 .99.296 1.708.907 2.155.577.426 1.13.48 1.847.48h2.038c.036 0 .065-.029.065-.065v-.82c0-.037-.029-.066-.065-.066h-1.98c-.957 0-1.381-.344-1.409-1.274 0-.037.03-.068.066-.068h3.323c.036 0 .065-.03.065-.065v-.824c0-.036-.029-.066-.065-.066h-3.31c-.039 0-.07-.032-.066-.07.072-.81.522-1.2 1.397-1.2h1.979c.036 0 .065-.028.065-.065V.094c0-.036-.029-.066-.065-.066H29.53c-1.905 0-2.787.903-2.787 2.837zM32.7.094v5.34c0 .037.029.066.065.066h1.17c.036 0 .065-.03.065-.065V.094c0-.036-.03-.066-.066-.066h-1.169c-.036 0-.066.03-.066.066z" transform="translate(3 3) translate(0 28.422)"/>
</g>
<path fill="#D84848" fill-rule="nonzero" d="M4.74 22.84c-.004-.006-.064-.085-.117-.19l-.016-.026c-.008-.013-.013-.027-.016-.04-.028-.067-.049-.14-.05-.207l-1.238.044c-.102.003-.163.115-.11.203 1.138 1.916 2.974 3.362 4.884 2.918-1.304-.296-2.491-1.367-3.338-2.702z" transform="translate(3 3)"/>
</g>
</g>
</g>
</g>
</g>
</g>
</g>
</svg>
